text
stringlengths 2.5k
6.39M
| kind
stringclasses 3
values |
---|---|
```
%matplotlib inline
import numpy as np
from scipy.sparse.linalg import spsolve
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from condlib import conductance_matrix_READ
from timeit import default_timer as timer
# Memory array parameters
rL = 12
rHRS = 1e6
rPU = 1e3
n = 16
vRead = [0.5, 1.0, 1.6, 2.0, 2.5, 3.0, 4.0]
hubList = []
lsbList = []
WLvoltagesList = []
BLvoltagesList = []
cellVoltagesList = []
mask = np.ones((n, n), dtype=bool)
mask[n-1][n-1] = False
for v in vRead:
# Voltages for BLs and WLs (read voltages, unselected floating)
vBLsel = 0.0
vWLsel = v
start_t = timer()
# Create conductance matrix
conductancematrix, iinvector = conductance_matrix_READ(n, rL, rHRS, rPU,
vWLsel, vBLsel,
isel=n-1, jsel=n-1, verbose=False)
# Convert to sparse matrix (CSR)
conductancematrix = csr_matrix(conductancematrix)
# Solve
voltages = spsolve(conductancematrix, iinvector)
stop_t = timer()
# Separate WL and BL nodes and calculate cell voltages
WLvoltages = voltages[:n*n].reshape((n, n))
BLvoltages = voltages[n*n:].reshape((n, n))
WLvoltagesList.append(WLvoltages)
BLvoltagesList.append(BLvoltages)
cellVoltages = abs(BLvoltages - WLvoltages)
cellVoltagesList.append(cellVoltages)
# Calculate Highest Unselected Bit and Lowest Selected Bit
hub = np.max(cellVoltages[mask])
lsb = cellVoltages[n-1][n-1]
hubList.append(hub)
lsbList.append(lsb)
print "{:.4f} sec".format(stop_t - start_t)
print "Write voltage : {:.4f} V".format(v)
print "Highest unselected bit : {:.4f} V".format(hub)
print "Lowest selected bit : {:.4f} V".format(lsb)
if n < 9:
sns.heatmap(WLvoltagesList[2], square=True)
else:
sns.heatmap(WLvoltagesList[2], square=True, xticklabels=n/8, yticklabels=n/8)
plt.savefig("figures/read_mapWL_{}.png".format(n), dpi=300)
if n < 9:
sns.heatmap(BLvoltagesList[2], square=True)
else:
sns.heatmap(BLvoltagesList[2], square=True, xticklabels=n/8, yticklabels=n/8)
plt.savefig("figures/read_mapBL_{}.png".format(n), dpi=300, figsize=(10,10))
if n < 9:
sns.heatmap(cellVoltagesList[2], square=True)
else:
sns.heatmap(cellVoltagesList[2], square=True, xticklabels=n/8, yticklabels=n/8)
plt.savefig("figures/read_mapCell_{}.png".format(n), dpi=300, figsize=(10,10))
plt.plot(vRead, hubList, vRead, lsbList)
plt.plot([0.5, 4], [1.1, 1.1], [0.5, 4], [2.2, 2.2], c='gray', ls='--')
plt.plot([0.5, 4], [1.2, 1.2], c='gray', ls='--')
plt.xlim([0,4.5])
plt.ylim([0,4.5])
plt.ylabel("Vcell")
plt.xlabel("Vread")
plt.savefig("figures/read_margin_{}.png".format(n), dpi=300, figsize=(10,12))
plt.show()
# Find window
windowlsb = np.interp([1.2, 2.2], lsbList, vRead)
windowhub = np.interp(1.1, hubList, vRead)
print windowlsb
print windowhub
# Output data to csv
np.savetxt("data/read_margin_{}.csv".format(n),
np.vstack((vRead, lsbList, hubList)).T,
delimiter=',',
header="Vread,VcellLSB,VcellHUB",
footer=",WindowLSB = {} - {}, WindowHSB < {}".format(windowlsb[0], windowlsb[1], windowhub),
comments='')
np.savetxt("data/read_mapCell_{}.csv".format(n),
cellVoltagesList[2],
delimiter=',')
np.savetxt("data/read_mapWL_{}.csv".format(n),
WLvoltagesList[2],
delimiter=',')
np.savetxt("data/read_mapBL_{}.csv".format(n),
BLvoltagesList[2],
delimiter=',')
```
|
github_jupyter
|
# Telescopes: Tutorial 5
This notebook will build on the previous tutorials, showing more features of the `PsrSigSim`. Details will be given for new features, while other features have been discussed in the previous tutorial notebook. This notebook shows the details of different telescopes currently included in the `PsrSigSim`, how to call them, and how to define a user `telescope` for a simulated observation.
We again simulate precision pulsar timing data with high signal-to-noise pulse profiles in order to clearly show the input pulse profile in the final simulated data product. We note that the use of different telescopes will result in different signal strengths, as would be expected.
This example will follow previous notebook in defining all necessary classes except for `telescope`.
```
# import some useful packages
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# import the pulsar signal simulator
import psrsigsim as pss
```
## The Folded Signal
Here we will use the same `Signal` definitions that have been used in the previous tutorials. We will again simulate a 20-minute-long observation total, with subintegrations of 1 minute. The other simulation parameters will be 64 frequency channels each 12.5 MHz wide (for 800 MHz bandwidth).
We will simulate a real pulsar, J1713+0747, as we have a premade profile for this pulsar. The period, dm, and other relavent pulsar parameters come from the NANOGrav 11-yr data release.
```
# Define our signal variables.
f0 = 1500 # center observing frequecy in MHz
bw = 800.0 # observation MHz
Nf = 64 # number of frequency channels
# We define the pulse period early here so we can similarly define the frequency
period = 0.00457 # pulsar period in seconds for J1713+0747
f_samp = (1.0/period)*2048*10**-6 # sample rate of data in MHz (here 2048 samples across the pulse period)
sublen = 60.0 # subintegration length in seconds, or rate to dump data at
# Now we define our signal
signal_1713_GBT = pss.signal.FilterBankSignal(fcent = f0, bandwidth = bw, Nsubband=Nf, sample_rate = f_samp,
sublen = sublen, fold = True) # fold is set to `True`
```
## The Pulsar and Profiles
Now we will load the pulse profile as in Tutorial 3 and initialize a single `Pulsar` object.
```
# First we load the data array
path = 'psrsigsim/data/J1713+0747_profile.npy'
J1713_dataprof = np.load(path)
# Now we define the data profile
J1713_prof = pss.pulsar.DataProfile(J1713_dataprof)
# Define the values needed for the puslar
Smean = 0.009 # The mean flux of the pulsar, J1713+0747 at 1400 MHz from the ATNF pulsar catatlog, here 0.009 Jy
psr_name = "J1713+0747" # The name of our simulated pulsar
# Now we define the pulsar with the scaled J1713+0747 profiles
pulsar_J1713 = pss.pulsar.Pulsar(period, Smean, profiles=J1713_prof, name = psr_name)
# define the observation length
obslen = 60.0*20 # seconds, 20 minutes in total
```
## The ISM
Here we define the `ISM` class used to disperse the simulated pulses.
```
# Define the dispersion measure
dm = 15.921200 # pc cm^-3
# And define the ISM object, note that this class takes no initial arguements
ism_sim = pss.ism.ISM()
```
## Defining Telescopes
Here we will show how to use the two predefined telescopes, Green Bank and Arecibo, and the systems accociated with them. We will also show how to define a `telescope` from scratch, so that any current or future telescopes and systems can be simulated.
### Predefined Telescopes
We start off by showing the two predefined telescopes.
```
# Define the Green Bank Telescope
tscope_GBT = pss.telescope.telescope.GBT()
# Define the Arecibo Telescope
tscope_AO = pss.telescope.telescope.Arecibo()
```
Each telescope is made up of one or more `systems` consisting of a `Reciever` and a `Backend`. For the predefined telescopes, the systems for the `GBT` are the L-band-GUPPI system or the 800 MHz-GUPPI system. For `Arecibo` these are the 430 MHz-PUPPI system or the L-band-PUPPI system. One can check to see what these systems and their parameters are as we show below.
```
# Information about the GBT systems
print(tscope_GBT.systems)
# We can also find out information about a receiver that has been defined here
rcvr_LGUP = tscope_GBT.systems['Lband_GUPPI'][0]
print(rcvr_LGUP.bandwidth, rcvr_LGUP.fcent, rcvr_LGUP.name)
```
### Defining a new system
One can also add a new system to one of these existing telescopes, similarly to what will be done when define a new telescope from scratch. Here we will add the 350 MHz receiver with the GUPPI backend to the Green Bank Telescope.
First we define a new `Receiver` and `Backend` object. The `Receiver` object needs a center frequency of the receiver in MHz, a bandwidth in MHz to be centered on that center frequency, and a name. The `Backend` object needs only a name and a sampling rate in MHz. This sampling rate should be the maximum sampling rate of the backend, as it will allow lower sampling rates, but not higher sampling rates.
```
# First we define a new receiver
rcvr_350 = pss.telescope.receiver.Receiver(fcent=350, bandwidth=100, name="350")
# And then we want to use the GUPPI backend
guppi = pss.telescope.backend.Backend(samprate=3.125, name="GUPPI")
# Now we add the new system. This needs just the receiver, backend, and a name
tscope_GBT.add_system(name="350_GUPPI", receiver=rcvr_350, backend=guppi)
# And now we check that it has been added
print(tscope_GBT.systems["350_GUPPI"])
```
### Defining a new telescope
We can also define a new telescope from scratch. In addition to needing the `Receiver` and `Backend` objects to define at least one system, the `telescope` also needs the aperture size in meters, the total area in meters^2, the system temperature in kelvin, and a name. Here we will define a small 3-meter aperture circular radio telescope that you might find at a University or somebody's backyard.
```
# We first need to define the telescope parameters
aperture = 3.0 # meters
area = (0.5*aperture)**2*np.pi # meters^2
Tsys = 250.0 # kelvin, note this is not a realistic system temperature for a backyard telescope
name = "Backyard_Telescope"
# Now we can define the telescope
tscope_bkyd = pss.telescope.Telescope(aperture, area=area, Tsys=Tsys, name=name)
```
Now similarly to defining a new system before, we must add a system to our new telescope by defining a receiver and a backend. Since this just represents a little telescope, the system won't be comparable to the previously defined telescope.
```
rcvr_bkyd = pss.telescope.receiver.Receiver(fcent=1400, bandwidth=20, name="Lband")
backend_bkyd = pss.telescope.backend.Backend(samprate=0.25, name="Laptop") # Note this is not a realistic sampling rate
# Add the system to our telecope
tscope_bkyd.add_system(name="bkyd", receiver=rcvr_bkyd, backend=backend_bkyd)
# And now we check that it has been added
print(tscope_bkyd.systems)
```
## Observing with different telescopes
Now that we have three different telescopes, we can observe our simulated pulsar with all three and compare the sensitivity of each telescope for the same initial `Signal` and `Pulsar`. Since the radiometer noise from the telescope is added directly to the signal though, we will need to define two additional `Signals` and create pulses for them before we can observe them with different telescopes.
```
# We define three new, similar, signals, one for each telescope
signal_1713_AO = pss.signal.FilterBankSignal(fcent = f0, bandwidth = bw, Nsubband=Nf, sample_rate = f_samp,
sublen = sublen, fold = True)
# Our backyard telescope will need slightly different parameters to be comparable to the other signals
f0_bkyd = 1400.0 # center frequency of our backyard telescope
bw_bkyd = 20.0 # Bandwidth of our backyard telescope
Nf_bkyd = 1 # only process one frequency channel 20 MHz wide for our backyard telescope
signal_1713_bkyd = pss.signal.FilterBankSignal(fcent = f0_bkyd, bandwidth = bw_bkyd, Nsubband=Nf_bkyd, \
sample_rate = f_samp, sublen = sublen, fold = True)
# Now we make pulses for all three signals
pulsar_J1713.make_pulses(signal_1713_GBT, tobs = obslen)
pulsar_J1713.make_pulses(signal_1713_AO, tobs = obslen)
pulsar_J1713.make_pulses(signal_1713_bkyd, tobs = obslen)
# And disperse them
ism_sim.disperse(signal_1713_GBT, dm)
ism_sim.disperse(signal_1713_AO, dm)
ism_sim.disperse(signal_1713_bkyd, dm)
# And now we observe with each telescope, note the only change is the system name. First the GBT
tscope_GBT.observe(signal_1713_GBT, pulsar_J1713, system="Lband_GUPPI", noise=True)
# Then Arecibo
tscope_AO.observe(signal_1713_AO, pulsar_J1713, system="Lband_PUPPI", noise=True)
# And finally our little backyard telescope
tscope_bkyd.observe(signal_1713_bkyd, pulsar_J1713, system="bkyd", noise=True)
```
Now we can look at the simulated data and compare the sensitivity of the different telescopes. We first plot the observation from the GBT, then Arecibo, and then our newly defined backyard telescope.
```
# We first plot the first two pulses in frequency-time space to show the undispersed pulses
time = np.linspace(0, obslen, len(signal_1713_GBT.data[0,:]))
# Since we know there are 2048 bins per pulse period, we can index the appropriate amount
plt.plot(time[:4096], signal_1713_GBT.data[0,:4096], label = signal_1713_GBT.dat_freq[0])
plt.plot(time[:4096], signal_1713_GBT.data[-1,:4096], label = signal_1713_GBT.dat_freq[-1])
plt.ylabel("Intensity")
plt.xlabel("Time [s]")
plt.legend(loc = 'best')
plt.title("L-band GBT Simulation")
plt.show()
plt.close()
# And the 2-D plot
plt.imshow(signal_1713_GBT.data[:,:4096], aspect = 'auto', interpolation='nearest', origin = 'lower', \
extent = [min(time[:4096]), max(time[:4096]), signal_1713_GBT.dat_freq[0].value, signal_1713_GBT.dat_freq[-1].value])
plt.ylabel("Frequency [MHz]")
plt.xlabel("Time [s]")
plt.colorbar(label = "Intensity")
plt.show()
plt.close()
# Since we know there are 2048 bins per pulse period, we can index the appropriate amount
plt.plot(time[:4096], signal_1713_AO.data[0,:4096], label = signal_1713_AO.dat_freq[0])
plt.plot(time[:4096], signal_1713_AO.data[-1,:4096], label = signal_1713_AO.dat_freq[-1])
plt.ylabel("Intensity")
plt.xlabel("Time [s]")
plt.legend(loc = 'best')
plt.title("L-band AO Simulation")
plt.show()
plt.close()
# And the 2-D plot
plt.imshow(signal_1713_AO.data[:,:4096], aspect = 'auto', interpolation='nearest', origin = 'lower', \
extent = [min(time[:4096]), max(time[:4096]), signal_1713_AO.dat_freq[0].value, signal_1713_AO.dat_freq[-1].value])
plt.ylabel("Frequency [MHz]")
plt.xlabel("Time [s]")
plt.colorbar(label = "Intensity")
plt.show()
plt.close()
# Since we know there are 2048 bins per pulse period, we can index the appropriate amount
plt.plot(time[:4096], signal_1713_bkyd.data[0,:4096], label = "1400.0 MHz")
plt.ylabel("Intensity")
plt.xlabel("Time [s]")
plt.legend(loc = 'best')
plt.title("L-band Backyard Telescope Simulation")
plt.show()
plt.close()
```
We can see that, as expected, the Arecibo telescope is more sensitive than the GBT when observing over the same timescale. We can also see that even though the simulated pulsar here is easily visible with these large telescopes, our backyard telescope is not able to see the pulsar over the same amount of time, since the output is pure noise. The `PsrSigSim` can be used to determine the approximate sensitivity of an observation of a simulated pulsar with any given telescope that can be defined.
### Note about randomly generated pulses and noise
`PsrSigSim` uses `numpy.random` under the hood in order to generate the radio pulses and various types of noise. If a user desires or requires that this randomly generated data is reproducible we recommend using a call to the seed generator native to `Numpy` before calling the function that produces the random noise/pulses. Newer versions of `Numpy` are moving toward slightly different [functionality/syntax](https://numpy.org/doc/stable/reference/random/index.html), but are essentially used in the same way.
```
numpy.random.seed(1776)
pulsar_1.make_pulses(signal_1, tobs=obslen)
```
|
github_jupyter
|
```
import glob
import os
import sys
import struct
import pandas as pd
from nltk.tokenize import sent_tokenize
from tensorflow.core.example import example_pb2
sys.path.append('../src')
import data_io, params, SIF_embedding
def return_bytes(reader_obj):
len_bytes = reader_obj.read(8)
str_len = struct.unpack('q', len_bytes)[0]
e_s = struct.unpack("%ds" % str_len, reader_obj.read(str_len))
es = e_s[0]
c = example_pb2.Example.FromString(es)
article = str(c.features.feature['article'].bytes_list.value[0])
abstract = str(c.features.feature['abstract'].bytes_list.value[0])
ab = sent_tokenize(abstract)
clean_article = sent_tokenize(article)
clean_abstract = '. '.join([' '.join(s for s in x.split() if s.isalnum()) for x in ''.join(ab).replace("<s>","").split("</s>")]).strip()
return clean_abstract, clean_article, abstract
def load_embed(wordfile, weightfile, weightpara=1e-3, param=None, rmpc=0):
'''
wordfile: : location of embedding data (e.g., glove embedings)
weightfile: : location of TF data for words
weightpara: : the parameter in the SIF weighting scheme, usually in range [3e-5, 3e-3]
rmpc: : number of principal components to remove in SIF weighting scheme
'''
# input
wordfile = '/home/francisco/GitHub/SIF/data/glove.840B.300d.txt' # word vector file, can be downloaded from GloVe website
weightfile = '/home/francisco/GitHub/SIF/auxiliary_data/enwiki_vocab_min200.txt' # each line is a word and its frequency
# load word vectors
(words, Weights) = data_io.getWordmap(wordfile)
# load word weights
word2weight = data_io.getWordWeight(weightfile, weightpara) # word2weight['str'] is the weight for the word 'str'
weight4ind = data_io.getWeight(words, word2weight) # weight4ind[i] is the weight for the i-th word
# set parameters
param.rmpc = rmpc
return Weights, words, word2weight, weight4ind
def return_sif(sentences, words, weight4ind, param, Weights):
# x is the array of word indices, m is the binary mask indicating whether there is a word in that location
x, m = data_io.sentences2idx(sentences, words)
w = data_io.seq2weight(x, m, weight4ind) # get word weights
# get SIF embedding
embeddings = SIF_embedding.SIF_embedding(Weights, x, w, param) # embedding[i,:] is the embedding for sentence i
return embeddings
def embed_sentences(wordfile, weightfile, weightpara, param, rmpc, file_list):
Weights, words, word2weight, weight4ind = load_embed(wordfile, weightfile, weightpara, param, rmpc)
print('embeddings loaded...')
for file_i in file_list:
input_file = open(file_i, 'rb')
while input_file:
clean_abstract, clean_article = return_bytes(input_file)
clean_article = [' '.join([s for s in x if s.isalnum()]) for x in sdf['sentence'].str.split(" ")]
print('article cleaned...')
embeddings = return_sif(clean_article, words, weight4ind, param, Weights)
sdf = pd.DataFrame(clean_article, columns=['sentence'])
sdf['clean_sentence'] = [' '.join([s for s in x if s.isalnum()]) for x in sdf['sentence'].str.split(" ")]
sdf['summary'] = clean_abstract
sdf.ix[1:, 'summary'] = ''
embcols = ['emb_%i'%i for i in range(embeddings.shape[1])]
emb = pd.DataFrame(embeddings, columns = embcols)
sdf = pd.concat([sdf, emb], axis=1)
sdf = sdf[[sdf.columns[[2, 0, 1]].tolist() + sdf.columns[3:].tolist()]]
print(sdf.head())
break
break
myparams = params.params()
mainpath = 'home/francisco/GitHub/SIF/'
wordf = os.path.join(mainpath, 'data/glove.840B.300d.txt')
weightf = os.path.join(mainpath, 'auxiliary_data/enwiki_vocab_min200.txt')
wp = 1e-3
rp = 0
fl = ['/home/francisco/GitHub/cnn-dailymail/finished_files/chunked/train_000.bin']
wordfile, weightfile, weightpara, param, rmpc, file_list = wordf, weightf, wp, myparams, rp, fl
Weights, words, word2weight, weight4ind = load_embed(wordfile, weightfile, weightpara, param, rmpc)
clean_abstract
print('embeddings loaded...')
for file_i in file_list:
input_file = open(file_i, 'rb')
while input_file:
clean_abstract, clean_article, abstractx = return_bytes(input_file)
print('article cleaned...')
embeddings = return_sif(clean_article, words, weight4ind, param, Weights)
sdf = pd.DataFrame(clean_article, columns=['sentence'])
sdf['clean_sentence'] = [' '.join([s for s in x if s.isalnum()]) for x in sdf['sentence'].str.split(" ")]
sdf['summary'] = clean_abstract
sdf.ix[1:, 'summary'] = ''
embcols = ['emb_%i'%i for i in range(embeddings.shape[1])]
emb = pd.DataFrame(embeddings, columns = embcols)
sdf = pd.concat([sdf, emb], axis=1)
sdf = sdf[['summary', 'sentence', 'clean_sentence'] + sdf.columns[3:].tolist()].head()
print(sdf.head())
break
break
clean_abstract
abstractx
sdf['sentence'][0].split(" ")[0]
dfile = "/home/francisco/GitHub/DQN-Event-Summarization/SIF/data/metadata/cnn_dm_metadata.csv"
md = pd.read_csv(dfile)
md.head()
md.shape
md.describe()
import matplotlib.pyplot as plt
from sklearn.neighbors.kde import KernelDensity
import numpy as np
def cdfplot(xvar):
sortedvals=np.sort( xvar)
yvals=np.arange(len(sortedvals))/float(len(sortedvals))
plt.plot( sortedvals, yvals )
plt.grid()
plt.show()
%matplotlib inline
cdfplot(md['nsentences'])
cdfplot(md['sentences_nchar'])
cdfplot(md['summary_ntokens'])
```
|
github_jupyter
|
## FCLA/FNLA Fast.ai Numerical/Computational Linear Algebra
### Lecture 3: New Perspectives on NMF, Randomized SVD
Notes / In-Class Questions
WNixalo - 2018/2/8
Question on section: [Truncated SVD](http://nbviewer.jupyter.org/github/fastai/numerical-linear-algebra/blob/master/nbs/2.%20Topic%20Modeling%20with%20NMF%20and%20SVD.ipynb#More-Details)
Given A: `m` x `n` and Q: `m` x `r`; is Q the identity matrix?
A≈QQTA
```
import torch
import numpy as np
Q = np.eye(3)
print(Q)
print(Q.T)
print(Q @ Q.T)
# construct I matrix
Q = torch.eye(3)
# torch matrix multip
# torch.mm(Q, Q.transpose)
Q @ torch.t(Q)
```
So if A is *approx equal* to Q•Q.T•A .. but *not* equal.. then Q is **not** the identity, but is very close to it.
Oh, right. Q: m x r, **not** m x m...
If both the columns and rows of Q had been orthonormal, then it would have been the Identity, but only the columns (r) are orthonormal.
Q is a tall, skinny matrix.
---
AW gives range(A). AW has far more rows than columns ==> in practice these columns are approximately orthonormal (v.unlikely to get lin-dep cols when choosing random values).
QR decomposition is foundational to Numerical Linear Algebra.
Q consists of orthonormal columns, R is upper-triangular.
**Calculating Truncated-SVD:**
1\. Compute approximation to range(A). We want Q with r orthonormal columns such that $$A\approx QQ^TA$$
2\. Construct $B = Q^T A$, which is small ($r\times n$)
3\. Compute the SVD of $B$ by standard methods (fast since $B$ is smaller than $A$): $B = S\, Σ V^T$
4\. Since: $$A \approx QQ^TA = Q(S \, ΣV^T)$$ if we set $U = QS$, then we have a low rank approximation $A \approx UΣV^T$.
**How to choose $r$?**
If we wanted to get 5 cols from a matrix of 100 cols, (5 topics). As a rule of thumb, let's go for 15 instead. You don't want to explicitly pull exactly the amount you want due to the randomized component being present, so you add some buffer.
Since our projection is approximate, we make it a little bigger than we need.
**Implementing Randomized SVD:**
First we want a randomized range finder.
```
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn import decomposition
from scipy import linalg
import matplotlib.pyplot as plt
%matplotlib inline
np.set_printoptions(suppress=True)
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
remove = ('headers', 'footers', 'quotes')
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories, remove=remove)
# newsgroups_test = fetch_20newsgroups(subset='test', categories=categories, remove=remove)
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
vectorizer = CountVectorizer(stop_words='english')
vectors = vectorizer.fit_transform(newsgroups_train.data).todense() # (documents, vocab)
vocab = np.array(vectorizer.get_feature_names())
num_top_words=8
def show_topics(a):
top_words = lambda t: [vocab[i] for i in np.argsort(t)[:-num_top_words-1:-1]]
topic_words = ([top_words(t) for t in a])
return [' '.join(t) for t in topic_words]
# computes an orthonormal matrix whose range approximates the range of A
# power_iteration_normalizer can be safe_sparse_dot (fast but unstable), LU (imbetween), or QR (slow but most accurate)
def randomized_range_finder(A, size, n_iter=5):
# randomly init our Mat to our size; size: num_cols
Q = np.random.normal(size=(A.shape[1], size))
# LU decomp (lower triang * upper triang mat)
# improves accuracy & normalizes
for i in range(n_iter):
Q, _ = linalg.lu(A @ Q, permute_l=True)
Q, _ = linalg.lu(A.T @ Q, permute_l=True)
# QR decomp on A & Q
Q, _ = linalg.qr(A @ Q, mode='economic')
return Q
```
Randomized SVD method:
```
def randomized_svd(M, n_components, n_oversamples=10, n_iter=4):
# number of random columns we're going to create is the number of
# columns we want + number of oversamples (extra buffer)
n_random = n_components + n_oversamples
Q = randomized_range_finder(M, n_random, n_iter)
# project M to the (k + p) dimensional space using basis vectors
B = Q.T @ M
# compute SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = Q @ Uhat
# return the number of components we want from U, s, V
return U[:, :n_components], s[:n_components], V[:n_components, :]
%time u, s, v = randomized_svd(vectors, 5)
u.shape, s.shape, v.shape
show_topics(v)
```
Computational Complexity for a M`x`N matrix in SVD is $M^2N+N^3$, so Randomized (Truncated?) SVD is a *massive* improvement.
---
2018/3/7
Write a loop to calculate the error of your decomposition as your vary the # of topics. Plot the results.
```
# 1. how do I calculate decomposition error?:
# I guess I'll use MSE?
# # NumPy: # https://stackoverflow.com/questions/16774849/mean-squared-error-in-numpy
# def MSEnp(A,B):
# if type(A) == np.ndarray and type(B) == np.ndarray:
# return ((A - B) ** 2).mean()
# else:
# return np.square((A - B)).mean()
# Scikit-Learn:
from sklearn import metrics
MSE = metrics.mean_squared_error # usg: mse(A,B)
# 2. Now how to recompose my decomposition?:
%time B = vectors # original matrix
%time U, S, V = randomized_svd(B, 10) # num_topics = 10
# S is vector of Σ's singular values. Convert back to matrix:
%time Σ = S * np.eye(S.shape[0])
# from SVD formula: A ≈ U@Σ@V.T
%time A = U@Σ@V ## apparently randomized_svd returns V.T, not V ?
# 3. Finally calculated error I guess:
%time mse_error = MSE(A,B)
print(mse_error)
# Im putting way too much effort into this lol
def fib(n):
if n <= 1:
return n
else:
f1 = 1
f2 = 0
for i in range(n):
t = f1 + f2
tmp = f2
f2 += f1
f1 = tmp
return t
for i,e in enumerate(num_topics):
print(f'Topics: {num_topics[i]:>3} ',
f'Time: {num_topics[i]:>3}')
## Setup
import time
B = vectors
num_topics = [fib(i) for i in range(2,14)]
TnE = [] # time & error
## Loop:
for n_topics in num_topics:
t0 = time.time()
U, S, Vt = randomized_svd(B, n_topics)
Σ = S * np.eye(S.shape[0])
A = U@Σ@Vt
TnE.append([time.time() - t0, MSE(A,B)])
for i, tne in enumerate(TnE):
print(f'Topics: {num_topics[i]:>3} '
f'Time: {np.round(tne[0],3):>3} '
f'Error: {np.round(tne[1],12):>3}')
# https://matplotlib.org/users/pyplot_tutorial.html
plt.plot(num_topics, [tne[1] for tne in TnE])
plt.xlabel('No. Topics')
plt.ylabel('MSE Error')
plt.show()
## R.Thomas' class solution:
step = 20
n = 20
error = np.zeros(n)
for i in range(n):
U, s, V = randomized_svd(vectors, i * step)
reconstructed = U @ np.diag(s) @ V
error[i] = np.linalg.norm(vectors - reconstructed)
plt.plot(range(0,n*step,step), error)
```
Looks like she used the Norm instead of MSE. Same curve shape.
Here's why I used the fibonacci sequence for my topic numbers. This solution took much longer than mine (i=20 vs i=12) with more steps, yet mine appears smoother. Why? I figured this was the shape of curve I'd get: ie interesting bit is in the beginning, so I used a number sequence that spread out as you went so you'd get higher resolution early on. Yay.
---
**NOTE**: random magical superpower Machine Learning Data Analytics *thing*: ***Johnson-Lindenstrauss lemma***:
basically if you have a matrix with too many columns to work with (leading to overfitting or w/e else), multiple it by some random (square?) matrix and you'll preserve its properties but in a workable shape
https://en.wikipedia.org/wiki/Johnson-Lindenstrauss_lemma
|
github_jupyter
|
# USDA Unemployment
<hr>
```
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
```
# Data
## US Unemployment data by county
Economic Research Service
U.S. Department of Agriculture
link:
### Notes
- Year 2020, Median Household Income (2019), & '% of State Median HH Income had 78 Nan Values that are all from Puerto Rico.
- I am going to drop all rows from Puerto Rico, Puerto Rico does not show up in any of the other USDA data. If we want it back in, it will be easy to re-add the Puerto Rico data.
## Contants
<hr>
```
stats_master_list = ['Vermont',
'Mississippi',
'Maine',
'Montana',
'Washington',
'District of Columbia',
'Texas',
'Alabama',
'Michigan',
'Maryland',
'Rhode Island',
'South Dakota',
'Nebraska',
'Virginia',
'Florida',
'Utah',
'Louisiana',
'Missouri',
'Massachusetts',
'South Carolina',
'Pennsylvania',
'Tennessee',
'Minnesota',
'Idaho',
'Alaska',
'Oklahoma',
'North Dakota',
'Arkansas',
'Georgia',
'New Hampshire',
'Indiana',
'Puerto Rico',
'New Jersey',
'Delaware',
'West Virginia',
'Colorado',
'New York',
'Kansas',
'Arizona',
'Ohio',
'Hawaii',
'Illinois',
'Oregon',
'North Carolina',
'California',
'Kentucky',
'Wyoming',
'Iowa',
'Nevada',
'Connecticut',
'Wisconsin',
'New Mexico']
# column Names
columns = [ 'FIPS ', 'Name',
'2012', 2013,
2014, 2015,
2016, 2017,
2018, 2019,
'2020', 'Median Household Income (2019)',
'% of State Median HH Income']
"""
Duplicate check 3
from
https://thispointer.com/python-3-ways-to-check-if-there-are-duplicates-in-a-list/
"""
def checkIfDuplicates_3(listOfElems):
''' Check if given list contains any duplicates '''
for elem in listOfElems:
if listOfElems.count(elem) > 1:
return True
return False
```
## File managment
<hr>
```
files = os.listdir("../data_raw/USDA_gov-unemplyment/")
# remove mac file
files.remove('.DS_Store')
#files
```
# Example of the csv files
<hr>
```
# random peek
df = pd.read_excel('../data_raw/USDA_gov-unemplyment/UnemploymentReport (14).xlsx', skiprows=2)
df.shape
df.head()
df.tail()
```
# Create master DataFrame
<hr>
```
# Concat
# create master file
master_df = pd.DataFrame(columns = columns)
state_name_list = []
# LOOP
for file in files:
# read excel file
_df = pd.read_excel('../data_raw/USDA_gov-unemplyment/'+file, skiprows=2)
# read state_name
state_name = _df.iloc[0,1]
# DROP
#drop row 0
_df.drop(0, inplace = True)
# Drop last 2 rows
_df.drop(_df.tail(1).index, inplace = True)
# work around to drop NaN column
_temp_df = _df.iloc[:,0:12]
# work around to drop NaN column
_temp_df['% of State Median HH Income'] = _df['% of State Median HH Income']
# add Column for STATE name
# add state column
_temp_df['state'] = state_name
state_name_list.append(state_name)
# Concat
master_df = pd.concat([master_df, _temp_df])
```
<br>
## Dataframe clean up
<hr>
```
# reset Index
master_df.reset_index(drop = True, inplace = True )
master_df.columns
# Rename columns
master_df.rename(columns = {'FIPS ':'FIPS'}, inplace = True)
# shape
master_df.shape
master_df.head()
```
## Remove rows with all nan's
<hr>
```
master_df.isna().sum()
master_df[ master_df['FIPS'].isnull()].head()
nan_rows = master_df[ master_df['FIPS'].isnull()].index
nan_rows
len(nan_rows)
# remove rows with all Nans
master_df.drop(nan_rows, inplace = True)
master_df.isna().sum()
master_df[ master_df['2020'].isnull()].iloc[20:25,:]
```
- There are 78 rows that do have nans for 2020,
- all of the Remaing rows with nan's are form Puerto Rico
- I am going to remove the Nans from Puerto Rico because the other USDA data sets do not have Puerto Rico
```
master_df[ master_df['state'] == 'Puerto Rico' ].index
# Drop all Rows with state as Puerto Rico
index_names = master_df[ master_df['state'] == 'Puerto Rico' ].index
master_df.drop(index_names, inplace = True)
master_df.drop([], inplace = True )
master_df.isna().sum()
master_df.shape
```
<br>
# Sanity Check
<hr>
```
# unique Count of stats
master_df['state'].nunique()
len(state_name_list)
# checks if there are duplicates in state list
checkIfDuplicates_3(state_name_list)
master_df['state'].nunique()
```
# Write to CSV
<hr>
```
master_df.to_csv('../data/USDA/USDA_unemployment.csv', index=False)
master_df.shape
```
<br>
# EDA
```
master_df.shape
master_df.head(2)
plt.figure(figsize = (17, 17))
sns.scatterplot(data = master_df, x = '2020', y = "Median Household Income (2019)", hue = 'state');
plt.xlabel("% of unemployment")
plt.title("% of Unemployment by Household Median income 2019")
set(master_df['FIPS'])
```
|
github_jupyter
|
```
import pandas as pd
import bs4 as bs
dfs=pd.read_html('https://en.wikipedia.org/wiki/Research_stations_in_Antarctica#List_of_research_stations')
dfr=pd.read_html('https://en.wikipedia.org/wiki/Antarctic_field_camps')
df=dfs[1][1:]
df.columns=dfs[1].loc[0].values
df.to_excel('bases.xlsx')
import requests
url='https://en.wikipedia.org/wiki/Research_stations_in_Antarctica'
f=requests.get(url).content
soup = bs.BeautifulSoup(f, 'lxml')
parsed_table = soup.find_all('table')[1]
data = [[''.join(td.strings)+'#'+td.a['href'] if td.find('a') else
''.join(td.strings)
for td in row.find_all('td')]
for row in parsed_table.find_all('tr')]
headers=[''.join(row.strings)
for row in parsed_table.find_all('th')]
df = pd.DataFrame(data[1:], columns=headers)
stations=[]
for i in df.T.iteritems():
helper={}
dummy=i[1][0].split('#')
dummy0=dummy[0].split('[')[0].replace('\n',' ').replace('\n',' ').replace('\n',' ')
helper['name']=dummy0
helper['link']='https://en.wikipedia.org'+dummy[1]
dummy=i[1][2].replace('\n',' ').replace('\n',' ').replace('\n',' ')
if 'ummer since' in dummy:dummy='Permanent'
dummy=dummy.split('[')[0]
if 'emporary summer' in dummy:dummy='Summer'
if 'intermittently Summer' in dummy:dummy='Summer'
helper['type']=dummy
dummy=i[1][3].split('#')[0].replace('\n',' |').replace(']','').replace('| |','|')[1:]
if '' == dummy:dummy='Greenpeace'
helper['country']=dummy
dummy=i[1][4].replace('\n',' ').replace('\n',' ').replace('\n',' ').split(' ')[0]
if 'eteo' in dummy:dummy='1958'
helper['opened']=dummy
dummy=i[1][5].split('#')[0].replace('\n',' | ').replace('| and |','|').split('[')[0].replace('.','')
helper['program']=dummy
dummy=i[1][6].split('#')[0].replace('\n',', ').replace('| and |','|').split('[')[0].replace('.','')
helper['location']=dummy
dummy=i[1][7].replace('\n',' ')
if ' ' in dummy:
if 'Active' in dummy: dummy='Active'
elif 'Relocated to Union Glacier' in dummy: dummy='2014'
elif 'Unmanned activity' in dummy: dummy='Active'
elif 'Abandoned and lost' in dummy: dummy='1999'
elif 'Dismantled 1992' in dummy: dummy='1992'
elif 'Temporary abandoned since March 2017' in dummy: dummy='Active'
elif 'Reopened 23 November 2017' in dummy: dummy='Active'
elif 'Abandoned and lost' in dummy: dummy='1999'
else: dummy=dummy.split(' ')[1]
if dummy=='Active':
helper['active']=True
helper['closed']='9999'
else:
helper['active']=False
helper['closed']=dummy
if dummy=='Closed':
helper['active']=True
helper['closed']='9999'
dummy=i[1][8].replace('\n',', ').split('/')[2].split('(')[0].split('#')[0].split(',')[0].split('Coor')[0].split(u'\ufeff')[0].split(';')
helper['latitude']=dummy[0][1:]
helper['longitude']=dummy[1][1:]#.replace(' 0',' 0.001')[1:]
stations.append(helper)
dta=pd.DataFrame(stations)
dta.to_excel('stations.xlsx')
import cesiumpy
dta
iso2=pd.read_html('https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2')[2]
iso22=iso2[1:].set_index(1)[[0]]
def cc(c):
d=c.split('|')[0].strip()
if d=='Czech Republic': return 'CZ'
elif d=='Greenpeace': return 'AQ'
elif d=='Soviet Union': return 'RU'
elif d=='Russia': return 'RU'
elif d=='United States': return 'US'
elif d=='East Germany': return 'DE'
elif d=='United Kingdom': return 'GB'
elif d=='South Korea': return 'KR'
else: return iso22.loc[d][0]
flags=[]
for i in dta['country']:
flags.append('flags/glass2/'+cc(i).lower()+'.png')
dta['flag']=flags
dta[['name','link','active','type']].to_excel('links.xlsx')
```
Manually filled pop.xlsx
```
pop=pd.read_excel('pop.xlsx')
dta['summer']=pop['summer']
dta['winter']=pop['winter']
dta.to_excel('alldata.xlsx')
dta.set_index('name').T.to_json('antarctica.json')
v = cesiumpy.Viewer(animation=False, baseLayerPicker=True, fullscreenButton=True,
geocoder=False, homeButton=False, infoBox=True, sceneModePicker=True,
selectionIndicator=True, navigationHelpButton=False,
timeline=False, navigationInstructionsInitiallyVisible=True)
x=dta[dta['active']]
for i, row in x.iterrows():
r=0.7
t=10000
lon=float(row['longitude'])
lat=float(row['latitude'])
l0 = float(1**r)*t
cyl = cesiumpy.Cylinder(position=[lon, lat, l0/2.], length=l0,
topRadius=2.5e4, bottomRadius=2.5e4, material='grey',\
name=row['name'])
v.entities.add(cyl)
l1 = (float(row['summer'])**r)*t
cyl = cesiumpy.Cylinder(position=[lon, lat, l1/2.], length=l1*1.1,
topRadius=3e4, bottomRadius=3e4, material='crimson',\
name=row['name'])
v.entities.add(cyl)
l2 = float(row['winter']**r)*t
cyl = cesiumpy.Cylinder(position=[lon, lat, l2/2.], length=l2*1.2,
topRadius=6e4, bottomRadius=6e4, material='royalBlue',\
name=row['name'])
v.entities.add(cyl)
pin = cesiumpy.Pin.fromText(row['name'], color=cesiumpy.color.GREEN)
b = cesiumpy.Billboard(position=[float(row['longitude']), float(row['latitude']), l1*1.1+70000], \
image = row['flag'], scale=0.6,\
name=row['name'], pixelOffset = (0,0))
v.entities.add(b)
label = cesiumpy.Label(position=[float(row['longitude']), float(row['latitude']), l1*1.1+70000],\
text=row['name'], scale=0.6, name=row['name'],
pixelOffset = (0,22))
v.entities.add(label)
with codecs.open("index.html", "w", encoding="utf-8") as f:
f.write(v.to_html())
v
```
|
github_jupyter
|
# Python Collections
* Lists
* Tuples
* Dictionaries
* Sets
## lists
```
x = 10
x = 20
x
x = [10, 20]
x
x = [10, 14.3, 'abc', True]
x
print(dir(x))
l1 = [1, 2, 3]
l2 = [4, 5, 6]
l1 + l2 # concat
l3 = [1, 2, 3, 4, 5, 6]
l3.append(7)
l3
l3.count(2)
l3.count(8)
len(l3)
sum(l3), max(l3), min(l3)
l1
l2
l_sum = [] # l_sum = list()
if len(l1) == len(l2):
for i in range(len(l1)):
l_sum.append(l1[i] + l2[i])
l_sum
zip(l1, l2)
list(zip(l1, l2))
list(zip(l1, l3))
l_sum = [a + b for a,b in zip(l1, l2)]
l_sum
l_sum = [a + b for a,b in zip(l1, l3)]
l_sum
l3
l_sum.extend(l3[len(l_sum):])
l_sum
```
## tuple
tupe is immutable list
```
point = (3, 5)
print(dir(point))
l1[0]
point[0]
```
## comparison in tuples
```
(2, 3) > (1, 7)
(1, 4) > (5, 9)
(1, 10) > (5, 9)
(5, 10) > (5, 9)
(5, 7) > (5, 9)
```
## dictionaries
```
s = [134, 'Ahmed', 'IT']
s[1]
s[2]
# dic = {k:v, k:v, ....}
student = {'id' : 123, 'name': 'Ahmed', 'dept': 'IT'}
student
student['name']
print(dir(student))
student['age']
if 'age' in student:
print(student['age'])
student['age']
student['age'] = 22 # add item
student
student['age'] = 24 # update item
student
student.get('gpa')
print(student.get('gpa'))
student.get('gpa', 0)
student.get('address', 'NA')
student.items()
student.keys()
student.values()
gpa = student.pop('age')
gpa
student
item
student
```
### set
```
set1 = {'a', 'b', 'c'}
print(dir(set1))
set1.add('d')
set1
set1.add('a')
set1
'a' in set1
for e in set1:
print(e)
```
## count word freq
```
text = '''
middletons him says Garden offended do shoud asked or ye but narrow are first knows but going taste by six zealously said weeks come partiality great simplicity mr set By sufficient an blush enquire of Then projection into mean county mile garden with up people should shameless little Started get bed agreement get him as get around mrs wound next was Full might nay going totally four can happy may packages dwelling sent on face newspaper laughing off a one Houses wont on on thing hundred is he it forming humoured Rose at seems but Likewise supposing too poor good from get ye terminated fact when horrible am ye painful for it His good ask valley too wife led offering call myself favour we Sportsman to get remaining By ye on will be Thoughts carriage wondered in end her met about other me time position and his unknown first explained it breakfast are she draw of september keepf she mr china simple sing Nor would be how came Chicken them so an answered cant how or new and mother Total such knew perceived here does him you no Money warmly wholly people dull formerly an simplicity What pianoforte all favourite at wants doubtful incommode delivered Express formerly as uneasy silent am dear saw why put built had weddings for ought ecstatic he to must as forming like no boy understood use pleasure agreeable Felicity mirth had near yet attention at mean decisively need one mirth should denoting have she now juvenile dried an society speaking entreaties ten you am am pianoforte therefor friendship old no whom in many children law drawn eat views The set my lady will him could Inquietude desirous valley terms few Sir things Preferred though pleasant know then those down these means set garret formed in questions though Melancholy pure preserved strictly curiosity otherwise So oh above offices he who reasonably within she no concluded weeks met On like saw relation design for is because are disposed apartments We yet more an want stop Recommend ham believe who it can in appearance valley they melancholy besides remove ought genius up has Am excited Goodness latter directly my agreed questions case why check moment dine got put next he so steepest held again evening doubt wish not village six contented him indeed if Dashwood wholly so something Depending and all over wooded He mrs like nor forming little that so mrs greatest friendly of if having this you joy entire mrs can this really since Collected by Entrance rapid took up Hearts His newspaper tended so right through fat so An body exercise speedily warmth remarkably strongly disposing need in trifling stood led hence assured of in one He out an of had over to begin been really On do to fulfilled just Evil friends in so mrs do on Prepared neither was west if Could come The his finished own being it pretty may Continuing Spite performed half peculiar true begin disposal west Remain barton Nay unsatiable over gay out as new be True you humoured u old money excuse does what once Subjects it you two Can post kept temper Welcomed had not prudent on although there announcing after via right giving has mr simplicity speaking reserved by ask snug rapturous say at so Direct where wrong since matter very in Visited passed by him Polite itself she between thus concealed shy against Written juvenile explained no Ham expense as packages produce today until why way wife Home on joy its said reserved in Hard sake suspected mr mr plan still at an Led ample their no indeed miss or jennings my Her back has an are an jokes its Dejection she ye roof early we true up he said they prevailed real continual merely our no to in but why expense felt less true Rich yesterday Admitting put stronger drawings now the shortly gay wished whole easily fine compliment Answer yet mean am see departure Necessary found feeling Not existence make compact for his oh now sufficient Neglected men hence happening high part Off message inhabiting strangers on do during Unpleasant any Entered advice great he Projecting be mutual bad Our make did i our in pleasure elsewhere wish material become out length uneasy some offending suitable misery dull ecstatic yet accused leave had Oh suitable ecstatic ten are throwing guest he so felicity you how every residence deal besides attacks estimating bred Mrs hearing blessing nay ago than favourable middleton water stronger barton match steepest or or situation Winter much two yet songs me only thanks no though of do Handsome aften hope Own your dependent up Attended her making come ya do Rich Dear
'''
len(text) # num of chars
len(text.split()) # num of words. whitespace is the delimter
words = text.split()
words[:10]
len(text.split('\n')) # num of lines
count_dict = dict()
for word in words:
if word in count_dict:
count_dict[word] += 1
else:
count_dict[word] = 1
#count_dict
sum(count_dict.values())
count_dict = dict()
for word in words:
count_dict[word] = count_dict.get(word, 0) + 1
sum(count_dict.values())
print(sorted(count_dict.items()))
sorted(count_dict.values(), reverse=True)
r_count_dict = [ (v, k) for k,v in count_dict.items()]
sorted(r_count_dict, reverse=True)[:10]
def find_all_indeces(words, keyword):
postions = []
for i in range(len(words)):
if words[i] == keyword:
postions.append(i)
return postions
find_all_indeces(words, 'suitable')
```
|
github_jupyter
|
# Generate and Perform Tiny Performances from the MDRNN
- Generates unconditioned and conditioned output from RoboJam's MDRNN
- Need to open `touchscreen_performance_receiver.pd` in [Pure Data](http://msp.ucsd.edu/software.html) to hear the sound of performances.
- To test generated performances, there need to be example performances in `.csv` format in `../performances`. These aren't included in the repo right now, but might be updated in future.
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# little path hack to get robojam from one directory up in the filesystem.
from context import * # imports robojam
# import robojam # alternatively do this.
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
```
## Plotting Methods
Mainly using "plot_and_perform" method to generate 2D and 3D plots.
```
input_colour = 'darkblue'
gen_colour = 'firebrick'
plt.style.use('seaborn-talk')
osc_client = robojam.TouchScreenOscClient()
def plot_2D(perf_df, name="foo", saving=False):
"""Plot in 2D"""
## Plot the performance
swipes = divide_performance_into_swipes(perf_df)
plt.figure(figsize=(8, 8))
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
plt.ylim(1.0,0)
plt.xlim(0,1.0)
plt.xticks([])
plt.yticks([])
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_double_2d(perf1, perf2, name="foo", saving=False):
"""Plot two performances in 2D"""
plt.figure(figsize=(8, 8))
swipes = divide_performance_into_swipes(perf1)
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=input_colour, linewidth=5.0)
swipes = divide_performance_into_swipes(perf2)
for swipe in swipes:
p = plt.plot(swipe.x, swipe.y, 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
plt.ylim(1.0,0)
plt.xlim(0,1.0)
plt.xticks([])
plt.yticks([])
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_3D(perf_df, name="foo", saving=False):
"""Plot in 3D"""
## Plot in 3D
swipes = divide_performance_into_swipes(perf_df)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for swipe in swipes:
p = ax.plot(list(swipe.index), list(swipe.x), list(swipe.y), 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
ax.set_ylim(0,1.0)
ax.set_zlim(1.0,0)
ax.set_xlabel('time (s)')
ax.set_ylabel('x')
ax.set_zlabel('y')
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_double_3d(perf1, perf2, name="foo", saving=False):
"""Plot two performances in 3D"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
swipes = divide_performance_into_swipes(perf1)
for swipe in swipes:
p = ax.plot(list(swipe.index), list(swipe.x), list(swipe.y), 'o-')
plt.setp(p, color=input_colour, linewidth=5.0)
swipes = divide_performance_into_swipes(perf2)
for swipe in swipes:
p = ax.plot(list(swipe.index), list(swipe.x), list(swipe.y), 'o-')
plt.setp(p, color=gen_colour, linewidth=5.0)
ax.set_ylim(0,1.0)
ax.set_zlim(1.0,0)
ax.set_xlabel('time (s)')
ax.set_ylabel('x')
ax.set_zlabel('y')
if saving:
plt.savefig(name+".png", bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_and_perform_sequentially(perf1, perf2, perform=True):
total = np.append(perf1, perf2, axis=0)
total = total.T
perf1 = perf1.T
perf2 = perf2.T
perf1_df = pd.DataFrame({'x':perf1[0], 'y':perf1[1], 't':perf1[2]})
perf2_df = pd.DataFrame({'x':perf2[0], 'y':perf2[1], 't':perf2[2]})
total_df = pd.DataFrame({'x':total[0], 'y':total[1], 't':total[2]})
perf1_df['time'] = perf1_df.t.cumsum()
total_perf1_time = perf1_df.t.sum()
perf2_df['time'] = perf2_df.t.cumsum() + total_perf1_time
total_df['time'] = total_df.t.cumsum()
## Plot the performances
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(perf1_df.time, perf1_df.x, perf1_df.y, '.b-')
ax.plot(perf2_df.time, perf2_df.x, perf2_df.y, '.r-')
plt.show()
if perform:
osc_client.playPerformance(total_df)
def divide_performance_into_swipes(perf_df):
"""Divides a performance into a sequence of swipe dataframes."""
touch_starts = perf_df[perf_df.moving == 0].index
performance_swipes = []
remainder = perf_df
for att in touch_starts:
swipe = remainder.iloc[remainder.index < att]
performance_swipes.append(swipe)
remainder = remainder.iloc[remainder.index >= att]
performance_swipes.append(remainder)
return performance_swipes
```
## Generate and play a performance
Performances are generated using the `generate_random_tiny_performance` method which is set to produce performances up to 5 seconds. The LSTM state and first touch can optionally be kept from the last evaluation or re-initialised.
This block can be run multiple times to generate more performances.
```
# Generate and play one unconditioned performance
# Hyperparameters:
HIDDEN_UNITS = 512
LAYERS = 3
MIXES = 16
# Network
net = robojam.MixtureRNN(mode=robojam.NET_MODE_RUN, n_hidden_units=HIDDEN_UNITS, n_mixtures=MIXES, batch_size=1, sequence_length=1, n_layers=LAYERS)
osc_client.setSynth(instrument = "chirp")
model_file = "../models/mdrnn-2d-1d-3layers-512units-16mixtures"
TEMPERATURE = 1.00
# Generate
perf = robojam.generate_random_tiny_performance(net, np.array([0.5, 0.5, 0.1]), time_limit=5.0, temp=TEMPERATURE, model_file=model_file)
# Plot and perform.
perf_df = robojam.perf_array_to_df(perf)
plot_2D(perf_df, saving=False)
plot_3D(perf_df, saving=False)
osc_client.playPerformance(perf_df)
## Generate a number of unconditioned performances
NUMBER = 10
# Hyperparameters:
HIDDEN_UNITS = 512
LAYERS = 3
MIXES = 16
net = robojam.MixtureRNN(mode=robojam.NET_MODE_RUN, n_hidden_units=HIDDEN_UNITS, n_mixtures=MIXES, batch_size=1, sequence_length=1, n_layers=LAYERS)
# Setup synth for performance
osc_client.setSynth(instrument = "chirp")
model_file = "../models/mdrnn-2d-1d-3layers-512units-16mixtures"
TEMPERATURE = 1.00
for i in range(NUMBER):
name = "touchperf-uncond-" + str(i)
net.state = None # reset state if needed.
perf = robojam.generate_random_tiny_performance(net, np.array([0.5, 0.5, 0.1]), time_limit=5.0, temp=TEMPERATURE, model_file=model_file)
perf_df = robojam.perf_array_to_df(perf)
plot_2D(perf_df, name=name, saving=True)
```
# Condition and Generate
Conditions the MDRNN on a random touchscreen performance, then generates a 5 second response.
This requires example performances (`.csv` format) to be in `../performances`.
See `TinyPerformanceLoader` for more details.
```
# Load the sample touchscreen performances:
loader = robojam.TinyPerformanceLoader(verbose=False)
# Fails if example performances are not in ../performance
# Generate and play one conditioned performance
# Hyperparameters:
HIDDEN_UNITS = 512
LAYERS = 3
MIXES = 16
net = robojam.MixtureRNN(mode=robojam.NET_MODE_RUN, n_hidden_units=HIDDEN_UNITS, n_mixtures=MIXES, batch_size=1, sequence_length=1, n_layers=LAYERS)
# Setup synth for performance
osc_client.setSynth(instrument = "chirp")
model_file = "../models/mdrnn-2d-1d-3layers-512units-16mixtures"
TEMPERATURE = 1.00
in_df = loader.sample_without_replacement(n=1)[0]
in_array = robojam.perf_df_to_array(in_df)
output_perf = robojam.condition_and_generate(net, in_array, time_limit=5.0, temp=TEMPERATURE, model_file=model_file)
out_df = robojam.perf_array_to_df(output_perf)
# Plot and perform
plot_double_2d(in_df, out_df)
plot_double_3d(in_df, out_df)
# just perform the output...
osc_client.playPerformance(out_df)
# TODO: implement polyphonic playback. Somehow.
# Generate a number of conditioned performances.
NUMBER = 10
# Hyperparameters:
HIDDEN_UNITS = 512
LAYERS = 3
MIXES = 16
net = robojam.MixtureRNN(mode=robojam.NET_MODE_RUN, n_hidden_units=HIDDEN_UNITS, n_mixtures=MIXES, batch_size=1, sequence_length=1, n_layers=LAYERS)
# Setup synth for performance
osc_client.setSynth(instrument = "chirp")
model_file = "../models/mdrnn-2d-1d-3layers-512units-16mixtures"
TEMPERATURE = 1.00
# make the plots
input_perf_dfs = loader.sample_without_replacement(n=NUMBER)
for i, in_df in enumerate(input_perf_dfs):
title = "touchperf-cond-" + str(i)
in_array = robojam.perf_df_to_array(in_df)
in_time = in_array.T[2].sum()
print("In Time:", in_time)
output_perf = robojam.condition_and_generate(net, in_array, time_limit=5.0, temp=TEMPERATURE, model_file=model_file)
out_df = robojam.perf_array_to_df(output_perf)
print("Out Time:", output_perf.T[2].sum())
plot_double_2d(in_df, out_df, name=title, saving=True)
```
|
github_jupyter
|
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*
Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
- Runs on CPU (not recommended here) or GPU (if available)
# Model Zoo -- Convolutional Neural Network (VGG19 Architecture)
Implementation of the VGG-19 architecture on Cifar10.
Reference for VGG-19:
- Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
The following table (taken from Simonyan & Zisserman referenced above) summarizes the VGG19 architecture:

## Imports
```
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
```
## Settings and Dataset
```
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 20
batch_size = 128
# Architecture
num_features = 784
num_classes = 10
##########################
### MNIST DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.CIFAR10(root='data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = datasets.CIFAR10(root='data',
train=False,
transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
```
## Model
```
##########################
### MODEL
##########################
class VGG16(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
self.block_1 = nn.Sequential(
nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
# (1(32-1)- 32 + 3)/2 = 1
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_2 = nn.Sequential(
nn.Conv2d(in_channels=64,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=128,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_3 = nn.Sequential(
nn.Conv2d(in_channels=128,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_4 = nn.Sequential(
nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_5 = nn.Sequential(
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.classifier = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
#n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
#m.weight.data.normal_(0, np.sqrt(2. / n))
m.weight.detach().normal_(0, 0.05)
if m.bias is not None:
m.bias.detach().zero_()
elif isinstance(m, torch.nn.Linear):
m.weight.detach().normal_(0, 0.05)
m.bias.detach().detach().zero_()
def forward(self, x):
x = self.block_1(x)
x = self.block_2(x)
x = self.block_3(x)
x = self.block_4(x)
x = self.block_5(x)
logits = self.classifier(x.view(-1, 512))
probas = F.softmax(logits, dim=1)
return logits, probas
torch.manual_seed(random_seed)
model = VGG16(num_features=num_features,
num_classes=num_classes)
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
## Training
```
def compute_accuracy(model, data_loader):
model.eval()
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
def compute_epoch_loss(model, data_loader):
model.eval()
curr_loss, num_examples = 0., 0
with torch.no_grad():
for features, targets in data_loader:
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits, probas = model(features)
loss = F.cross_entropy(logits, targets, reduction='sum')
num_examples += targets.size(0)
curr_loss += loss
curr_loss = curr_loss / num_examples
return curr_loss
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader),
compute_epoch_loss(model, train_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
```
## Evaluation
```
with torch.set_grad_enabled(False): # save memory during inference
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
%watermark -iv
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/modichirag/flowpm/blob/master/notebooks/flowpm_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%pylab inline
from flowpm import linear_field, lpt_init, nbody, cic_paint
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
import flowpm
stages = np.linspace(0.1, 1.0, 10, endpoint=True)
initial_conditions = flowpm.linear_field(128, # size of the cube
100, # Physical size of the cube
ipklin, # Initial powerspectrum
batch_size=1)
# Sample particles
state = flowpm.lpt_init(initial_conditions, a0=0.1)
# Evolve particles down to z=0
final_state = flowpm.nbody(state, stages, 128)
# Retrieve final density field
final_field = flowpm.cic_paint(tf.zeros_like(initial_conditions), final_state[0])
with tf.Session() as sess:
sim = sess.run(final_field)
imshow(sim[0].sum(axis=0))
def _binomial_kernel(num_channels, dtype=tf.float32):
"""Creates a 5x5x5 b-spline kernel.
Args:
num_channels: The number of channels of the image to filter.
dtype: The type of an element in the kernel.
Returns:
A tensor of shape `[5, 5, 5, num_channels, num_channels]`.
"""
kernel = np.array((1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype())
kernel = np.einsum('ij,k->ijk', np.outer(kernel, kernel), kernel)
kernel /= np.sum(kernel)
kernel = kernel[:, :, :, np.newaxis, np.newaxis]
return tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype)
def _downsample(cube, kernel):
"""Downsamples the image using a convolution with stride 2.
"""
return tf.nn.conv3d(
input=cube, filters=kernel, strides=[1, 2, 2, 2, 1], padding="SAME")
def _upsample(cube, kernel, output_shape=None):
"""Upsamples the image using a transposed convolution with stride 2.
"""
if output_shape is None:
output_shape = tf.shape(input=cube)
output_shape = (output_shape[0], output_shape[1] * 2, output_shape[2] * 2,
output_shape[3] * 2, output_shape[4])
return tf.nn.conv3d_transpose(
cube,
kernel * 2.0**3,
output_shape=output_shape,
strides=[1, 2, 2, 2, 1],
padding="SAME")
def _build_pyramid(cube, sampler, num_levels):
"""Creates the different levels of the pyramid.
"""
kernel = _binomial_kernel(1, dtype=cube.dtype)
levels = [cube]
for _ in range(num_levels):
cube = sampler(cube, kernel)
levels.append(cube)
return levels
def _split(cube, kernel):
"""Splits the image into high and low frequencies.
This is achieved by smoothing the input image and substracting the smoothed
version from the input.
"""
low = _downsample(cube, kernel)
high = cube - _upsample(low, kernel, tf.shape(input=cube))
return high, low
def downsample(cube, num_levels, name=None):
"""Generates the different levels of the pyramid (downsampling).
"""
with tf.name_scope(name, "pyramid_downsample", [cube]):
cube = tf.convert_to_tensor(value=cube)
return _build_pyramid(cube, _downsample, num_levels)
def merge(levels, name=None):
"""Merges the different levels of the pyramid back to an image.
"""
with tf.name_scope(name, "pyramid_merge", levels):
levels = [tf.convert_to_tensor(value=level) for level in levels]
cube = levels[-1]
kernel = _binomial_kernel(tf.shape(input=cube)[-1], dtype=cube.dtype)
for level in reversed(levels[:-1]):
cube = _upsample(cube, kernel, tf.shape(input=level)) + level
return cube
def split(cube, num_levels, name=None):
"""Generates the different levels of the pyramid.
"""
with tf.name_scope(name, "pyramid_split", [cube]):
cube = tf.convert_to_tensor(value=cube)
kernel = _binomial_kernel(tf.shape(input=cube)[-1], dtype=cube.dtype)
low = cube
levels = []
for _ in range(num_levels):
high, low = _split(low, kernel)
levels.append(high)
levels.append(low)
return levels
def upsample(cube, num_levels, name=None):
"""Generates the different levels of the pyramid (upsampling).
"""
with tf.name_scope(name, "pyramid_upsample", [cube]):
cube = tf.convert_to_tensor(value=cube)
return _build_pyramid(cube, _upsample, num_levels)
field = tf.expand_dims(final_field, -1)
# Split field into short range and large scale components
levels = split(field, 1)
levels
# Compute forces on both fields
def force(field):
shape = field.get_shape()
batch_size, nc = shape[1], shape[2].value
kfield = flowpm.utils.r2c3d(field)
kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
lap = tf.cast(flowpm.kernels.laplace_kernel(kvec), tf.complex64)
fknlrange = flowpm.kernels.longrange_kernel(kvec, 0)
kweight = lap * fknlrange
pot_k = tf.multiply(kfield, kweight)
f = []
for d in range(3):
force_dc = tf.multiply(pot_k, flowpm.kernels.gradient_kernel(kvec, d))
forced = flowpm.utils.c2r3d(force_dc)
f.append(forced)
return tf.stack(f, axis=-1)
force_levels = [force(levels[0][...,0]), force(levels[1][...,0])*2]
force_levels
rec = merge(force_levels)
rec
# Direct force computation on input field
dforce = force(field[...,0])
with tf.Session() as sess:
sim, l0, l1, r, df = sess.run([final_field, force_levels[0], force_levels[1], rec, dforce])
figure(figsize=(15,5))
subplot(131)
imshow(sim[0].sum(axis=1))
title('Input')
subplot(132)
imshow(l0[0].sum(axis=1)[...,0])
title('short range forces')
subplot(133)
imshow(l1[0].sum(axis=1)[...,0]);
title('l2')
title('long range forces')
figure(figsize=(15,5))
subplot(131)
imshow(r[0].sum(axis=1)[...,0]);
title('Multi-Grid Force Computation')
subplot(132)
imshow(df[0].sum(axis=1)[...,0]);
title('Direct Force Computation')
subplot(133)
imshow((r - df)[0,8:-8,8:-8,8:-8].sum(axis=1)[...,0]);
title('Residuals');
levels = split(field, 4)
rec = merge(levels)
with tf.Session() as sess:
sim, l0, l1, l2, l3, r = sess.run([final_field, levels[0], levels[1], levels[2], levels[3], rec[...,0]])
figure(figsize=(25,10))
subplot(151)
imshow(sim[0].sum(axis=0))
title('Input')
subplot(152)
imshow(l0[0].sum(axis=0)[...,0])
title('l1')
subplot(153)
imshow(l1[0].sum(axis=0)[...,0]);
title('l2')
subplot(154)
imshow(l2[0].sum(axis=0)[...,0]);
title('l2')
subplot(155)
imshow(l3[0].sum(axis=0)[...,0]);
title('approximation')
figure(figsize=(25,10))
subplot(131)
imshow(sim[0].sum(axis=0))
title('Input')
subplot(132)
imshow(r[0].sum(axis=0))
title('Reconstruction')
subplot(133)
imshow((sim - r)[0].sum(axis=0));
title('Difference')
```
|
github_jupyter
|
# 1. 다변수 가우시안 정규분포MVN
$$\mathcal{N}(x ; \mu, \Sigma) = \dfrac{1}{(2\pi)^{D/2} |\Sigma|^{1/2}} \exp \left( -\dfrac{1}{2} (x-\mu)^T \Sigma^{-1} (x-\mu) \right)$$
- $\Sigma$ : 공분산 행렬, positive semidefinite
- x : 확률변수 벡터 $$x = \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_M \end{bmatrix}
$$
eg.
$\mu = \begin{bmatrix}2 \\ 3 \end{bmatrix}$,
$\Sigma = \begin{bmatrix}1 & 0 \\ 0 & 1 \end{bmatrix}$
```
%matplotlib inline
mu = [2, 3]
cov = [1, 0], [0, 1]
rv = sp.stats.multivariate_normal(mu, cov)
xx = np.linspace(-1, 6, 120)
yy = np.linspace(-1, 6, 150)
XX, YY = np.meshgrid(xx, yy)
plt.contour(XX, YY, rv.pdf(np.dstack([XX, YY])))
plt.axis("equal")
plt.xlim(0, 4)
plt.ylim(0.5, 5.2)
```
eg.
$\mu = \begin{bmatrix}2 \\ 3 \end{bmatrix}$,
$\Sigma = \begin{bmatrix}2 & 3 \\ 3 & 7 \end{bmatrix}$
```
mu = [2, 3]
cov = [2, 3], [3, 7]
rv = sp.stats.multivariate_normal(mu, cov)
xx = np.linspace(-1, 6, 120)
yy = np.linspace(-1, 6, 150)
XX, YY = np.meshgrid(xx, yy)
plt.contour(XX, YY, rv.pdf(np.dstack([XX, YY])))
plt.axis("equal")
plt.show()
```
# 2. 가우시안 정규 분포와 고유값 분해
- 공분산 행렬 $\Sigma$은 대칭행렬이므로, 대각화 가능
$$ \Sigma^{-1} = V \Lambda^{-1}V^T$$
- 따라서
$$
\begin{eqnarray}
\mathcal{N}(x)
&\propto& \exp \left( -\dfrac{1}{2} (x-\mu)^T \Sigma^{-1} (x- \mu) \right) \\
&=& \exp \left( -\dfrac{1}{2}(x-\mu)^T V \Lambda^{-1} V^T (x- \mu) \right) \\
&=& \exp \left( -\dfrac{1}{2} x'^T \Lambda^{-1} x' \right) \\
\end{eqnarray}
$$
- V : $\Sigma$의 eigen vector
- 새로운 확률변수$x' = V^{-1}(x-\mu)$
- Cov[x']: $\Sigma$의 matrix of eigenvalues $\Lambda$
- x' 의미
- $\mu$만큼 평행이동 후 eigen vectors를 basis vector로 하는 변환
- 변수간 상관관관계가 소거
- 활용: PCA, 상관관계 높은 변수를 $x_1',x_2'$로 변환
```
mu = [2, 3]
cov = [[4, 3], [3, 5]]
w, v = np.linalg.eig(cov)
print('eigen value: w', w, 'eigen vector: v', v, sep = '\n')
w_cov = [[1.45861873, 0], [0, 7.54138127]]
xx = np.linspace(-1, 5, 120)
yy = np.linspace(0, 6, 150)
XX, YY = np. meshgrid(xx, yy)
plt. figure(figsize=(8, 4))
d = dict(facecolor='k', edgecolor='k')
plt.subplot(121)
rv1 = sp.stats.multivariate_normal(mu, cov)
plt.contour(XX, YY, rv1.pdf(np.dstack([XX,YY])))
plt.annotate("", xy=(mu + 0.35 * w[0] * v[:, 0]), xytext=mu, arrowprops=d)
plt.annotate("", xy=(mu + 0.35 * w[1] * v[:, 1]), xytext=mu, arrowprops=d)
plt.title("$X_1$,$X_2$ Joint pdf")
plt.axis('equal')
#Cov(x)의 eigen vector(v)에 대한 좌표변환
#Cov(x') = Cov(x)의 matrix of eigen values(w_cov)
plt.subplot(122)
rv2 = sp.stats.multivariate_normal(mu, w_cov) #Cov(x)의 좌표변환
plt.contour(XX, YY, rv2.pdf(np.dstack([XX,YY])))
plt.annotate("", xy=(mu + 0.35 * w[0] * np.array([1, 0])), xytext=mu, arrowprops=d)
plt.annotate("", xy=(mu + 0.35 * w[1] * np.array([0, 1])), xytext=mu, arrowprops=d)
plt.title("$X'_1$,$X'_2$ Joint pdf")
plt.axis('equal')
plt.show()
```
# 3. 다변수 가우시안 정규분포의 조건부 확률분포
- M차원에서 N차원이 관측되더라도, 남은 M-N개 확률변수들의 조건부 분포는, 가우시안 정규분포를 띤다.
# 4. 다변수 가우시안 정규분포의 주변 확률분포
- 마찬가지로 가우시안 정규분포를 띤다.
$$\int p(x_1, x_2) dx_2 = \mathcal{N}(x_1; \mu''1, \sigma''^2_1)$$
|
github_jupyter
|
# First Graph Convolutional Neural Network
This notebook shows a simple GCN learning using the KrasHras dataset from [Zamora-Resendiz and Crivelli, 2019](https://www.biorxiv.org/content/10.1101/610444v1.full).
```
import gcn_prot
import torch
import torch.nn.functional as F
from os.path import join, pardir
from random import seed
ROOT_DIR = pardir
seed = 8
```
## Table of contents
1. [Initialize Data](#Initialize-Data)
## Initialize Data
The data for this experiment is the one used for testing on the [CI of the repository](https://github.com/carrascomj/gcn-prot/blob/master/.travis.yml). Thus, it is already fetched.
The first step is to calculate the length of the largest protein (in number of aminoacids), since all the proteins will be zero padded to that value. That way, all the inputs fed to the model will have the same length.
```
largest = gcn_prot.data.get_longest(join(ROOT_DIR, "new_data", "graph"))
print(f"Largets protein has {largest} aminoacids")
```
However, for this particular dataset, it is known from the aforementioned publication that 185 is enough because the 4 terminal aminoacids were not well determined and would be later discarded by the mask.
```
largest = 185
data_path = join(ROOT_DIR, "new_data")
```
The split is performed with 70/10/20 for train/test/valid.
Note that the generated datasets (custom child classes of `torch.utils.data.Dataset`) doesn't stored the graphs in memory but their paths, generating the graph when accessed by an index.
```
train, test, valid = gcn_prot.data.get_datasets(
data_path=data_path,
nb_nodes=largest,
task_type="classification",
nb_classes=2,
split=[0.7, 0.2, 0.1],
seed=42,
)
print(f"Train: {len(train)}\nTest: {len(test)}\nValidation: {len(valid)}")
type(train)
```
## Define the neural network
Each instance in the dataset retrieves a list of four matrices:
1. **feature matrix**: 29 x 185. This corresponds to the aminoacid type (one-hot encoded vector of length 23), residue depth, residue orientation and 4 features encoding the positional index with a sinusoidal transformation.
2. **coordinates**: 3 x 185. x,y,z coordinates of every aminoacid in the crystal (centered).
3. **mask**: to be applied to the adjacency to discard ill-identified aminoacids.
4. **y**: 2 label, Kras/Hras.
The transformation of this list to the input of the neural network (feature matrix, adjacency matrix), is performed during training.
```
model = gcn_prot.models.GCN_simple(
feats=29, # features in feature matrix
hidden=[8, 8], # number of neurons in convolutional layers (3 in this case)
label=2, # features on y
nb_nodes=largest, # for last layer
dropout=0, # applied in the convolutional layers
bias=False, # default
act=F.relu, # default
cuda=True # required for sparsize and fit_network
).cuda()
```
Now, instantiate the criterion and the optimizer.
```
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss().cuda()
```
## Train the network
```
%matplotlib inline
save_path = join(ROOT_DIR, "models", "GCN_tiny_weigths.pt")
model_na = gcn_prot.models.fit_network(
model, train, test, optimizer, criterion,
batch_size=20, # a lot of batches per epoch
epochs=20,
debug=True, # will print progress of epochs
plot_every=5, # loss plot/epoch
save=save_path # best weights (test set) will be saved here
)
```
Debug with validation.
```
model.eval()
for batch in torch.utils.data.DataLoader(
valid, shuffle=True, batch_size=2, drop_last=False
):
print(gcn_prot.models.train.forward_step(batch, model, False))
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/DingLi23/s2search/blob/pipelining/pipelining/exp-cscv/exp-cscv_cscv_1w_ale_plotting.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Experiment Description
> This notebook is for experiment \<exp-cscv\> and data sample \<cscv\>.
### Initialization
```
%load_ext autoreload
%autoreload 2
import numpy as np, sys, os
in_colab = 'google.colab' in sys.modules
# fetching code and data(if you are using colab
if in_colab:
!rm -rf s2search
!git clone --branch pipelining https://github.com/youyinnn/s2search.git
sys.path.insert(1, './s2search')
%cd s2search/pipelining/exp-cscv/
pic_dir = os.path.join('.', 'plot')
if not os.path.exists(pic_dir):
os.mkdir(pic_dir)
```
### Loading data
```
sys.path.insert(1, '../../')
import numpy as np, sys, os, pandas as pd
from getting_data import read_conf
from s2search_score_pdp import pdp_based_importance
sample_name = 'cscv'
f_list = [
'title', 'abstract', 'venue', 'authors',
'year',
'n_citations'
]
ale_xy = {}
ale_metric = pd.DataFrame(columns=['feature_name', 'ale_range', 'ale_importance', 'absolute mean'])
for f in f_list:
file = os.path.join('.', 'scores', f'{sample_name}_1w_ale_{f}.npz')
if os.path.exists(file):
nparr = np.load(file)
quantile = nparr['quantile']
ale_result = nparr['ale_result']
values_for_rug = nparr.get('values_for_rug')
ale_xy[f] = {
'x': quantile,
'y': ale_result,
'rug': values_for_rug,
'weird': ale_result[len(ale_result) - 1] > 20
}
if f != 'year' and f != 'n_citations':
ale_xy[f]['x'] = list(range(len(quantile)))
ale_xy[f]['numerical'] = False
else:
ale_xy[f]['xticks'] = quantile
ale_xy[f]['numerical'] = True
ale_metric.loc[len(ale_metric.index)] = [f, np.max(ale_result) - np.min(ale_result), pdp_based_importance(ale_result, f), np.mean(np.abs(ale_result))]
# print(len(ale_result))
print(ale_metric.sort_values(by=['ale_importance'], ascending=False))
print()
```
### ALE Plots
```
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import MaxNLocator
categorical_plot_conf = [
{
'xlabel': 'Title',
'ylabel': 'ALE',
'ale_xy': ale_xy['title']
},
{
'xlabel': 'Abstract',
'ale_xy': ale_xy['abstract']
},
{
'xlabel': 'Authors',
'ale_xy': ale_xy['authors'],
# 'zoom': {
# 'inset_axes': [0.3, 0.3, 0.47, 0.47],
# 'x_limit': [89, 93],
# 'y_limit': [-1, 14],
# }
},
{
'xlabel': 'Venue',
'ale_xy': ale_xy['venue'],
# 'zoom': {
# 'inset_axes': [0.3, 0.3, 0.47, 0.47],
# 'x_limit': [89, 93],
# 'y_limit': [-1, 13],
# }
},
]
numerical_plot_conf = [
{
'xlabel': 'Year',
'ylabel': 'ALE',
'ale_xy': ale_xy['year'],
# 'zoom': {
# 'inset_axes': [0.15, 0.4, 0.4, 0.4],
# 'x_limit': [2019, 2023],
# 'y_limit': [1.9, 2.1],
# },
},
{
'xlabel': 'Citations',
'ale_xy': ale_xy['n_citations'],
# 'zoom': {
# 'inset_axes': [0.4, 0.65, 0.47, 0.3],
# 'x_limit': [-1000.0, 12000],
# 'y_limit': [-0.1, 1.2],
# },
},
]
def pdp_plot(confs, title):
fig, axes_list = plt.subplots(nrows=1, ncols=len(confs), figsize=(20, 5), dpi=100)
subplot_idx = 0
plt.suptitle(title, fontsize=20, fontweight='bold')
# plt.autoscale(False)
for conf in confs:
axes = axes if len(confs) == 1 else axes_list[subplot_idx]
sns.rugplot(conf['ale_xy']['rug'], ax=axes, height=0.02)
axes.axhline(y=0, color='k', linestyle='-', lw=0.8)
axes.plot(conf['ale_xy']['x'], conf['ale_xy']['y'])
axes.grid(alpha = 0.4)
# axes.set_ylim([-2, 20])
axes.xaxis.set_major_locator(MaxNLocator(integer=True))
axes.yaxis.set_major_locator(MaxNLocator(integer=True))
if ('ylabel' in conf):
axes.set_ylabel(conf.get('ylabel'), fontsize=20, labelpad=10)
# if ('xticks' not in conf['ale_xy'].keys()):
# xAxis.set_ticklabels([])
axes.set_xlabel(conf['xlabel'], fontsize=16, labelpad=10)
if not (conf['ale_xy']['weird']):
if (conf['ale_xy']['numerical']):
axes.set_ylim([-1.5, 1.5])
pass
else:
axes.set_ylim([-7, 19])
pass
if 'zoom' in conf:
axins = axes.inset_axes(conf['zoom']['inset_axes'])
axins.xaxis.set_major_locator(MaxNLocator(integer=True))
axins.yaxis.set_major_locator(MaxNLocator(integer=True))
axins.plot(conf['ale_xy']['x'], conf['ale_xy']['y'])
axins.set_xlim(conf['zoom']['x_limit'])
axins.set_ylim(conf['zoom']['y_limit'])
axins.grid(alpha=0.3)
rectpatch, connects = axes.indicate_inset_zoom(axins)
connects[0].set_visible(False)
connects[1].set_visible(False)
connects[2].set_visible(True)
connects[3].set_visible(True)
subplot_idx += 1
pdp_plot(categorical_plot_conf, f"ALE for {len(categorical_plot_conf)} categorical features")
# plt.savefig(os.path.join('.', 'plot', f'{sample_name}-1wale-categorical.png'), facecolor='white', transparent=False, bbox_inches='tight')
pdp_plot(numerical_plot_conf, f"ALE for {len(numerical_plot_conf)} numerical features")
# plt.savefig(os.path.join('.', 'plot', f'{sample_name}-1wale-numerical.png'), facecolor='white', transparent=False, bbox_inches='tight')
```
|
github_jupyter
|
# SAT Analysis
**We wish to answer the question whether SAT is a fairt test?**
## Read in the data
```
import pandas as pd
import numpy as np
import re
data_files = [
"ap_2010.csv",
"class_size.csv",
"demographics.csv",
"graduation.csv",
"hs_directory.csv",
"sat_results.csv"
]
data = {}
for file in data_files:
df = pd.read_csv("schools/{0}".format(file))
data[file.replace(".csv", "")] = df
```
# Read in the surveys
```
all_survey = pd.read_csv("schools/survey_all.txt", delimiter="\t", encoding='windows-1252')
d75_survey = pd.read_csv("schools/survey_d75.txt", delimiter="\t", encoding='windows-1252')
survey = pd.concat([all_survey, d75_survey], axis=0)
survey["DBN"] = survey["dbn"]
survey_fields = [
"DBN",
"rr_s",
"rr_t",
"rr_p",
"N_s",
"N_t",
"N_p",
"saf_p_11",
"com_p_11",
"eng_p_11",
"aca_p_11",
"saf_t_11",
"com_t_11",
"eng_t_11",
"aca_t_11",
"saf_s_11",
"com_s_11",
"eng_s_11",
"aca_s_11",
"saf_tot_11",
"com_tot_11",
"eng_tot_11",
"aca_tot_11",
]
survey = survey[survey_fields]
data["survey"] = survey
```
# Add DBN columns
```
data["hs_directory"]["DBN"] = data["hs_directory"]["dbn"]
def pad_csd(num):
str_rep = str(num)
if len(str_rep) > 1:
return str_rep
else:
return "0" + str_rep
data["class_size"]["padded_csd"] = data["class_size"]["CSD"].apply(pad_csd)
data["class_size"]["DBN"] = data["class_size"]["padded_csd"] + data["class_size"]["SCHOOL CODE"]
```
# Convert columns to numeric
```
cols = ['SAT Math Avg. Score', 'SAT Critical Reading Avg. Score', 'SAT Writing Avg. Score']
for c in cols:
data["sat_results"][c] = pd.to_numeric(data["sat_results"][c], errors="coerce")
data['sat_results']['sat_score'] = data['sat_results'][cols[0]] + data['sat_results'][cols[1]] + data['sat_results'][cols[2]]
def find_lat(loc):
coords = re.findall("\(.+, .+\)", loc)
lat = coords[0].split(",")[0].replace("(", "")
return lat
def find_lon(loc):
coords = re.findall("\(.+, .+\)", loc)
lon = coords[0].split(",")[1].replace(")", "").strip()
return lon
data["hs_directory"]["lat"] = data["hs_directory"]["Location 1"].apply(find_lat)
data["hs_directory"]["lon"] = data["hs_directory"]["Location 1"].apply(find_lon)
data["hs_directory"]["lat"] = pd.to_numeric(data["hs_directory"]["lat"], errors="coerce")
data["hs_directory"]["lon"] = pd.to_numeric(data["hs_directory"]["lon"], errors="coerce")
```
# Condense datasets
Condensing the datasets to remove any two rows having same **DBN** so that all the datasets can be easily joined on **"DBN"**
```
class_size = data["class_size"]
class_size = class_size[class_size["GRADE "] == "09-12"]
class_size = class_size[class_size["PROGRAM TYPE"] == "GEN ED"]
class_size = class_size.groupby("DBN").agg(np.mean)
class_size.reset_index(inplace=True)
data["class_size"] = class_size
data["demographics"] = data["demographics"][data["demographics"]["schoolyear"] == 20112012]
data["graduation"] = data["graduation"][data["graduation"]["Cohort"] == "2006"]
data["graduation"] = data["graduation"][data["graduation"]["Demographic"] == "Total Cohort"]
```
# Convert AP scores to numeric
```
cols = ['AP Test Takers ', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']
for col in cols:
data["ap_2010"][col] = pd.to_numeric(data["ap_2010"][col], errors="coerce")
```
# Combine the datasets
Merging the dataset on **DBN** column
```
combined = data["sat_results"]
combined = combined.merge(data["ap_2010"], on="DBN", how="left")
combined = combined.merge(data["graduation"], on="DBN", how="left")
to_merge = ["class_size", "demographics", "survey", "hs_directory"]
for m in to_merge:
combined = combined.merge(data[m], on="DBN", how="inner")
combined = combined.fillna(combined.mean())
combined = combined.fillna(0)
```
# Add a school district column for mapping
```
def get_first_two_chars(dbn):
return dbn[0:2]
combined["school_dist"] = combined["DBN"].apply(get_first_two_chars)
```
# Find correlations
```
correlations = combined.corr()
correlations = correlations["sat_score"]
correlations
```
# Plotting survey correlations
```
# Remove DBN since it's a unique identifier, not a useful numerical value for correlation.
survey_fields.remove("DBN")
import matplotlib.pyplot as plt
import seaborn as sns
% matplotlib inline
fig, ax = plt.subplots(figsize = (8,5))
correlations[survey_fields].plot.bar()
plt.show()
```
#### Findings from above plot
There are high correlations between N_s, N_t, N_p and sat_score. Since these columns are correlated with total_enrollment, it makes sense that they would be high.
It is more interesting that rr_s, the student response rate, or the percentage of students that completed the survey, correlates with sat_score. This might make sense because students who are more likely to fill out surveys may be more likely to also be doing well academically.
How students and teachers percieved safety (saf_t_11 and saf_s_11) correlate with sat_score. This make sense, as it's hard to teach or learn in an unsafe environment.
The last interesting correlation is the aca_s_11, which indicates how the student perceives academic standards, correlates with sat_score, but this is not true for aca_t_11, how teachers perceive academic standards, or aca_p_11, how parents perceive academic standards.
## Investigating safety scores
```
combined.plot.scatter(x = "saf_s_11", y = "sat_score" )
plt.show()
```
There appears to be a correlation between SAT scores and safety, although it isn't thatstrong. It looks like there are a few schools with extremely high SAT scores and high safety scores. There are a few schools with low safety scores and low SAT scores. No school with a safety score lower than 6.5 has an average SAT score higher than 1500 or so.
## Plotting safety scores for districts in NYC
```
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
districts = combined.groupby("school_dist").agg(np.mean)
districts.reset_index(inplace=True)
m = Basemap(
projection='merc',
llcrnrlat=40.496044,
urcrnrlat=40.915256,
llcrnrlon=-74.255735,
urcrnrlon=-73.700272,
resolution='i'
)
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)
m.fillcontinents(color='#FFC58C',lake_color='#85A6D9')
longitudes = districts["lon"].tolist()
latitudes = districts["lat"].tolist()
m.scatter(longitudes, latitudes, s=50, zorder=2, latlon=True, c=districts["saf_s_11"], cmap="summer")
plt.show()
```
## Investigating racial differences
```
race_cols = ["white_per", "asian_per", "black_per", "hispanic_per"]
correlations[race_cols].plot.bar()
```
It shows higher percentage of white or asian students at a school correlates positively with sat score, whereas a higher percentage of black or hispanic students correlates negatively with sat score. This may be due to a lack of funding for schools in certain areas, which are more likely to have a higher percentage of black or hispanic students.
### Hispanic people vs SAT score
```
combined.plot.scatter(x = "hispanic_per", y = "sat_score")
plt.show()
bool_hispanic_95 = combined["hispanic_per"] > 95
combined[bool_hispanic_95]["SCHOOL NAME"]
```
The schools listed above appear to primarily be geared towards recent immigrants to the US. These schools have a lot of students who are learning English, which would explain the lower SAT scores.
```
bool_hispanic_10 = (combined["hispanic_per"] < 10) & (combined["sat_score"] > 1800)
combined[bool_hispanic_10]["SCHOOL NAME"]
```
Many of the schools above appear to be specialized science and technology schools that receive extra funding, and only admit students who pass an entrance exam. This doesn't explain the low hispanic_per, but it does explain why their students tend to do better on the SAT -- they are students from all over New York City who did well on a standardized test.
## Investigating gender differences
```
gender_cols = ["male_per", "female_per"]
correlations[gender_cols].plot.bar()
plt.show()
```
In the plot above, we can see that a high percentage of females at a school positively correlates with SAT score, whereas a high percentage of males at a school negatively correlates with SAT score. Neither correlation is extremely strong.
```
combined.plot.scatter(x = "female_per", y = "sat_score")
```
Based on the scatterplot, there doesn't seem to be any real correlation between sat_score and female_per. However, there is a cluster of schools with a high percentage of females (60 to 80), and high SAT scores.
```
bool_female = (combined["female_per"] > 60) & (combined["sat_score"] > 1700)
combined[bool_female]["SCHOOL NAME"]
```
These schools appears to be very selective liberal arts schools that have high academic standards.
## AP_test takers vs SAT
In the U.S., high school students take Advanced Placement (AP) exams to earn college credit. There are AP exams for many different subjects.
```
combined["ap_per"] = combined["AP Test Takers "]/ combined["total_enrollment"]
combined.plot.scatter(x = "ap_per", y = "sat_score")
```
It looks like there is a relationship between the percentage of students in a school who take the AP exam, and their average SAT scores. It's not an extremely strong correlation, though.
## potential next steps:
* Determing whether there's a correlation between class size and SAT scores
* Figuring out which neighborhoods have the best schools
* If we combine this information with a dataset containing property values, we could find the least expensive neighborhoods that have good schools.
* Investigating the differences between parent, teacher, and student responses to surveys.
* Assigning scores to schools based on sat_score and other attributes.
|
github_jupyter
|
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Peeker-Groups" data-toc-modified-id="Peeker-Groups-1"><span class="toc-item-num">1 </span>Peeker Groups</a></span></li></ul></div>
# Peeker Groups
`Peeker` objects are normally stored in a global list, but sometimes you might want
to create a group of `Peeker`s for a set of signals.
This is easily done using the `PeekerGroup` class.
Once again, I'll use the hierarchical adder example to illustrate the use of `PeekerGroup`s.
```
from myhdl import *
from myhdlpeek import Peeker, PeekerGroup
def adder_bit(a, b, c_in, sum_, c_out):
'''Single bit adder.'''
@always_comb
def adder_logic():
sum_.next = a ^ b ^ c_in
c_out.next = (a & b) | (a & c_in) | (b & c_in)
# Add some global peekers to monitor the inputs and outputs.
Peeker(a, 'a')
Peeker(b, 'b')
Peeker(c_in, 'c_in')
Peeker(sum_, 'sum')
Peeker(c_out, 'c_out')
return adder_logic
def adder(a, b, sum_):
'''Connect single-bit adders to create a complete adder.'''
c = [Signal(bool(0)) for _ in range(len(a)+1)] # Carry signals between stages.
s = [Signal(bool(0)) for _ in range(len(a))] # Sum bit for each stage.
stages = [] # Storage for adder bit instances.
# Create the adder bits and connect them together.
for i in range(len(a)):
stages.append( adder_bit(a=a(i), b=b(i), sum_=s[i], c_in=c[i], c_out=c[i+1]) )
# Concatenate the sum bits and send them out on the sum_ output.
@always_comb
def make_sum():
sum_.next = ConcatSignal(*reversed(s))
return instances() # Return all the adder stage instances.
# Create signals for interfacing to the adder.
a, b, sum_ = [Signal(intbv(0,0,8)) for _ in range(3)]
# Clear-out any existing peeker stuff before instantiating the adder.
Peeker.clear()
# Instantiate the adder.
add_1 = adder(a=a, b=b, sum_=sum_)
# Create a group of peekers to monitor the top-level buses.
# Each argument to PeekerGroup assigns a signal to a name for a peeker.
top_pkr = PeekerGroup(a_bus=a, b_bus=b, sum_bus=sum_)
# Create a testbench generator that applies random inputs to the adder.
from random import randrange
def test():
for _ in range(8):
a.next, b.next = randrange(0, a.max), randrange(0, a.max)
yield delay(1)
# Simulate the adder, testbench and peekers.
Simulation(add_1, test(), *Peeker.instances()).run()
# Display only the peekers for the top-level buses.
# The global peekers in the adder bits won't show up.
top_pkr.show_waveforms('a_bus b_bus sum_bus')
top_pkr.to_html_table('a_bus b_bus sum_bus')
```
|
github_jupyter
|
# Multivariate Analysis for Planetary Atmospheres
This notebooks relies on the pickle dataframe in the `notebooks/` folder. You can also compute your own using `3_ColorColorFigs.ipynb`
```
#COLOR COLOR PACKAGE
from colorcolor import compute_colors as c
from colorcolor import stats
import matplotlib.pyplot as plt
import pandas as pd
import pickle as pk
import numpy as np
from itertools import combinations as comb
import seaborn as sns
%matplotlib inline
```
This dataframe contains:
- **independent variables** : filter observations
- **dependent variables** : physical planet parameters
```
data= pk.load(open('wfirst_colors_dataframe.pk','rb'))
data=data.dropna()[~data.dropna().isin([np.inf, -np.inf])].dropna() #drop infinities and nans
#let's specicy our y of interest for this tutorial, feel free to play around with this
yofinterest = 'metallicity'
#lets also specify a filter set. Let's just focus on WFIRST filters
filters = c.print_filters('wfirst')
#lets also specify a filter set. Let's just focus on WFIRST filters
filters = c.print_filters('wfirst')
#and also define the combinations: e.g. Filter1 - Filter2
filter_combinations = [i[0]+i[1] for i in comb(filters,2)] +filters
```
### Explore Correlation Matrix: Fig 6 Batalha+2018
In figure 6 we looked at the difference between the correlation matrix with and without the cloud sample
```
#lets look at only the cloud free sample
corr_matrix = data.loc[(data['cloud']==0)].corr()
fig, ax = plt.subplots(figsize=(25,10))
#here I am simplifying the image by adding in an absolute value
#you can remove it if you are interested in seeing what is positive and nagatively correlated
sns.heatmap(abs(corr_matrix), vmax=1, square=False, linewidths=.5, ax=ax).xaxis.tick_top()
```
Figure 6 in Batalha 2018 is a subset of this larger block
```
#lets look at everything
corr_matrix = data.corr()
fig, ax = plt.subplots(figsize=(25,10))
#here I am simplifying the image by adding in an absolute value
#you can remove it if you are interested in seeing what is positive and nagatively correlated
sns.heatmap(abs(corr_matrix), vmax=1, square=False, linewidths=.5, ax=ax).xaxis.tick_top()
```
** See immediately how there are less strongly correlated values for physical parameters versus filters??**
## Try Linear Discriminant Analysis For Classification
```
#try cloud free first
subset = data.loc[(data['cloud']==0) & (data['phase']==90)]
#separate independent
X = subset.loc[:,filter_combinations]
#and dependent variables (also this make it a string so we can turn it into a label)
y = subset[yofinterest].astype(str)
lda_values=stats.lda_analysis(X,y)
```
These warnings are coming up because we have used both absolute and relative filters. Because LDA, like regression techniques involves computing a matrix inversion, which is inaccurate if the determinant is close to 0 (i.e. two or more variables are almost a linear combination of each other). This means that our relative filter and absolute combinations are nearly a linear combination of each other (which makes sense). For classification purposes this is okay for now.
```
#Now lets unconstrain the phase
subset = data.loc[(data['cloud']==0)]
#separate independent
X = subset.loc[:,filter_combinations]
#and dependent variables (also this make it a string so we can turn it into a label)
y = subset[yofinterest].astype(str)
lda_values=stats.lda_analysis(X,y)
#Now lets unconstrain everything
subset = data
#separate independent
X = subset.loc[:,filter_combinations]
#and dependent variables (also this make it a string so we can turn it into a label)
y = subset[yofinterest].astype(str)
lda_values=stats.lda_analysis(X,y)
```
|
github_jupyter
|
```
from sklearn.cluster import MeanShift, estimate_bandwidth
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import math
import os
import sys
from numpy.fft import fft, ifft
import glob
def remove_periodic(X, df_index, detrending=True, model='additive', frequency_threshold=0.1e12):
rad = np.array(X)
if detrending:
det_rad = rad - np.average(rad)
else:
det_rad = rad
det_rad_fft = fft(det_rad)
# Get the power spectrum
rad_ps = [np.abs(rd)**2 for rd in det_rad_fft]
clean_rad_fft = [det_rad_fft[i] if rad_ps[i] > frequency_threshold else 0
for i in range(len(det_rad_fft))]
rad_series_clean = ifft(clean_rad_fft)
rad_series_clean = [value.real for value in rad_series_clean]
if detrending:
rad_trends = rad_series_clean + np.average(rad)
else:
rad_trends = rad_series_clean
rad_clean_ts = pd.Series(rad_trends, index=df_index)
#rad_clean_ts[(rad_clean_ts.index.hour < 6) | (rad_clean_ts.index.hour > 20)] = 0
residual = rad - rad_clean_ts.values
clean = rad_clean_ts.values
return residual, clean
def load_data(path, resampling=None):
## some resampling options: 'H' - hourly, '15min' - 15 minutes, 'M' - montlhy
## more options at:
## http://benalexkeen.com/resampling-time-series-data-with-pandas/
allFiles = glob.iglob(path + "/**/*.txt", recursive=True)
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
#print("Reading: ",file_)
df = pd.read_csv(file_,index_col="datetime",parse_dates=['datetime'], header=0, sep=",")
if frame.columns is None :
frame.columns = df.columns
list_.append(df)
frame = pd.concat(list_)
if resampling is not None:
frame = frame.resample(resampling).mean()
frame = frame.fillna(method='ffill')
return frame
path = '/Users/cseveriano/spatio-temporal-forecasting/data/processed/NREL/Oahu'
df = load_data(path)
# Corrigir ordem das colunas
df.columns = ['DHHL_3','DHHL_4', 'DHHL_5', 'DHHL_10', 'DHHL_11', 'DHHL_9', 'DHHL_2', 'DHHL_1', 'DHHL_1_Tilt', 'AP_6', 'AP_6_Tilt', 'AP_1', 'AP_3', 'AP_5', 'AP_4', 'AP_7', 'DHHL_6', 'DHHL_7', 'DHHL_8']
#inicio dos dados possui falhas na medicao
df = df.loc[df.index > '2010-03-20']
df.drop(['DHHL_1_Tilt', 'AP_6_Tilt'], axis=1, inplace=True)
```
## Preparação bases de treinamento e testes
```
clean_df = pd.DataFrame(columns=df.columns, index=df.index)
residual_df = pd.DataFrame(columns=df.columns, index=df.index)
for col in df.columns:
residual, clean = remove_periodic(df[col].tolist(), df.index, frequency_threshold=0.01e12)
clean_df[col] = clean.tolist()
residual_df[col] = residual.tolist()
train_df = df[(df.index >= '2010-09-01') & (df.index <= '2011-09-01')]
train_clean_df = clean_df[(clean_df.index >= '2010-09-01') & (clean_df.index <= '2011-09-01')]
train_residual_df = residual_df[(residual_df.index >= '2010-09-01') & (residual_df.index <= '2011-09-01')]
test_df = df[(df.index >= '2010-08-05')& (df.index < '2010-08-06')]
test_clean_df = clean_df[(clean_df.index >= '2010-08-05')& (clean_df.index < '2010-08-06')]
test_residual_df = residual_df[(residual_df.index >= '2010-08-05')& (residual_df.index < '2010-08-06')]
lat = [21.31236,21.31303,21.31357,21.31183,21.31042,21.31268,21.31451,21.31533,21.30812,21.31276,21.31281,21.30983,21.31141,21.31478,21.31179,21.31418,21.31034]
lon = [-158.08463,-158.08505,-158.08424,-158.08554,-158.0853,-158.08688,-158.08534,-158.087,-158.07935,-158.08389,-158.08163,-158.08249,-158.07947,-158.07785,-158.08678,-158.08685,-158.08675]
additional_info = pd.DataFrame({'station': df.columns, 'latitude': lat, 'longitude': lon })
additional_info[(additional_info.station == col)].latitude.values[0]
#ll = []
#for ind, row in train_residual_df.iterrows():
# for col in train_residual_df.columns:
# lat = additional_info[(additional_info.station == col)].latitude.values[0]
# lon = additional_info[(additional_info.station == col)].longitude.values[0]
# doy = ind.dayofyear
# hour = ind.hour
# minute = ind.minute
# irradiance = row[col]
# ll.append([lat, lon, doy, hour, minute, irradiance])
#ms_df = pd.DataFrame(columns=['latitude','longitude','dayofyear', 'hour', 'minute','irradiance'], data=ll)
ll = []
for ind, row in train_residual_df.iterrows():
for col in train_residual_df.columns:
lat = additional_info[(additional_info.station == col)].latitude.values[0]
lon = additional_info[(additional_info.station == col)].longitude.values[0]
irradiance = row[col]
ll.append([lat, lon, irradiance])
ms_df = pd.DataFrame(columns=['latitude','longitude','irradiance'], data=ll)
ms_df
```
## Mean Shift
Normalização dos dados
```
from sklearn import preprocessing
x = ms_df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
bandwidth
bandwidth = estimate_bandwidth(x_scaled, quantile=0.2, n_samples=int(len(ms_df)*0.1), n_jobs=-1)
ms = MeanShift(bandwidth=bandwidth, n_jobs=-1)
ms.fit(x_scaled)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
labels
```
|
github_jupyter
|
## loading an image
```
from PIL import Image
im = Image.open("lena.png")
```
## examine the file contents
```
from __future__ import print_function
print(im.format, im.size, im.mode)
```
- The *format* attribute identifies the source of an image. If the image was not read from a file, it is set to None.
- The *size* attribute is a 2-tuple containing width and height (in pixels).
- The *mode* attribute defines the number and names of the bands in the image,
## let’s display the image we just loaded
```
im.show()
```
- The standard version of show() is not very efficient, since it saves the image to a temporary file and calls the xv utility to display the image. If you don’t have xv installed, it won’t even work. When it does work though, it is very handy for debugging and tests.
## Reading and writing images
- Python Imaging Library (PIL)
- You don’t have to know the file format to open a file. The library automatically determines the format based on the contents of the file.
- Unless you specify the format, the library uses the filename extension to discover which file storage format to use.
## Convert all image files to JPEG
```
## ------------ignore--------------
from __future__ import print_function
import os, sys
from PIL import Image
for infile in sys.argv[1:]:
f, e = os.path.splitext(infile)
outfile = f + ".jpg"
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print("cannot convert", infile)
from __future__ import print_function
import os, sys
from PIL import Image
for infile in os.listdir(os.getcwd()):
f, e = os.path.splitext(infile)
print(f)
print(e)
outfile = f + ".jpg"
print(outfile)
if infile != outfile:
try:
Image.open(infile).save(outfile)
print('converted',infile,'to',outfile)
except IOError:
print("cannot convert", infile)
```
## Create JPEG thumbnails
```
from __future__ import print_function
import os, sys
from PIL import Image
size = (128, 128)
for infile in os.listdir(os.getcwd()):
outfile = os.path.splitext(infile)[0] + ".thumbnail"
print(infile, outfile)
if infile != outfile:
try:
im = Image.open(infile)
im.thumbnail(size)
im.save(outfile, "JPEG")
except IOError:
print("cannot create thumbnail for", infile)
print(os.path.splitext('how/are/you/a.png'))
```
- It is important to note that the library doesn’t decode or load the raster data unless it really has to. When you open a file, the file header is read to determine the file format and extract things like mode, size, and other properties required to decode the file, but the rest of the file is not processed until later.
- This means that opening an image file is a fast operation, which is independent of the file size and compression type.
## Identify Image Files
```
from __future__ import print_function
import sys
from PIL import Image
for infile in os.listdir(os.getcwd()):
#print(infile)
try:
with Image.open(infile) as im:
print(infile, im.format, "%dx%d" % im.size, im.mode)
print(type(im.size))
except IOError:
pass
```
## Cutting, pasting, and merging images
- The Image class contains methods allowing you to manipulate regions within an image. To extract a sub-rectangle from an image, use the crop() method.
## Copying a subrectangle from an image
```
im = Image.open("lena.png")
box = (100, 100, 400, 400)
region = im.crop(box)
```
- The region could now be processed in a certain manner and pasted back.
## Processing a subrectangle, and pasting it back
```
region = region.transpose(Image.ROTATE_180)
im.paste(region, box)
im.show()
im.save('pasted.png')
```
## Rolling an image
```
def roll(image, delta):
"Roll an image sideways"
xsize, ysize = image.size #width and height
delta = delta % xsize
if delta == 0: return image
part1 = image.crop((0, 0, delta, ysize))
part2 = image.crop((delta, 0, xsize, ysize))
image.paste(part2, (0, 0, xsize-delta, ysize))
image.paste(part1, (xsize-delta, 0, xsize, ysize))
return image
im = Image.open("lena.png")
print(im.size)
im.show(roll(im,10))
```
## Splitting and merging bands
```
im = Image.open("lena.png")
r, g, b = im.split()
im1 = Image.merge("RGB", (b, g, r))
im2 = Image.merge("RGB", (r, r, r))
im3 = Image.merge("RGB", (g, g, g))
im4 = Image.merge("RGB", (b, b, b))
im5 = Image.merge("RGB", (g, r, b))
print(im1.mode)
#im1.show()
#im2.show()
#im3.show()
#im4.show()
im5.show()
```
- Note that for a single-band image, split() returns the image itself. To work with individual color bands, you may want to convert the image to “RGB” first.
## Geometrical transforms
- The PIL.Image.Image class contains methods to resize() and rotate() an image. The former takes a tuple giving the new size, the latter the angle in degrees counter-clockwise.
## Simple geometry transforms
```
im = Image.open("lena.png")
out = im.resize((128, 128))
out.show()
out = im.rotate(45) # degrees counter-clockwise
out.show()
out.save('rotated.png')
```
- To rotate the image in 90 degree steps, you can either use the rotate() method or the transpose() method. The latter can also be used to flip an image around its horizontal or vertical axis.
## Transposing an image
```
out = im.transpose(Image.FLIP_LEFT_RIGHT)
out.save('transposing/l2r.png')
out = im.transpose(Image.FLIP_TOP_BOTTOM)
out.save('transposing/t2b.png')
out = im.transpose(Image.ROTATE_90)
out.save('transposing/90degree.png')
out = im.transpose(Image.ROTATE_180)
out.save('transposing/180degree.png')
out = im.transpose(Image.ROTATE_270)
out.save('transposing/270degree.png')
```
- There’s no difference in performance or result between **transpose(ROTATE)** and corresponding **rotate()** operations.
- A more general form of image transformations can be carried out via the transform() method.
## Color transforms
- The Python Imaging Library allows you to convert images between different pixel representations using the ***convert()*** method.
## Converting between modes
```
im = Image.open("lena.png").convert("L")
im.show()
```
- The library supports transformations between each supported mode and the “L” and “RGB” modes. To convert between other modes, you may have to use an intermediate image (typically an “RGB” image).
## Image enhancement
## Applying filters
- The **ImageFilter** module contains a number of pre-defined enhancement filters that can be used with the **filter()** method.
from PIL import ImageFilter
im = Image.open("lena.png")
im.show('im')
im.save('filter/orig.png')
out = im.filter(ImageFilter.DETAIL)
out = out.filter(ImageFilter.DETAIL)
out = out.filter(ImageFilter.DETAIL)
out.show()
out.save('filter/out.png')
## Point Operations
## Applying point transforms
```
# multiply each pixel by 1.2
im = Image.open("lena.png")
im.save('point/orig.png')
out = im.point(lambda i: i * 1.2)
out.save('point/out.png')
```
- Using the above technique, you can quickly apply any simple expression to an image. You can also combine the point() and paste() methods to selectively modify an image:
## Processing individual bands
```
im = Image.open("lena.png")
# split the image into individual bands
source = im.split()
R, G, B = 0, 1, 2
# select regions where red is less than 100
mask = source[R].point(lambda i: i < 100 and 255) # if i < 100 returns 255 else returns false(0)
# process the green band
out = source[G].point(lambda i: i * 0.7)
# paste the processed band back, but only where red was < 100
source[G].paste(out, None, mask) # mask is just filtering here
# build a new multiband image
im = Image.merge(im.mode, source)
im.show()
# here we are reducing the green where red's intensity value is less than 100
```
- Python only evaluates the portion of a logical expression as is necessary to determine the outcome, and returns the last value examined as the result of the expression. So if the expression above is false (0), Python does not look at the second operand, and thus returns 0. Otherwise, it returns 255.
## Enhancement
```
from PIL import Image
from PIL import ImageEnhance
im = Image.open("lena.png")
enh = ImageEnhance.Contrast(im)
enh.enhance(1.3).show("30% more contrast")
```
## Image sequences
## Reading sequences
```
from PIL import Image
im = Image.open("animation.gif")
im.seek(1) # skip to the second frame
try:
while 1:
im.seek(im.tell()+1)
im.show()
# do something to im
except EOFError as e:
print(e)
pass # end of sequence
```
## A sequence iterator class
```
from PIL import Image
im = Image.open("animation.gif")
class ImageSequence:
def __init__(self, im):
self.im = im
def __getitem__(self, ix):
try:
if ix:
self.im.seek(ix)
return self.im
except EOFError:
print('ddd')
raise IndexError # end of sequence
for frame in ImageSequence(im):
# ...do something to frame...
frame.show()
pass
```
## Postscript printing
## Drawing Postscript
```
from PIL import Image
from PIL import PSDraw
im = Image.open("lena.png")
title = "lena"
box = (1*72, 2*72, 7*72, 10*72) # in points
ps = PSDraw.PSDraw() # default is sys.stdout
ps.begin_document(title)
# draw the image (75 dpi)
ps.image(box, im, 75)
ps.rectangle(box)
# draw title
ps.setfont("HelveticaNarrow-Bold", 36)
ps.text((3*72, 4*72), title)
ps.end_document()
```
## More on reading images
## Reading from an open file
```
fp = open("lena.png", "rb")
im = Image.open(fp)
im.show()
```
## Reading from a string
```
!pip install StringIO
import StringIO
im = Image.open(StringIO.StringIO(buffer))
```
## Reading from a tar archive
```
from PIL import TarIO
fp = TarIO.TarIO("Imaging.tar", "lena.png")
im = Image.open(fp)
```
## Controlling the decoder
## Reading in draft mode
```
from __future__ import print_function
from PIL import Image
im = Image.open('lena.png')
print("original =", im.mode, im.size)
im.draft("L", (100, 100))
print("draft =", im.mode, im.size)
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import scanpy as sc
import os
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import homogeneity_score
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
df_metrics = pd.DataFrame(columns=['ARI_Louvain','ARI_kmeans','ARI_HC',
'AMI_Louvain','AMI_kmeans','AMI_HC',
'Homogeneity_Louvain','Homogeneity_kmeans','Homogeneity_HC'])
workdir = './peaks_frequency_results/'
path_fm = os.path.join(workdir,'feature_matrices/')
path_clusters = os.path.join(workdir,'clusters/')
path_metrics = os.path.join(workdir,'metrics/')
os.system('mkdir -p '+path_clusters)
os.system('mkdir -p '+path_metrics)
metadata = pd.read_csv('../../input/metadata.tsv',sep='\t',index_col=0)
num_clusters = len(np.unique(metadata['label']))
print(num_clusters)
files = [x for x in os.listdir(path_fm) if x.startswith('FM')]
len(files)
files
def getNClusters(adata,n_cluster,range_min=0,range_max=3,max_steps=20):
this_step = 0
this_min = float(range_min)
this_max = float(range_max)
while this_step < max_steps:
print('step ' + str(this_step))
this_resolution = this_min + ((this_max-this_min)/2)
sc.tl.louvain(adata,resolution=this_resolution)
this_clusters = adata.obs['louvain'].nunique()
print('got ' + str(this_clusters) + ' at resolution ' + str(this_resolution))
if this_clusters > n_cluster:
this_max = this_resolution
elif this_clusters < n_cluster:
this_min = this_resolution
else:
return(this_resolution, adata)
this_step += 1
print('Cannot find the number of clusters')
print('Clustering solution from last iteration is used:' + str(this_clusters) + ' at resolution ' + str(this_resolution))
for file in files:
file_split = file[:-4].split('_')
method = file_split[1]
print(method)
pandas2ri.activate()
readRDS = robjects.r['readRDS']
df_rds = readRDS(os.path.join(path_fm,file))
fm_mat = pandas2ri.ri2py(robjects.r['data.frame'](robjects.r['as.matrix'](df_rds)))
fm_mat.fillna(0,inplace=True)
fm_mat.columns = metadata.index
adata = sc.AnnData(fm_mat.T)
adata.var_names_make_unique()
adata.obs = metadata.loc[adata.obs.index,]
df_metrics.loc[method,] = ""
#Louvain
sc.pp.neighbors(adata, n_neighbors=15,use_rep='X')
# sc.tl.louvain(adata)
getNClusters(adata,n_cluster=num_clusters)
#kmeans
kmeans = KMeans(n_clusters=num_clusters, random_state=2019).fit(adata.X)
adata.obs['kmeans'] = pd.Series(kmeans.labels_,index=adata.obs.index).astype('category')
#hierachical clustering
hc = AgglomerativeClustering(n_clusters=num_clusters).fit(adata.X)
adata.obs['hc'] = pd.Series(hc.labels_,index=adata.obs.index).astype('category')
#clustering metrics
#adjusted rank index
ari_louvain = adjusted_rand_score(adata.obs['label'], adata.obs['louvain'])
ari_kmeans = adjusted_rand_score(adata.obs['label'], adata.obs['kmeans'])
ari_hc = adjusted_rand_score(adata.obs['label'], adata.obs['hc'])
#adjusted mutual information
ami_louvain = adjusted_mutual_info_score(adata.obs['label'], adata.obs['louvain'],average_method='arithmetic')
ami_kmeans = adjusted_mutual_info_score(adata.obs['label'], adata.obs['kmeans'],average_method='arithmetic')
ami_hc = adjusted_mutual_info_score(adata.obs['label'], adata.obs['hc'],average_method='arithmetic')
#homogeneity
homo_louvain = homogeneity_score(adata.obs['label'], adata.obs['louvain'])
homo_kmeans = homogeneity_score(adata.obs['label'], adata.obs['kmeans'])
homo_hc = homogeneity_score(adata.obs['label'], adata.obs['hc'])
df_metrics.loc[method,['ARI_Louvain','ARI_kmeans','ARI_HC']] = [ari_louvain,ari_kmeans,ari_hc]
df_metrics.loc[method,['AMI_Louvain','AMI_kmeans','AMI_HC']] = [ami_louvain,ami_kmeans,ami_hc]
df_metrics.loc[method,['Homogeneity_Louvain','Homogeneity_kmeans','Homogeneity_HC']] = [homo_louvain,homo_kmeans,homo_hc]
adata.obs[['louvain','kmeans','hc']].to_csv(os.path.join(path_clusters ,method + '_clusters.tsv'),sep='\t')
df_metrics.to_csv(path_metrics+'clustering_scores.csv')
df_metrics
```
|
github_jupyter
|
<h3>Sin Cython</h3>
<p>Este programa genera $N$ enteros aleatorios entre $1$ y $M$, y una vez obtenidos los eleva al cuadrado y devuelve la suma de los cuadrados. Por tanto, calcula el cuadrado de la longitud de un vector aleatorio con coordenadas enteros en el intervalo $[1,M]$.</p>
```
def cuadrados(N,M):
res = 0
for muda in xrange(N):
x = randint(1,M)
res += x*x
return res
for n in srange(3,8):
%time A = cuadrados(10^n,10^6)
```
<h3>Con Cython</h3>
<p>Mismo cálculo:</p>
```
%%cython
import math
import random
def cuadrados_cy(long long N, long long M):
cdef long long res = 0
cdef long long muda
cdef long long x
for muda in xrange(N):
x = random.randint(1,M)
res += math.pow(x,2)
return res
for n in srange(3,8):
%time A = cuadrados_cy(10^n,10^6)
```
<h3>Optimizando el cálculo de números aleatorios:</h3>
```
%%cython
cdef extern from 'gsl/gsl_rng.h':
ctypedef struct gsl_rng_type:
pass
ctypedef struct gsl_rng:
pass
gsl_rng_type *gsl_rng_mt19937
gsl_rng *gsl_rng_alloc(gsl_rng_type * T)
cdef gsl_rng *r = gsl_rng_alloc(gsl_rng_mt19937)
cdef extern from 'gsl/gsl_randist.h':
long int uniform 'gsl_rng_uniform_int'(gsl_rng * r, unsigned long int n)
def main():
cdef int n
n = uniform(r,1000000)
return n
cdef long f(long x):
return x**2
import random
def cuadrados_cy2(int N):
cdef long res = 0
cdef int muda
for muda in range(N):
res += f(main())
return res
for n in srange(3,8):
%time A = cuadrados_cy2(10^n)
```
<h3>Problema similar sin números aleatorios:</h3>
```
%%cython
def cuadrados_cy3(long long int N):
cdef long long int res = 0
cdef long long int k
for k in range(N):
res += k**2
return res
for n in srange(3,8):
%time A = cuadrados_cy3(10^n)
def cuadrados5(N):
res = 0
for k in range(N):
res += k**2
return res
for n in srange(3,8):
%time A = cuadrados5(10^n)
```
<p>Hemos comprobado, de dos maneras, que es en la generación de los números aleatorios donde Python pasa la mayor parte del tiempo en este cálculo. Si optimizamos esa parte, usando una librería en C, o simplemente la suprimimos, el cálculo es mucho más rápido. Cython pierde muchísima eficiencia cuando debe ejecutar funciones de Python que son mucho más lentas que las correspondientes funciones en C.</p>
|
github_jupyter
|
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Regression with Deployment using Hardware Performance Dataset**_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Data](#Data)
1. [Train](#Train)
1. [Results](#Results)
1. [Test](#Test)
1. [Acknowledgements](#Acknowledgements)
## Introduction
In this example we use the Hardware Performance Dataset to showcase how you can use AutoML for a simple regression problem. The Regression goal is to predict the performance of certain combinations of hardware parts.
If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace.
In this notebook you will learn how to:
1. Create an `Experiment` in an existing `Workspace`.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model using local compute.
4. Explore the results.
5. Test the best fitted model.
## Setup
As part of the setup you have already created an Azure ML Workspace object. For AutoML you will need to create an Experiment object, which is a named object in a Workspace used to run experiments.
```
import logging
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import azureml.dataprep as dprep
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
ws = Workspace.from_config()
# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-regression-hardware'
project_folder = './sample_projects/automl-remote-regression'
experiment = Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
## Create or Attach existing AmlCompute
You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
#### Creation of AmlCompute takes approximately 5 minutes.
If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read this article on the default limits and how to request more quota.
```
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Choose a name for your cluster.
amlcompute_cluster_name = "automlcl"
found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
found = True
print('Found existing compute target.')
compute_target = cts[amlcompute_cluster_name]
if not found:
print('Creating a new compute target...')
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2", # for GPU, use "STANDARD_NC6"
#vm_priority = 'lowpriority', # optional
max_nodes = 6)
# Create the cluster.
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min_node_count is provided, it will use the scale settings for the cluster.
compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# For a more detailed view of current AmlCompute status, use get_status().
```
# Data
Here load the data in the get_data script to be utilized in azure compute. To do this, first load all the necessary libraries and dependencies to set up paths for the data and to create the conda_run_config.
```
if not os.path.isdir('data'):
os.mkdir('data')
if not os.path.exists(project_folder):
os.makedirs(project_folder)
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
import pkg_resources
# create a new RunConfig object
conda_run_config = RunConfiguration(framework="python")
# Set compute target to AmlCompute
conda_run_config.target = compute_target
conda_run_config.environment.docker.enabled = True
conda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE
dprep_dependency = 'azureml-dataprep==' + pkg_resources.get_distribution("azureml-dataprep").version
cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', dprep_dependency], conda_packages=['numpy'])
conda_run_config.environment.python.conda_dependencies = cd
```
### Load Data
Here create the script to be run in azure compute for loading the data, load the hardware dataset into the X and y variables. Next split the data using train_test_split and return X_train and y_train for training the model.
```
data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/machineData.csv"
dflow = dprep.read_csv(data, infer_column_types=True)
dflow.get_profile()
X = dflow.drop_columns(columns=['ERP'])
y = dflow.keep_columns(columns=['ERP'], validate_column_exists=True)
X_train, X_test = X.random_split(percentage=0.8, seed=223)
y_train, y_test = y.random_split(percentage=0.8, seed=223)
dflow.head()
```
## Train
Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**n_cross_validations**|Number of cross validation splits.|
|**X**|(sparse) array-like, shape = [n_samples, n_features]|
|**y**|(sparse) array-like, shape = [n_samples, ], targets values.|
|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
**_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric)
##### If you would like to see even better results increase "iteration_time_out minutes" to 10+ mins and increase "iterations" to a minimum of 30
```
automl_settings = {
"iteration_timeout_minutes": 5,
"iterations": 10,
"n_cross_validations": 5,
"primary_metric": 'spearman_correlation',
"preprocess": True,
"max_concurrent_iterations": 5,
"verbosity": logging.INFO,
}
automl_config = AutoMLConfig(task = 'regression',
debug_log = 'automl_errors_20190417.log',
path = project_folder,
run_configuration=conda_run_config,
X = X_train,
y = y_train,
**automl_settings
)
remote_run = experiment.submit(automl_config, show_output = False)
remote_run
```
## Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
from azureml.widgets import RunDetails
RunDetails(remote_run).show()
# Wait until the run finishes.
remote_run.wait_for_completion(show_output = True)
```
## Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
children = list(remote_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(1)
rundata
```
## Retrieve the Best Model
Below we select the best pipeline from our iterations. The get_output method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration.
```
best_run, fitted_model = remote_run.get_output()
print(best_run)
print(fitted_model)
```
#### Best Model Based on Any Other Metric
Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):
```
lookup_metric = "root_mean_squared_error"
best_run, fitted_model = remote_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)
iteration = 3
third_run, third_model = remote_run.get_output(iteration = iteration)
print(third_run)
print(third_model)
```
## Register the Fitted Model for Deployment
If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.
```
description = 'AutoML Model'
tags = None
model = remote_run.register_model(description = description, tags = tags)
print(remote_run.model_id) # This will be written to the script file later in the notebook.
```
### Create Scoring Script
The scoring script is required to generate the image for deployment. It contains the code to do the predictions on input data.
```
%%writefile score.py
import pickle
import json
import numpy
import azureml.train.automl
from sklearn.externals import joblib
from azureml.core.model import Model
def init():
global model
model_path = Model.get_model_path(model_name = '<<modelid>>') # this name is model.id of model that we want to deploy
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
def run(rawdata):
try:
data = json.loads(rawdata)['data']
data = numpy.array(data)
result = model.predict(data)
except Exception as e:
result = str(e)
return json.dumps({"error": result})
return json.dumps({"result":result.tolist()})
```
### Create a YAML File for the Environment
To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. Details about retrieving the versions can be found in notebook [12.auto-ml-retrieve-the-training-sdk-versions](12.auto-ml-retrieve-the-training-sdk-versions.ipynb).
```
dependencies = remote_run.get_run_sdk_dependencies(iteration = 1)
for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:
print('{}\t{}'.format(p, dependencies[p]))
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'], pip_packages=['azureml-sdk[automl]'])
conda_env_file_name = 'myenv.yml'
myenv.save_to_file('.', conda_env_file_name)
# Substitute the actual version number in the environment file.
# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.
# However, we include this in case this code is used on an experiment from a previous SDK version.
with open(conda_env_file_name, 'r') as cefr:
content = cefr.read()
with open(conda_env_file_name, 'w') as cefw:
cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))
# Substitute the actual model id in the script file.
script_file_name = 'score.py'
with open(script_file_name, 'r') as cefr:
content = cefr.read()
with open(script_file_name, 'w') as cefw:
cefw.write(content.replace('<<modelid>>', remote_run.model_id))
```
### Create a Container Image
Next use Azure Container Instances for deploying models as a web service for quickly deploying and validating your model
or when testing a model that is under development.
```
from azureml.core.image import Image, ContainerImage
image_config = ContainerImage.image_configuration(runtime= "python",
execution_script = script_file_name,
conda_file = conda_env_file_name,
tags = {'area': "digits", 'type': "automl_regression"},
description = "Image for automl regression sample")
image = Image.create(name = "automlsampleimage",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
if image.creation_state == 'Failed':
print("Image build log at: " + image.image_build_log_uri)
```
### Deploy the Image as a Web Service on Azure Container Instance
Deploy an image that contains the model and other assets needed by the service.
```
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'area': "digits", 'type': "automl_regression"},
description = 'sample service for Automl Regression')
from azureml.core.webservice import Webservice
aci_service_name = 'automl-sample-hardware'
print(aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
```
### Delete a Web Service
Deletes the specified web service.
```
#aci_service.delete()
```
### Get Logs from a Deployed Web Service
Gets logs from a deployed web service.
```
#aci_service.get_logs()
```
## Test
Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values.
```
X_test = X_test.to_pandas_dataframe()
y_test = y_test.to_pandas_dataframe()
y_test = np.array(y_test)
y_test = y_test[:,0]
X_train = X_train.to_pandas_dataframe()
y_train = y_train.to_pandas_dataframe()
y_train = np.array(y_train)
y_train = y_train[:,0]
```
##### Predict on training and test set, and calculate residual values.
```
y_pred_train = fitted_model.predict(X_train)
y_residual_train = y_train - y_pred_train
y_pred_test = fitted_model.predict(X_test)
y_residual_test = y_test - y_pred_test
```
### Calculate metrics for the prediction
Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values
from the trained model that was returned.
```
%matplotlib inline
from sklearn.metrics import mean_squared_error, r2_score
# Set up a multi-plot chart.
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})
f.suptitle('Regression Residual Values', fontsize = 18)
f.set_figheight(6)
f.set_figwidth(16)
# Plot residual values of training set.
a0.axis([0, 360, -200, 200])
a0.plot(y_residual_train, 'bo', alpha = 0.5)
a0.plot([-10,360],[0,0], 'r-', lw = 3)
a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)
a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)),fontsize = 12)
a0.set_xlabel('Training samples', fontsize = 12)
a0.set_ylabel('Residual Values', fontsize = 12)
# Plot residual values of test set.
a1.axis([0, 90, -200, 200])
a1.plot(y_residual_test, 'bo', alpha = 0.5)
a1.plot([-10,360],[0,0], 'r-', lw = 3)
a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)
a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)),fontsize = 12)
a1.set_xlabel('Test samples', fontsize = 12)
a1.set_yticklabels([])
plt.show()
%matplotlib notebook
test_pred = plt.scatter(y_test, y_pred_test, color='')
test_test = plt.scatter(y_test, y_test, color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
```
## Acknowledgements
This Predicting Hardware Performance Dataset is made available under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication License: https://creativecommons.org/publicdomain/zero/1.0/. Any rights in individual contents of the database are licensed under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication License: https://creativecommons.org/publicdomain/zero/1.0/ . The dataset itself can be found here: https://www.kaggle.com/faizunnabi/comp-hardware-performance and https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
_**Citation Found Here**_
|
github_jupyter
|
```
%matplotlib inline
```
Word Embeddings: Encoding Lexical Semantics
===========================================
Word embeddings are dense vectors of real numbers, one per word in your
vocabulary. In NLP, it is almost always the case that your features are
words! But how should you represent a word in a computer? You could
store its ascii character representation, but that only tells you what
the word *is*, it doesn't say much about what it *means* (you might be
able to derive its part of speech from its affixes, or properties from
its capitalization, but not much). Even more, in what sense could you
combine these representations? We often want dense outputs from our
neural networks, where the inputs are $|V|$ dimensional, where
$V$ is our vocabulary, but often the outputs are only a few
dimensional (if we are only predicting a handful of labels, for
instance). How do we get from a massive dimensional space to a smaller
dimensional space?
How about instead of ascii representations, we use a one-hot encoding?
That is, we represent the word $w$ by
\begin{align}\overbrace{\left[ 0, 0, \dots, 1, \dots, 0, 0 \right]}^\text{|V| elements}\end{align}
where the 1 is in a location unique to $w$. Any other word will
have a 1 in some other location, and a 0 everywhere else.
There is an enormous drawback to this representation, besides just how
huge it is. It basically treats all words as independent entities with
no relation to each other. What we really want is some notion of
*similarity* between words. Why? Let's see an example.
Suppose we are building a language model. Suppose we have seen the
sentences
* The mathematician ran to the store.
* The physicist ran to the store.
* The mathematician solved the open problem.
in our training data. Now suppose we get a new sentence never before
seen in our training data:
* The physicist solved the open problem.
Our language model might do OK on this sentence, but wouldn't it be much
better if we could use the following two facts:
* We have seen mathematician and physicist in the same role in a sentence. Somehow they
have a semantic relation.
* We have seen mathematician in the same role in this new unseen sentence
as we are now seeing physicist.
and then infer that physicist is actually a good fit in the new unseen
sentence? This is what we mean by a notion of similarity: we mean
*semantic similarity*, not simply having similar orthographic
representations. It is a technique to combat the sparsity of linguistic
data, by connecting the dots between what we have seen and what we
haven't. This example of course relies on a fundamental linguistic
assumption: that words appearing in similar contexts are related to each
other semantically. This is called the `distributional
hypothesis <https://en.wikipedia.org/wiki/Distributional_semantics>`__.
Getting Dense Word Embeddings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
How can we solve this problem? That is, how could we actually encode
semantic similarity in words? Maybe we think up some semantic
attributes. For example, we see that both mathematicians and physicists
can run, so maybe we give these words a high score for the "is able to
run" semantic attribute. Think of some other attributes, and imagine
what you might score some common words on those attributes.
If each attribute is a dimension, then we might give each word a vector,
like this:
\begin{align}q_\text{mathematician} = \left[ \overbrace{2.3}^\text{can run},
\overbrace{9.4}^\text{likes coffee}, \overbrace{-5.5}^\text{majored in Physics}, \dots \right]\end{align}
\begin{align}q_\text{physicist} = \left[ \overbrace{2.5}^\text{can run},
\overbrace{9.1}^\text{likes coffee}, \overbrace{6.4}^\text{majored in Physics}, \dots \right]\end{align}
Then we can get a measure of similarity between these words by doing:
\begin{align}\text{Similarity}(\text{physicist}, \text{mathematician}) = q_\text{physicist} \cdot q_\text{mathematician}\end{align}
Although it is more common to normalize by the lengths:
\begin{align}\text{Similarity}(\text{physicist}, \text{mathematician}) = \frac{q_\text{physicist} \cdot q_\text{mathematician}}
{\| q_\text{\physicist} \| \| q_\text{mathematician} \|} = \cos (\phi)\end{align}
Where $\phi$ is the angle between the two vectors. That way,
extremely similar words (words whose embeddings point in the same
direction) will have similarity 1. Extremely dissimilar words should
have similarity -1.
You can think of the sparse one-hot vectors from the beginning of this
section as a special case of these new vectors we have defined, where
each word basically has similarity 0, and we gave each word some unique
semantic attribute. These new vectors are *dense*, which is to say their
entries are (typically) non-zero.
But these new vectors are a big pain: you could think of thousands of
different semantic attributes that might be relevant to determining
similarity, and how on earth would you set the values of the different
attributes? Central to the idea of deep learning is that the neural
network learns representations of the features, rather than requiring
the programmer to design them herself. So why not just let the word
embeddings be parameters in our model, and then be updated during
training? This is exactly what we will do. We will have some *latent
semantic attributes* that the network can, in principle, learn. Note
that the word embeddings will probably not be interpretable. That is,
although with our hand-crafted vectors above we can see that
mathematicians and physicists are similar in that they both like coffee,
if we allow a neural network to learn the embeddings and see that both
mathematicians and physicists have a large value in the second
dimension, it is not clear what that means. They are similar in some
latent semantic dimension, but this probably has no interpretation to
us.
In summary, **word embeddings are a representation of the *semantics* of
a word, efficiently encoding semantic information that might be relevant
to the task at hand**. You can embed other things too: part of speech
tags, parse trees, anything! The idea of feature embeddings is central
to the field.
Word Embeddings in Pytorch
~~~~~~~~~~~~~~~~~~~~~~~~~~
Before we get to a worked example and an exercise, a few quick notes
about how to use embeddings in Pytorch and in deep learning programming
in general. Similar to how we defined a unique index for each word when
making one-hot vectors, we also need to define an index for each word
when using embeddings. These will be keys into a lookup table. That is,
embeddings are stored as a $|V| \times D$ matrix, where $D$
is the dimensionality of the embeddings, such that the word assigned
index $i$ has its embedding stored in the $i$'th row of the
matrix. In all of my code, the mapping from words to indices is a
dictionary named word\_to\_ix.
The module that allows you to use embeddings is torch.nn.Embedding,
which takes two arguments: the vocabulary size, and the dimensionality
of the embeddings.
To index into this table, you must use torch.LongTensor (since the
indices are integers, not floats).
```
# Author: Robert Guthrie
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings
lookup_tensor_hello = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor_hello)
print("hello_embed: ", hello_embed)
lookup_tensor_world = torch.tensor([word_to_ix["world"]], dtype=torch.long)
world_embed = embeds(lookup_tensor_world)
print("worlds_embed: ", world_embed)
```
An Example: N-Gram Language Modeling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Recall that in an n-gram language model, given a sequence of words
$w$, we want to compute
\begin{align}P(w_i | w_{i-1}, w_{i-2}, \dots, w_{i-n+1} )\end{align}
Where $w_i$ is the ith word of the sequence.
In this example, we will compute the loss function on some training
examples and update the parameters with backpropagation.
```
CONTEXT_SIZE = 5
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
ngrams = [([test_sentence[i + j] for j in range(CONTEXT_SIZE)], test_sentence[i + CONTEXT_SIZE])
for i in range(len(test_sentence) - CONTEXT_SIZE)]
# trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
# for i in range(len(test_sentence) - 2)]
print("the first 3 ngrams, just so you can see what they look like: ")
print(ngrams[:3])
print("the last 3 ngrams: ")
print(ngrams[-3:])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModeler, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(context_size * embedding_dim, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs).view((1, -1))
out = F.relu(self.linear1(embeds))
out = self.linear2(out)
# print("out: ", out)
log_probs = F.log_softmax(out, dim=1)
# print("log probs: ", log_probs)
return log_probs
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(1):
total_loss = 0
for context, target in ngrams:
# Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
# into integer indices and wrap them in tensors)
context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs)
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a tensor)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
loss.backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
total_loss += loss.item()
losses.append(total_loss)
print("losses: ", losses)
print("The loss decreased every iteration over the training data!")
```
Exercise: Computing Word Embeddings: Continuous Bag-of-Words
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Continuous Bag-of-Words model (CBOW) is frequently used in NLP deep
learning. It is a model that tries to predict words given the context of
a few words before and a few words after the target word. This is
distinct from language modeling, since CBOW is not sequential and does
not have to be probabilistic. Typcially, CBOW is used to quickly train
word embeddings, and these embeddings are used to initialize the
embeddings of some more complicated model. Usually, this is referred to
as *pretraining embeddings*. It almost always helps performance a couple
of percent.
The CBOW model is as follows. Given a target word $w_i$ and an
$N$ context window on each side, $w_{i-1}, \dots, w_{i-N}$
and $w_{i+1}, \dots, w_{i+N}$, referring to all context words
collectively as $C$, CBOW tries to minimize
\begin{align}-\log p(w_i | C) = -\log \text{Softmax}(A(\sum_{w \in C} q_w) + b)\end{align}
where $q_w$ is the embedding for word $w$.
Implement this model in Pytorch by filling in the class below. Some
tips:
* Think about which parameters you need to define.
* Make sure you know what shape each operation expects. Use .view() if you need to
reshape.
```
CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right
EMBEDDING_DIM = 10
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()
# By deriving a set from `raw_text`, we deduplicate the array
vocab = set(raw_text)
vocab_size = len(vocab)
word_to_ix = {word: i for i, word in enumerate(vocab)}
data = []
for i in range(2, len(raw_text) - 2):
context = [raw_text[i - 2], raw_text[i - 1],
raw_text[i + 1], raw_text[i + 2]]
target = raw_text[i]
data.append((context, target))
print(data[:5])
class CBOW(nn.Module):
def __init__(self, vocab_size, embedding_dim):
super(CBOW, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear = nn.Linear(embedding_dim, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs)
# print("embeds: ", embeds)
qsum = torch.sum(embeds, dim=0)
# print("qsum: ", qsum)
out = self.linear(qsum)
# print("out: ", out)
log_probs = F.log_softmax(out, dim=0)
# print("log probs: ", log_probs)
return log_probs
# create your model and train. here are some functions to help you make
# the data ready for use by your module
def make_context_vector(context, word_to_ix):
idxs = [word_to_ix[w] for w in context]
return torch.tensor(idxs, dtype=torch.long)
context_vector = make_context_vector(data[0][0], word_to_ix) # example
print("context vector: ", context_vector)
losses = []
loss_function = nn.NLLLoss()
model = CBOW(len(vocab), EMBEDDING_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
total_loss = 0
for context, target in data:
# Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
# into integer indices and wrap them in tensors)
# context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
context_idxs = make_context_vector(context, word_to_ix)
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs)
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a tensor)
# loss_function requires a minibatch index - here we have only 1
loss = loss_function(log_probs.unsqueeze(0), torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
loss.backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
total_loss += loss.item()
losses.append(total_loss)
print(losses) # The loss decreased every iteration over the training data!
```
|
github_jupyter
|
# Online prediction for radon-small
In online mode, the model is learning as soon as a new data arrives.
It means that when we want our prediction we don't need to provide feature vector,
since all data was already processed by the model.
Explore the following models:
* Constant model - The same value for all future points
* Previous day model - Next day is the same like previous day
* Daily Pattern model - Calculate daily pattern from historical data. Use it as next day prediction.
```
import datetime
import calendar
import pprint
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['figure.figsize'] = 12, 4
```
# Load project
```
project_folder = '../../datasets/radon-small/'
with open(project_folder + 'project.json', 'r') as file:
project = json.load(file)
pprint.pprint(project)
print('Flow1')
flow = pd.read_csv(project_folder + 'flow1.csv', parse_dates=['time'])
flow = flow.set_index('time')['flow'].fillna(0)
flow = flow.resample('5T').pad()
flow.head()
```
## Helper functions
Helper functions for building training and test sets and calculating score
```
class PredictionModel:
def fit(self, data_points):
pass
def predict(self, prediction_day):
pass
def mae(y_hat, y):
"""
Calculate Mean Absolute Error
This metric is better here since serries have quite big outliers
"""
return np.sum(np.absolute(y_hat-y))/y.shape[0]
def split_data(split_day):
"""Get all data up to given day"""
end_day = split_day - pd.Timedelta('1 min')
return flow[:end_day]
def evaluate_day(model, split_day):
"""Evaluate data for single day"""
xs = split_data(split_day)
next_day = split_day + pd.Timedelta(1, 'D')
y = flow[next_day: next_day+pd.Timedelta('1439 min')]
model.fit(xs)
y_hat = model.predict(next_day)
return mae(y_hat, y)
def evaluate_model(model, start_day):
"""
Evaluate model on all days starting from split_day.
Returns 90th percentile error as model score
"""
last_day = pd.Timestamp(project['end-date'])
split_day = start_day
costs = []
while split_day < last_day:
cost = evaluate_day(model, split_day)
costs.append(cost)
split_day += pd.Timedelta(1, 'D')
return np.percentile(costs, 90), costs
split_data(pd.Timestamp('2016-11-10')).tail()
```
# Models
# ConstMeanModel
```
class ConstantMeanModel(PredictionModel):
def __init__(self):
self.mu = 0
def fit(self, xs):
self.mu = np.mean(xs)
def predict(self, day):
return np.ones(12*24) * self.mu
score, costs = evaluate_model(ConstantMeanModel(), pd.Timestamp('2016-11-11'))
print('ConstantMeanModel score: {:.2f}'.format(score))
```
## Previous Day Model
Uses values from last day
```
class LastDayModel(PredictionModel):
def fit(self, xs):
self.y = xs.values[-288:]
def predict(self, day):
return self.y
score, costs = evaluate_model(LastDayModel(), pd.Timestamp('2016-11-11'))
print('LastDayModel score: {:.2f}'.format(score))
```
Model for single day. Easy case
```
evaluate_day(LastDayModel(), pd.Timestamp('2016-11-11'))
```
And when next day is kind of outlier
```
evaluate_day(LastDayModel(), pd.Timestamp('2017-05-01'))
```
## Daily Pattern model
Create pattern of daily usage based on historical data. Use this pattern to predict next values
(This can take up to 10 minutes to calculate)
```
class DailyPatternModel(PredictionModel):
def fit(self, xs):
df = flow.to_frame().reset_index()
self.daily_pattern = df.groupby(by=[df.time.map(lambda x : (x.hour, x.minute))]).flow.mean().values
def predict(self, day):
return self.daily_pattern
score, costs = evaluate_model(DailyPatternModel(), pd.Timestamp('2016-11-11'))
print('DailyPatternModel score: {:.2f}'.format(score))
```
### Daily Pattern Median Model
Calculate median value for each time. Use it as a prediction for the next day.
```
class DayMedianModel(PredictionModel):
def fit(self, xs):
df = flow.to_frame().reset_index()
self.daily_pattern = df.groupby(by=[df.time.map(lambda x : (x.hour, x.minute))]).flow.median().values
def predict(self, day):
return self.daily_pattern
score, costs = evaluate_model(DayMedianModel(), pd.Timestamp('2016-11-11'))
print('DayModel score: {:.2f}'.format(score))
```
## Daily pattern with last value correction
This model calculates daily pattern, but also corrects it based on previous value
$$ x_{t} = \alpha (x_{t-1} - dp(t-1)) + dp(t)$$
where
- dp - daily pattern
|
github_jupyter
|
# classificate speaking_audio files
```
# 균형있게 구성하기
# 1. 성별 50:50
# 2. 지역 25:25:25:25
# 각 지역별로 남 10, 여 10명
# 총 80명.
import os
import shutil
import random
from typing_extensions import final
A = [] # 강원
B = [] # 서울/경기
C = [] # 경상
D = [] # 전라
E = [] # 제주(현재 없음)
F = [] # 충청(현재 없음)
G = [] # 기타(현재 없음)
region = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
# 각 파일들을 지역별로 분류합니다.
# 노인 음성 데이터셋이 있는 디렉토리
basic_path = os.path.join('../Dataset_audio/old_total')
for i in region:
os.makedirs(basic_path + '/' + i)
for (path, dir, files) in os.walk(basic_path):
for filename in files:
ext = os.path.splitext(filename)[-1]
if ext == '.wav':
if os.path.splitext(filename)[0][-1] == 'A':
A.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'A', filename)
)
elif os.path.splitext(filename)[0][-1] == 'B':
B.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'B', filename)
)
elif os.path.splitext(filename)[0][-1] == 'C':
C.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'C', filename)
)
elif os.path.splitext(filename)[0][-1] == 'D':
D.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'D', filename)
)
elif os.path.splitext(filename)[0][-1] == 'E':
E.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'E', filename)
)
elif os.path.splitext(filename)[0][-1] == 'F':
F.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'F', filename)
)
elif os.path.splitext(filename)[0][-1] == 'G':
G.append(filename)
shutil.move(
os.path.join(path, filename),
os.path.join(basic_path, 'G', filename)
)
for i in [A, B, C, D, E, F, G]:
print('file_num: ', len(i))
# 지역별로 나눈 파일을 성별로 나눕니다.
M=[]
F=[]
for i in region:
print(i)
for (path, dir, files) in os.walk(os.path.join(basic_path, i)):
for filename in files:
ext = os.path.splitext(filename)[-1]
if ext == '.wav':
if os.path.splitext(filename)[0][-6] == 'M':
#print(filename, 'M')
M.append(filename)
try:
os.mkdir(
os.path.join(basic_path, i, 'M')
)
except FileExistsError:
pass
shutil.move(
os.path.join(basic_path, i, filename),
os.path.join(basic_path, i, 'M', filename)
)
elif os.path.splitext(filename)[0][-6] == 'F':
#print(filename, 'F')
F.append(filename)
try:
os.mkdir(
os.path.join(basic_path, i, 'F')
)
except FileExistsError:
pass
shutil.move(
os.path.join(basic_path, i, filename),
os.path.join(basic_path, i, 'F', filename)
)
else:
print('Cannot find gender')
# 3. 각 지역별로 최대 남 100, 여 100명의 화자를 선정합니다.
# 랜덤 선정
# random.sample(list, n_sample)
target_path = os.path.join('../Dataset_audio/old_total')
def speaker_select(target_path):
region = ['A', 'B', 'C', 'D', 'E', 'F']
gender = ['M', 'F']
result = []
for i in region:
for g in gender:
print(i, '-', g)
try:
by_gender_files = os.listdir(os.path.join(target_path, i, g))
by_gender_speaker = [file[:6] for file in by_gender_files]
selected_speaker = random.sample(by_gender_speaker, 100)
result.append(selected_speaker)
print('num of selected_speaker: ', len(list(set(selected_speaker))))
except FileNotFoundError:
pass
return result
selected_speakers = speaker_select(target_path)
# file select
target_path = r'../Dataset_audio/old_total'
def file_select(target_path, selected_speakers):
err_count = []
region = ['A', 'B', 'C', 'D', 'E', 'F']
for i in region:
print(i)
for (path, dir, files) in os.walk(os.path.join(target_path, i)):
for filename in files:
# ext = os.path.splitext(filename)[-1]
# if ext == '.wav':
speaker = filename[:6]
g = os.path.splitext(filename)[0][-6]
for x in selected_speakers:
if speaker in x:
#print('he/she is selected speaker.')
if g == 'M':
#print('{} is male'.format(speaker))
try:
os.makedirs(
os.path.join(target_path, i, 'selected_M', speaker)
)
except:
pass
shutil.copy(
os.path.join(target_path, i, 'M', filename),
os.path.join(target_path, i, 'selected_M', speaker, filename)
)
elif g == 'F':
#print('{} is female'.format(speaker))
try:
os.makedirs(
os.path.join(target_path, i, 'selected_F', speaker)
)
except:
pass
shutil.copy(
os.path.join(target_path, i, 'F', filename),
os.path.join(target_path, i, 'selected_F', speaker, filename)
)
else:
print('cannot found gender')
err_count.append(filename)
print(err_count)
file_select(target_path, selected_speakers)
# selected_folders에 있는 파일 찾기
# 한 화자당 최대 30개씩
target_path = r'../Dataset_audio/old_total'
selected_folders = ['selected_M', 'selected_F']
def finding_selected_files(folder_name_list):
filenames_random = []
for i in region:
for (path, dir, files) in os.walk(target_path + '/' + i):
#print('current path:', path)
#print('curren dir:', dir)
if path.split('/')[-2] in folder_name_list:
filenames = []
for filename in files:
#print('filename: ', filename)
ext = os.path.splitext(filename)[-1]
if ext == '.wav':
filenames.append(filename)
filenames_random += random.sample(filenames, min(len(filenames), 30)) #최대 30
return filenames_random
selected_files = finding_selected_files(selected_folders)
len(selected_files)
# 랜덤으로 선택한 파일을 복사하기
speaking_path = r'../Dataset_audio/Speaking'
def final_selected_files(new_path, filename_list):
target_path = r'../Dataset_audio/old_total'
for (path, dir, files) in os.walk(target_path):
for filename in files:
if filename in filename_list:
try:
shutil.copy(
os.path.join(path, filename),
os.path.join(new_path, filename)
)
#print(os.path.join(path, filename))
#print(os.path.join(new_path, filename), 'copied')
except FileNotFoundError:
pass
final_selected_files(speaking_path, selected_files)
len(os.listdir(r'../Dataset_audio/Speaking'))
```
|
github_jupyter
|
## Exploratory data analysis of Dranse discharge data
Summary: The data is stationary even without differencing, but ACF and PACF plots show that an hourly first order difference and a periodic 24h first order difference is needed for SARIMA fitting.
Note: Final fitting done in Google Colab due to memory constraints - this notebook will throw some errors
## SARIMAX model fitting
### 1.) Loading the river flow (discharge) data
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from river_forecast.training_data_access import get_combined_flow
flow_df = get_combined_flow()
plt.plot(flow_df.index, flow_df)
```
### Exploratory Analysis
```
subset_df = flow_df.loc[:]
subset_df['year'] = subset_df.index.year
subset_df['offset_datetime'] = subset_df.index + pd.DateOffset(year=2019)
sns.set(style="whitegrid")
sns.set(rc={'figure.figsize':(15, 8)})
ax = sns.lineplot(x='offset_datetime', y='discharge', hue='year', data=subset_df, markers='')
import matplotlib.dates as mdates
myFmt = mdates.DateFormatter('%b')
ax.get_xaxis().set_major_formatter(myFmt)
ax.set_xlabel('Month')
ax.set_ylabel('Discharge (m^3/s)')
```
### train-test split
```
import statsmodels.api as sm
train = flow_df.loc[flow_df.index < pd.to_datetime('2019-01-01 00:00:00')]
test = flow_df.loc[(flow_df.index >= pd.to_datetime('2019-01-01 00:00:00')) & (flow_df.index < pd.to_datetime('2019-07-01 00:00:00'))]
fig, ax = plt.subplots()
train.plot(ax=ax, label='train')
test.plot(ax=ax, label='test')
plt.legend()
plt.show()
```
### Time series stationarity analysis
```
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
def tsplot(y, lags=None, figsize=(12, 7), style='bmh'):
"""
Plot time series, its ACF and PACF, calculate Dickey–Fuller test
-> Adapted from https://gist.github.com/DmitrySerg/14c1af2c1744bb9931d1eae6d9713b21
y - timeseries
lags - how many lags to include in ACF, PACF calculation
"""
if not isinstance(y, pd.Series):
y = pd.Series(y)
with plt.style.context(style):
fig = plt.figure(figsize=figsize)
layout = (2, 2)
ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
acf_ax = plt.subplot2grid(layout, (1, 0))
pacf_ax = plt.subplot2grid(layout, (1, 1))
y.plot(ax=ts_ax)
t_statistic, p_value = sm.tsa.stattools.adfuller(y)[:2]
ts_ax.set_title('Time Series Analysis Plots\n Dickey-Fuller: p={0:.5f}'.format(p_value))
smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
plt.tight_layout()
```
#### Augmenteded Dicky-Fuller to check for stationarity
```
flow = flow_df['discharge']
flow_diff_1 = (flow - flow.shift(1)).dropna()
flow_diff_1_24 = (flow_diff_1 - flow_diff_1.shift(24)).dropna()
flow_diff_24 = (flow - flow.shift(24)).dropna()
tsplot(flow, lags=24*5, figsize=(12, 7))
tsplot(flow_diff_1, lags=24*5, figsize=(12, 7))
tsplot(flow_diff_1_24, lags=24*7, figsize=(12, 7))
tsplot(flow_diff_1_24, lags=12, figsize=(12, 7))
```
#### Fitting SARIMAX
```
train['discharge'].plot()
from statsmodels.tsa.statespace.sarimax import SARIMAX
### Crashed again upon completion, make sure the time series is ok -> computation moved to Colab
# Create a SARIMAX model
model = SARIMAX(train['discharge'], order=(4,1,1), seasonal_order=(0,1,1,24))
# p - try 0, 1, 2, 3, 4; q is cleary one. Q is clearly 1, P is tapering off: 0.
# Fit the model
results = model.fit()
import pickle
pickle.dump(results.params, open('../models/sarimax_211_011-24_model-parameters.pkl', 'wb'))
### # load model
### loaded = ARIMAResults.load('model.pkl')
results = pickle.load(open('../models/sarimax_211_011-24_model.pkl', 'rb'))
pwd
# Print the results summary
print(results.summary())
results
```
#### Plotting the forecast
```
# Generate predictions
one_step_forecast = results.get_prediction(start=-48)
# Extract prediction mean
mean_forecast = one_step_forecast.predicted_mean
# Get confidence intervals of predictions
confidence_intervals = one_step_forecast.conf_int()
# Select lower and upper confidence limits
lower_limits = confidence_intervals.loc[:, 'lower discharge']
upper_limits = confidence_intervals.loc[:, 'upper discharge']
# plot the dranse data
# plot your mean predictions
plt.plot(mean_forecast.index, mean_forecast, color='r', label='forecast')
# shade the area between your confidence limits
plt.fill_between(lower_limits.index, lower_limits,
upper_limits, color='pink')
# set labels, legends and show plot
plt.xlabel('Date')
plt.ylabel('Discharge')
plt.title('hourly forecaset')
plt.legend()
plt.show()
# Generate predictions
dynamic_forecast = results.get_prediction(start=-6, dynamic=True)
# Extract prediction mean
mean_forecast = dynamic_forecast.predicted_mean
# Get confidence intervals of predictions
confidence_intervals = dynamic_forecast.conf_int(alpha=0.32) # 95 percent confidence interval
# Select lower and upper confidence limits
lower_limits = confidence_intervals.loc[:,'lower discharge']
upper_limits = confidence_intervals.loc[:,'upper discharge']
# plot your mean predictions
plt.plot(mean_forecast.index, mean_forecast, color='r', label='forecast')
# shade the area between your confidence limits
plt.fill_between(lower_limits.index, lower_limits,
upper_limits, color='pink', alpha=0.5)
# set labels, legends and show plot
plt.xlabel('Date')
plt.ylabel('Discharge')
plt.title('dynamic forecast')
plt.legend()
```
#### Finding the best model manually
```
# Create empty list to store search results
order_aic_bic=[]
# Loop over p values from 0-2
for p in range(0, 5):
print(p)
# create and fit ARMA(p,q) model
model = SARIMAX(train['discharge'], order=(p,1,1), seasonal_order=(0,1,1,24))
# p - try 0, 1, 2, 3, 4; q is cleary one. Q is clearly 1, P is tapering off: 0.
results = model.fit()
# Append order and results tuple
order_aic_bic.append((p,results.aic, results.bic))
# Construct DataFrame from order_aic_bic
order_df = pd.DataFrame(order_aic_bic,
columns=['p', 'AIC', 'BIC'])
# Print order_df in order of increasing AIC
print(order_df.sort_values('AIC'))
# Print order_df in order of increasing BIC
print(order_df.sort_values('BIC'))
# Create the 4 diagostics plots
results.plot_diagnostics()
plt.show()
# Print summary
print(results.summary())
```
### Forecasting
```
results.forecast(steps=6)
resB.forecast(steps=6)
import river_forecast.api_data_access
import importlib, sys
importlib.reload(river_forecast.api_data_access)
rivermap_data = river_forecast.api_data_access.RivermapDataRetriever()
recent_flow_df = rivermap_data.get_latest_river_flow(n_days=3, station='Dranse')
recent_flow_df
modelB = SARIMAX(recent_flow_df.iloc[:2].asfreq('h'), order=(4,1,1), seasonal_order=(0,1,1,24))
resB = modelB.smooth(results.params)
resB.forecast(steps=6)
from river_forecast.api_data_access import RivermapDataRetriever
data = RivermapDataRetriever().get_standard_dranse_data()
data
import importlib
import river_forecast.forecast
importlib.reload(river_forecast.forecast)
sf = river_forecast.forecast.SARIMAXForecast()
sf.generate_prediction_plot(data)
sf.dynamic_forecast(data)
```
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
from pymedphys_monomanage.tree import PackageTree
import networkx as nx
from copy import copy
package_tree = PackageTree('../../packages')
package_tree.package_dependencies_digraph
package_tree.roots
modules = list(package_tree.digraph.neighbors('pymedphys_analysis'))
modules
internal_packages = copy(package_tree.roots)
internal_packages.remove('pymedphys')
module_paths = [
item
for package in internal_packages
for item in package_tree.digraph.neighbors(package)
]
modules = {
item: os.path.splitext(item)[0].replace(os.sep, '.')
for item in module_paths
}
modules
module_digraph = nx.DiGraph()
dependencies = {
module.replace(os.sep, '.'): [
'.'.join(item.split('.')[0:2])
for item in
package_tree.descendants_dependencies(module)['internal_module'] + package_tree.descendants_dependencies(module)['internal_package']
]
for module in modules.keys()
}
dependencies
dependents = {
key: [] for key in dependencies.keys()
}
for key, values in dependencies.items():
for item in values:
dependents[item].append(key)
dependents
current_modules = [
item.replace(os.sep, '.')
for item in package_tree.digraph.neighbors('pymedphys_analysis')
]
current_modules
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
else:
return text
graphed_module = 'pymedphys_monomanage'
current_modules = [
item.replace(os.sep, '.')
for item in package_tree.digraph.neighbors(graphed_module)
]
current_modules
def simplify(text):
text = remove_prefix(text, "{}.".format(graphed_module))
text = remove_prefix(text, 'pymedphys_')
return text
current_modules
module_internal_relationships = {
module.replace(os.sep, '.'): [
'.'.join(item.split('.')[0:2])
for item in
package_tree.descendants_dependencies(module)['internal_module']
]
for module in package_tree.digraph.neighbors(graphed_module)
}
module_internal_relationships
dag = nx.DiGraph()
for key, values in module_internal_relationships.items():
dag.add_node(key)
dag.add_nodes_from(values)
edge_tuples = [
(key, value) for value in values
]
dag.add_edges_from(edge_tuples)
dag.edges()
def get_levels(dag):
topological = list(nx.topological_sort(dag))
level_map = {}
for package in topological[::-1]:
depencencies = nx.descendants(dag, package)
levels = {0}
for dependency in depencencies:
try:
levels.add(level_map[dependency])
except KeyError:
pass
max_level = max(levels)
level_map[package] = max_level + 1
levels = {
level: []
for level in range(max(level_map.values()) + 1)
}
for package, level in level_map.items():
levels[level].append(package)
return levels
levels = get_levels(dag)
levels
nodes = ""
for level in range(max(levels.keys()) + 1):
if levels[level]:
trimmed_nodes = [
simplify(node) for node in levels[level]
]
grouped_packages = '"; "'.join(trimmed_nodes)
nodes += """
{{ rank = same; "{}"; }}
""".format(grouped_packages)
print(nodes)
edges = ""
current_packages = ""
current_dependents = set()
current_dependencies = set()
for module in current_modules:
module_repr = simplify(module)
current_packages += '"{}";\n'.format(module_repr)
for dependency in dependencies[module]:
simplified = simplify(dependency)
edges += '"{}" -> "{}";\n'.format(module_repr, simplified)
if not dependency in current_modules:
current_dependencies.add(simplified)
for dependent in dependents[module]:
simplified = simplify(dependent)
edges += '"{}" -> "{}";\n'.format(simplified, module_repr)
if not dependent in current_modules:
current_dependents.add(simplified)
external_ranks = ""
if current_dependents:
grouped_dependents = '"; "'.join(current_dependents)
external_ranks += '{{ rank = same; "{}"; }}\n'.format(grouped_dependents)
if current_dependencies:
grouped_dependencies = '"; "'.join(current_dependencies)
external_ranks += '{{ rank = same; "{}"; }}\n'.format(grouped_dependencies)
print(edges)
dot_file_contents = """
strict digraph {{
rankdir = LR;
{}
subgraph cluster_0 {{
{}
label = "{}";
style = dashed;
{}
}}
{}
}}
""".format(external_ranks, current_packages, graphed_module, nodes, edges)
print(dot_file_contents)
subgraph cluster_0 {
style=filled;
color=lightgrey;
node [style=filled,color=white];
a0 -> a1 -> a2 -> a3;
label = "process #1";
}
package_tree.descendants_dependencies('pymedphys_monomanage/parse')
package_tree.imports
list(package_tree.digraph.nodes)
```
|
github_jupyter
|
##### M5_Idol_lyrics/SongTidy 폴더의 전처리 ipnb을 총정리하고, 잘못된 코드를 수정한 노트북
### 가사 데이터(song_tidy01) 전처리
**df = pd.read_csv('rawdata/song_data_raw_ver01.csv')**<br>
**!!!!!!!!!!!!!순서로 df(번호)로 지정!!!!!!!!!!!!!**
1. Data20180915/song_data_raw_ver01.csv 데이터로 시작함 (키스있는지체크)
- 제목에 리믹스,라이브,inst,영일중,ver 인 행
- 앨범에 나가수, 불명, 복면인 행
- 타이틀, 가사, 앨범에 히라가나/가타카나가 들어간 행
- is_title이 nan인 행을 '수록곡'으로 변경
- 가사에\r\r\n을 공백으로 변경
2. 히라가나/가타카나를 제거한 후에도 일본어 가사가 한글로 포함되어 있는 경우<br>--> contains로 확인한뒤 행제거 반복
3. 가사가 모두 영어, 중국어인 경우<br>--> 가사에 한글이 하나도 들어가지 않은 행 제거
4. creator칼럼을 lyricist, composer, arranger로 나눔 --> creator_tidy_kavin_ver02.ipynb참고<br>
**여기서 df4.to_csv('tidydata/tidy01.csv', index=False) 로 중간 저장**
5. 중복노래(띄어쓰기,대소문자,피처링에 의한)를 제거 --> song_tidy_yoon_ver01.ipynb참고<br>
**!!!확인해보니 크롤링시에 발매일순서대로 담기지 않았음. sort by 'artist', 'release_date'로 주고 중복제거하기.**<br>**여기서 df5.to_csv('tidydata/song_tidy01.csv', index=False) 로 저장**
-----------------
### 작사작곡 데이터(lyricist_tidy01) 전처리
**다시 전파일로 불러오기 df6 = pd.read_csv('tidydata/tidy01.csv')**
6. creator가 없는 행 제거
7. 중복노래(띄어쓰기,대소문자,피처링에 의한)를 제거<br>
**여기서 df7.to_csv('tidydata/lyricist_tidy01.csv', index=False) 로 저장**
```
import pandas as pd
import re
df1 = pd.read_csv('C:/Users/pje17/Desktop/Lyricsis/M5_Idol_lyrics/Data/Data20180921/song_data_raw_20180921_ver02.csv')
df1.head()
# 키스없음확인
df1[df1['artist'] == '키스']
df1.shape
# 인덱스칼럼 드랍
df1 = df1.drop(df1.columns[0], axis=1)
df1
# 가사 정보 없는 행 드랍
df1 = df1[df1.lyrics.notnull()]
df1.shape
# 공백 및 줄바꿈 바꿔주기
df1['lyrics'] = df1['lyrics'].str.replace(r'\r\r\r\n|\r\r\n','<br>')
df1['creator'] = df1['creator'].str.replace(r'\r|\n',' ')
df1
# is_title이 nan일 때 = '수록곡'
df1['is_title'] = df1['is_title'].fillna('수록곡')
df1
# 제목에 리믹스,라이브,inst,영일중,ver 인 행 제거
df1 = df1[df1.title.str.contains(r'\(.*\s*([Rr]emix|[Mm]ix|[Ll]ive|[Ii]nst|[Cc]hn|[Jj]ap|[Ee]ng|[Vv]er)\s*.*\)') == False]
# 앨범에 나가수, 불명, 복면인 행 제거
df1 = df1[df1.album.str.contains(r'(가수다|불후의|복면가왕)') == False]
# 타이틀에 히라가나/가타카나가 들어간 행 삭제
df1 = df1[df1.title.str.contains(u'[\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF]+') == False]
df1
# 안지워진 행이 있음
df1.loc[df1['lyrics'].str.contains((u'[\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF]+'), regex=True)]
# 한 번 더 삭제
df1 = df1[df1.lyrics.str.contains(u'[\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF]+') == False]
df1
# 다 삭제 된 것 확인
df1.loc[df1['lyrics'].str.contains((u'[\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF]+'), regex=True)]
df1.info()
```
## --------------------2번 전처리 시작--------------------
```
# 히라가나/가타카나를 제거한 후에도 일본어 가사가 한글로 포함되어 있는 경우 전처리
df2 = df1[df1.lyrics.str.contains(r'(와타시|혼토|아노히|혼또|마센|에가이|히토츠|후타츠|마치노|몬다이|마에노|아메가)') == False]
df2= df2[df2.lyrics.str.contains(r'(히카리|미라이|오나지|춋|카라다|큥|즛또|나캇|토나리|못또|뎅와|코이|히토리|맛스구|후타리|케시키|쟈나이|잇슌|이츠모|아타라|덴샤|즈쿠|에가오|소라오|난테|고멘네|아이시테|다키시|유메|잇탄다|소레|바쇼)') == False]
df2= df2[df2.lyrics.str.contains(r'(키미니|보쿠|세카이|도코데|즛토|소바니|바쇼|레루|스베테|탓테|싯테|요쿠)') == False]
# 450곡 제거
df2.info()
```
## --------------------3번 전처리 시작--------------------
```
# 한글이 한글자라도 나오는 것만 저장합니다.
# 469곡 제거
df3 = df2[df2.lyrics.str.contains(r'[가-힣]+') == True]
df3.info()
```
## --------------------4번 전처리 시작--------------------
```
# creator칼럼을 lyricist, composer, arranger로 나누기
df4 = df3.copy()
# 리인덱스해줘야 안밀림
df4 = df4.reset_index(drop=True)
# 전처리 함수
def preprocess(text):
splitArr = list(filter(None, re.split("(작사)|(작곡)|(편곡)", text)))
lyricist = []
composer = []
arranger = []
lyricist.clear()
composer.clear()
arranger.clear()
i = 0
for i in range(0, len(splitArr)):
if splitArr[i] == "작사":
lyricist.append(splitArr[i-1].strip())
elif splitArr[i] == "작곡":
composer.append(splitArr[i-1].strip())
elif splitArr[i] == "편곡":
arranger.append(splitArr[i-1].strip())
i = i + 1
result = [', '.join(lyricist), ', '.join(composer), ', '.join(arranger)]
return result
# 행마다 작사/작곡/편곡가 전처리 결과 보기
preprocess(df4.creator[0])
# song 데이터프레임 전처리함수 이용하여 전처리 후 dataframe 추가로 만들기
i = 0
lyricist = []
composer = []
arranger = []
lyricist.clear()
composer.clear()
arranger.clear()
for i in range(0, len(df4)):
try:
lyricist.append(str(preprocess(df4.creator[i])[0]))
composer.append(str(preprocess(df4.creator[i])[1]))
arranger.append(str(preprocess(df4.creator[i])[2]))
except:
lyricist.append('')
composer.append('')
arranger.append('')
preprocessing_result = pd.DataFrame({"lyricist" : lyricist, "composer" : composer, "arranger" : arranger})
# 인덱스 3 은 리믹스라 제거되어서 안보임
preprocessing_result.head()
# 두 개의 데이터프레임 길이가 같은지 확인
len(df4) == len(preprocessing_result)
# 두 개의 데이터프레임 합치기
df4 = pd.concat([df4, preprocessing_result], axis=1)
df4
# 여기서 df4.to_csv('tidydata/tidy01.csv', index=False) 로 중간 저장
df4.to_csv('tidy03.csv', index=False)
```
## --------------------5번 전처리 시작--------------------
```
df5 = df4.copy()
# 발매일이 널값인 곡이 남지 않도록 널값인 것들은 미래의 날짜로 채워준다.
d = {'':'2019.01.01','-':'2019.01.01'}
df5['release_date'] = df5['release_date'].replace(d)
# 확인해보니 크롤링시에 발매일순서대로 담기지 않았음
# !!!!!!sort by 'artist', 'release_date'로 주고 중복제거하기
df5 = df5.sort_values(by=['artist', 'release_date'])
# 중복노래(띄어쓰기,대소문자,피처링에 의한)를 제거
# 제목의 공백(띄어쓰기)를 모두 제거한다
df5['title'] = df5['title'].str.replace(r' ', '')
# 제목의 영어 부분을 전부 소문자로 바꿔준다
df5['title'] = df5['title'].str.lower()
# 그리고 다시 중복값을 제거해준다.
# !!!!!!가장 오래된노래가 위로 올라오므로 keep='first'로 주기
df5 = df5.drop_duplicates(['artist', 'title'], keep='first')
# 중복 값을 찍어보니 잘 지워졌다! (띄어쓰기 제거 테스트)
df5[df5['title'] == '결혼 하지마']
df5[df5['title'] == '결혼하지마']
# 중복 값을 찍어보니 잘 지워졌다! (영어 대->소문자 변환 테스트)
df5[df5['title'] == '어이\(UH-EE\)']
df5[df5['title'].str.contains('어이\(uh-ee\)')]
# 제목 열을 새로 만들어서
df5['t'] = df5['title']
# 괄호 안의 부분을 없앤다.
df5.t = df5.t.str.replace(r'\(.*?\)','')
# 새로 만든 열의 중복값을 제거한다.
df5 = df5.drop_duplicates(['artist', 't'], keep='first')
# 새로 만든 열을 다시 지워준다.
df5 = df5.drop('t', axis = 1)
# 중복 값을 찍어보니 잘 지워졌다! (하나만 남음) (피처링 다른 버전 제거 테스트)
df5[df5['title'].str.contains('highwaystar')]
df5.info()
df5[df5['title'] == '해석남녀']
# 남아있는 2019년 곡은 오투알 뿐
df5[df5['release_date'] == '2019.01.01']
d = {'2019.01.01':'2002.07.19'}
df5['release_date'] = df5['release_date'].replace(d)
# 여기서 df5.to_csv('tidydata/song_tidy01.csv', index=False) 로 저장
df5.to_csv('song_tidy03.csv', index=False)
```
## 가사 데이터 전처리 끝
------------------------------
## 작사작곡 데이터 전처리 시작--------------------6번 전처리 시작--------------------
```
# 위의 df4를 파일로 불러오기 df6 = pd.read_csv('tidydata/tidy01.csv')
df6 = pd.read_csv('tidy03.csv')
df6
# 확인해보니 크롤링시에 발매일순서대로 담기지 않았음
# !!!!!!sort by 'artist', 'release_date'로 주고 시작하기
df6 = df6.sort_values(by=['artist', 'release_date'])
df6
# creator가 없는 행 제거
df6 = df6[pd.notnull(df6['creator'])]
df6
```
## --------------------7번 전처리 시작--------------------
```
# 중복노래(띄어쓰기,대소문자,피처링에 의한)를 제거 << 위 과정 반복
df7 = df6.copy()
# 제목의 공백(띄어쓰기)를 모두 제거한다
df7['title'] = df7['title'].str.replace(r' ', '')
# 제목의 영어 부분을 전부 소문자로 바꿔준다
df7['title'] = df7['title'].str.lower()
# 그리고 다시 중복값을 제거해준다.
df7 = df7.drop_duplicates(['artist', 'title'], keep='first')
# 제목 열을 새로 만들어서
df7['t'] = df7['title']
# 괄호 안의 부분을 없앤다.
df7.t = df7.t.str.replace(r'\(.*?\)','')
# 새로 만든 열의 중복값을 제거한다.
df7 = df7.drop_duplicates(['artist', 't'], keep='first')
# 새로 만든 열을 다시 지워준다.
df7 = df7.drop('t', axis = 1)
# 중복 값을 찍어보니 잘 지워졌다! (하나만 남음) (피처링 다른 버전 제거 테스트)
df7[df7['title'].str.contains('highwaystar')]
# 가사데이터와 다르게 전처리 되었음 확인
df7.info()
# 여기서 df7.to_csv('tidydata/lyricist_tidy01.csv', index=False) 로 저장
df7.to_csv('lyricist_tidy03.csv', index=False)
```
|
github_jupyter
|
```
from linebot import LineBotApi
from linebot.exceptions import LineBotApiError
```
# 官方DEMO- Message Type :https://developers.line.me/en/docs/messaging-api/message-types/
# Doc : https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/send_messages.py
```
CHANNEL_ACCESS_TOKEN = "YOUR CHANNEL TOKEN"
# user ID - Get by reply message object.
to = "YOUR USER ID"
from linebot.models import TextSendMessage
```
# TextSendMessage
```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
try:
line_bot_api.push_message(to, TextSendMessage(text='台科大電腦研習社'))
except LineBotApiError as e:
# error handle
raise e
```
# Output

```
from linebot.models import ImageSendMessage, VideoSendMessage, LocationSendMessage, StickerSendMessage
```
# ImageSendMessage
### 連結需要使用https
物件中的輸入 original_content_url 以及 preview_image_url都要寫才不會報錯。<br>
輸入的網址要是一個圖片,應該說只能是一個圖片,不然不會報錯但是傳過去是灰色不能用的圖<br>
他圖片的功能都有點問題,我都要丟到imgur圖床才能鏈結。。。<br>
直接丟進github 用raw也不能讀<br>
```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
image_url = "https://i.imgur.com/eTldj2E.png?1"
try:
line_bot_api.push_message(to, ImageSendMessage(original_content_url=image_url, preview_image_url=image_url))
except LineBotApiError as e:
# error handle
raise e
```
# Output

```
from linebot.models import LocationSendMessage
```
# LocationSendMessage
```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
title = "國立臺灣科技大學"
address = "106台北市大安區基隆路四段43號"
latitude = 25.0136906
longitude = 121.5406792
try:
line_bot_api.push_message(to, LocationSendMessage(title=title,
address=address,
latitude=latitude,
longitude=longitude))
except LineBotApiError as e:
# error handle
raise e
```
# Output

```
from linebot.models import StickerSendMessage
```
# StickerSendMessage
照下面這段話的意思是說,只能用預設的!!!<br>
Message object which contains the sticker data sent from the source.<br>
For a list of basic LINE stickers and sticker IDs, see sticker list.<br>
stick list:https://developers.line.me/media/messaging-api/messages/sticker_list.pdf
```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
package_id = "1"
sticker_id = "1"
# package_id = "1181660"
# sticker_id = "7389429"
try:
line_bot_api.push_message(to, StickerSendMessage(package_id=package_id, sticker_id=sticker_id))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# ImagemapSendMessage
```
from linebot.models import ImagemapSendMessage, BaseSize, URIImagemapAction, MessageImagemapAction, ImagemapArea
```
這邊解説一下
輸入一張圖片的網址https,
他會顯示一張圖片,
但是可以對這張圖片的點選範圍做一些操作
例如對某區塊點擊會發生什麼事
舉例:輸入一張圖片(如下圖)by colors https://coolors.co/ffb8d1-e4b4c2-e7cee3-e0e1e9-ddfdfe
Imagemap 讓我們可以對圖片的區塊(給定一個範圍)做操作,<br>
例如我們要使用<br>
最左邊的顏色<br>
點擊後輸出色票<br>
<br>
最右邊的顏色<br>
點擊後轉至網址<br>
他圖片的功能都有點問題,我都要丟到imgur圖床才能鏈結。。。<br>
直接丟進github 用raw也不能讀<br>
而且他不會自動縮放,會裁掉

```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
# image_url = "https://raw.githubusercontent.com/xiaosean/Line_tutorial/master/img/colors.png"
image_url = "https://i.imgur.com/mB9yDO0.png"
text = "#FFB8D1"
click_link_1 = "https://www.facebook.com/ntustcc"
try:
line_bot_api.push_message(to, ImagemapSendMessage(base_url=image_url,
alt_text="ImageMap Example",
base_size=BaseSize(height=1040, width=1040),
actions=[
MessageImagemapAction(
text=text,
area=ImagemapArea(
x=0, y=0, width=1040/5, height=1040
)
),
URIImagemapAction(
link_uri=click_link_1,
area=ImagemapArea(
x=int(1040*0.8), y=0, width=int(1040/5), height=1040
)
)
]))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# TemplateSendMessage - ButtonsTemplate 只可在智慧手機上顯示
doc:https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/template.py
這部分我建議看這個人所寫的 - 他的圖片很用心,真好看!!
https://ithelp.ithome.com.tw/articles/10195640?sc=iThomeR
```
from linebot.models import TemplateSendMessage, ButtonsTemplate, PostbackTemplateAction, MessageTemplateAction, URITemplateAction
button_template_message =ButtonsTemplate(
thumbnail_image_url="https://i.imgur.com/eTldj2E.png?1",
title='Menu',
text='Please select',
ratio="1.51:1",
image_size="cover",
actions=[
# PostbackTemplateAction 點擊選項後,
# 除了文字會顯示在聊天室中,
# 還回傳data中的資料,可
# 此類透過 Postback event 處理。
PostbackTemplateAction(
label='postback還會回傳data參數',
text='postback text',
data='action=buy&itemid=1'
),
MessageTemplateAction(
label='message會回傳text文字', text='message text'
),
URITemplateAction(
label='uri可回傳網址', uri='http://www.xiaosean.website/'
)
]
)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
try:
# alt_text 因template只能夠在手機上顯示,因此在PC版會使用alt_Text替代
line_bot_api.push_message(to, TemplateSendMessage(alt_text="Template Example", template=button_template_message))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# TemplateSendMessage - CarouselTemplate 只可在智慧手機上顯示
他和前一個的差別是他可以一次傳送多個Template並且可以用旋轉的方式轉過去 1...n
```
from linebot.models import TemplateSendMessage, CarouselTemplate, CarouselColumn, ButtonsTemplate, PostbackTemplateAction, MessageTemplateAction, URITemplateAction
image_url_1 = "https://i.imgur.com/eTldj2E.png?1"
image_url_2 = "https://i.imgur.com/mB9yDO0.png"
click_link_1 = "https://www.facebook.com/ntustcc"
click_link_2 = "https://www.facebook.com/ntustcc"
carousel_template = template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url=image_url_1,
title='template-1',
text='text-1',
actions=[
PostbackTemplateAction(
label='postback-1',
text='postback text1',
data='result=1'
),
MessageTemplateAction(
label='message-1',
text='message text1'
),
URITemplateAction(
label='uri-1',
uri=click_link_1
)
]
),
CarouselColumn(
thumbnail_image_url=image_url_2,
title='template-2',
text='text-2',
actions=[
PostbackTemplateAction(
label='postback-2',
text='postback text2',
data='result=2'
),
MessageTemplateAction(
label='message-2',
text='message text2'
),
URITemplateAction(
label='link-2',
uri=click_link_2
)
]
)]
)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
try:
# alt_text 因template只能夠在手機上顯示,因此在PC版會使用alt_Text替代
line_bot_api.push_message(to, TemplateSendMessage(alt_text="Carousel Template Example", template=carousel_template))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# TemplateSendMessage - ImageCarouselTemplate 只可在智慧手機上顯示
他和前一個的差別是整個版面都是圖片和一行文字,較為簡潔,請看結果。
```
from linebot.models import TemplateSendMessage, ImageCarouselTemplate, ImageCarouselColumn, PostbackTemplateAction, MessageTemplateAction, URITemplateAction
image_url_1 = "https://i.imgur.com/eTldj2E.png?1"
image_url_2 = "https://i.imgur.com/mB9yDO0.png"
carousel_template = template=ImageCarouselTemplate(
columns=[
ImageCarouselColumn(
image_url=image_url_1,
action=MessageTemplateAction(
label='message-1',
text='message text1'
)
),
ImageCarouselColumn(
image_url=image_url_2,
action=PostbackTemplateAction(
label='postback-2',
text='postback text2',
data='result=2'
),
)]
)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
try:
# alt_text 因template只能夠在手機上顯示,因此在PC版會使用alt_Text替代
line_bot_api.push_message(to, TemplateSendMessage(alt_text="Image Carousel Template Example", template=carousel_template))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# TemplateAction有個DatetimePickerTemplateAction
```
from linebot.models import TemplateSendMessage, ButtonsTemplate, DatetimePickerTemplateAction
button_template_message =ButtonsTemplate(
thumbnail_image_url="https://i.imgur.com/eTldj2E.png?1",
title='Menu',
text='Please select',
actions=[
DatetimePickerTemplateAction(
label="datetime picker date",
# 等同PostbackTemplateAction中的data, in the postback.data property of the postback event
data="action=sell&itemid=2&mode=date",
mode="date",
initial="2013-04-01",
min="2011-06-23",
max="2017-09-08"
),
DatetimePickerTemplateAction(
label="datetime picker time",
data="action=sell&itemid=2&mode=time",
mode="time",
initial="10:00",
min="00:00",
max="23:59"
)
# below part failed, I have reported issue
# https://github.com/line/line-bot-sdk-python/issues/100
# DatetimePickerTemplateAction(
# label="datetime picker datetime",
# data="action=sell&itemid=2&mode=datetime",
# mode="datetime",
# initial="2018-04-01T10:00",
# min="2011-06-23T00:00",
# max="2019-09-08T23:59"
# )
]
)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
try:
# alt_text 因template只能夠在手機上顯示,因此在PC版會使用alt_Text替代
line_bot_api.push_message(to, TemplateSendMessage(alt_text="Template Example", template=button_template_message))
except LineBotApiError as e:
# error handle
raise e
```
# Output

# FileMessage - 沒實作
DOC https://github.com/line/line-bot-sdk-python/blob/master/linebot/models/messages.py
```
from linebot.models import VideoSendMessage
```
# VideoSendMessage - 我沒試成功 Failed
文件中的input有說"影片時長要 < 1miuntes" <br>
<br>
我猜拉 只要找到網址後綴是.mp4應該就可以<br>
只是我找不到這種影片<br>
```
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
viedo_url = ""
image_url = ""
try:
line_bot_api.push_message(to, VideoSendMessage(original_content_url=viedo_url, preview_image_url=image_url))
except LineBotApiError as e:
# error handle
raise e
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction import DictVectorizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.inspection import plot_partial_dependence
from dtreeviz.trees import *
import scipy as sp
from scipy.cluster import hierarchy as hc
import sys
sys.path.append('..')
from fpl_predictor.util import *
# path to project directory
path = Path('../')
# read in training dataset
train_df = pd.read_csv(path/'fpl_predictor/data/train_v8.csv',
index_col=0,
dtype={'season':str,
'squad':str,
'comp':str})
```
## Random Forest
Random Forest is an ensemble tree-based predictive algorithm. In this case we will be using it for regression - we want to predict a continuous number, predicted points, for each player each game. It works by training many separate decision trees, each using a subset of the training data, and outputs the average prediction across all trees.
Applying it to a time series problem, where metrics from recent time periods can be predicitve, requires us to add in window features (e.g. points scored last gameweek). These are created using the player_lag_features function from 00_fpl_features.
```
# add a bunch of player lag features
lag_train_df, team_lag_vars = team_lag_features(train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10])
lag_train_df, player_lag_vars = player_lag_features(lag_train_df, ['total_points'], ['all', 1, 2, 3, 4, 5, 10])
```
Similar to the simple model, we'll set the validation period to be gameweeks 20-25 of the 2019/20 season - the model will be trained on all data prior to that period. This time however, we'll be using some additional features: the season, gameweek, player position, home/away, and both teams, as well as all the lagging features we created above.
```
# set validaton point/length and categorical/continuous variables
valid_season = '2021'
valid_gw = 20
valid_len = 6
cat_vars = ['season', 'position', 'was_home', 'team', 'opponent_team']
cont_vars = ['gw']#, 'minutes']
dep_var = ['total_points']
```
Some of the features have an order (2019/20 season is after 2019 season) whereas others do not (position). We can set this in the data where appropriate using an ordered category (e.g. 1617 < 1718 < 1819 < 1920 < 2021).
```
# we want to set gw and season as ordered categorical variables
# need lists with ordered categories
ordered_gws = list(range(1,39))
ordered_seasons = ['1617', '1718', '1819', '1920', '2021']
# set as categories with correct order
lag_train_df['gw'] = lag_train_df['gw'].astype('category')
lag_train_df['season'] = lag_train_df['season'].astype('category')
lag_train_df['gw'].cat.set_categories(ordered_gws, ordered=True, inplace=True)
lag_train_df['season'].cat.set_categories(ordered_seasons, ordered=True, inplace=True)
lag_train_df['season']
```
And now we can go ahead and create our training and validation sets using the function we defined in the last notebook.
```
# create dataset with adjusted post-validation lag numbers
train_valid_df, train_idx, valid_idx = create_lag_train(lag_train_df,
cat_vars, cont_vars,
player_lag_vars, team_lag_vars, dep_var,
valid_season, valid_gw, valid_len)
```
The way we calculate our lag features means that there will be null values in our dataset. This will cause an error when using random forest in scikit learn, so we will set them all to zero for now (although note that this may not be the best fill strategy).
```
lag_train_df[~np.isfinite(lag_train_df['total_points_pg_last_1'])]
# imp = SimpleImputer(missing_values=np.nan, strategy='mean')
# need to think about imputing NaN instead of setting to zero
# imp.fit(X_train[team_lag_vars + player_lag_vars])
train_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0)
```
The random forest regressor will only take numbers as inputs, so we need to transform our caterogical features into a format that the random forest regressor object will be able to use, numbers instead of strings in one or more columns.
```
# split out dependent variable
X, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy()
# since position is categorical, it should be a string
X['position'] = X['position'].apply(str)
# need to transform season
enc = LabelEncoder()
X['season'] = enc.fit_transform(X['season'])
X_dict = X.to_dict("records")
# Create the DictVectorizer object: dv
dv = DictVectorizer(sparse=False, separator='_')
# Apply dv on df: df_encoded
X_encoded = dv.fit_transform(X_dict)
X_df = pd.DataFrame(X_encoded, columns=dv.feature_names_)
```
For example, season is now represented by a number (0 -> 2016/17, 1 -> 2017/18, etc.) in a single column, and position is represented by a 1 or 0 in multiple columns.
```
X_df[['season', 'position_1', 'position_2', 'position_3', 'position_4']]
X_df.columns
```
Let's now split out our training (everything prior to the validation gameweek) and validation (6 gameweeks from the validation gameweek, only rows with >0 minutes)
```
# split out training and validation sets
X_train = X_df.loc[train_idx]
y_train = y.loc[train_idx]
X_test = X_df.loc[valid_idx]
# we only want look at rows with >0 minutes (i.e. the player played)
# test_mask = (X_test['minutes'] > 0)
# X_test = X_test[test_mask]
# y_test = y.loc[valid_idx][test_mask]
y_test = y.loc[valid_idx]
# X_train = X_train.drop('minutes', axis=1)
# X_test = X_test.drop('minutes', axis=1)
```
We can now create the RandomForestRegessor with set parameters, train using the training data, and look at the error on the validation set.
```
# def rf(xs, y, n_estimators=40, max_samples=50_000,
# max_features=0.5, min_samples_leaf=5, **kwargs):
# return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators,
# max_samples=max_samples, max_features=max_features,
# min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)
def rf(xs, y, max_depth=7, **kwargs):
return RandomForestRegressor(n_jobs=-1, max_depth=max_depth, oob_score=True).fit(xs, y)
# fit training data
m = rf(X_train, y_train.values.ravel())
# predict validation set and output metrics
preds = m.predict(X_test)
print("RMSE: %f" % (r_mse(preds, y_test.values.ravel())))
print("MAE: %f" % mae(preds, y_test.values.ravel()))
```
Right away this looks like it's a significant improvement on the simple model, good to see. Let's go ahead and use the same approach with validation across the whole of the 2019/20 season.
```
def rf_season(df, valid_season='2021'):
# empty list for scores
scores = []
valid_len = 6
for valid_gw in range(1,40-valid_len):
# create dataset with adjusted post-validation lag numbers
train_valid_df, train_idx, valid_idx = create_lag_train(df, cat_vars, cont_vars,
player_lag_vars, team_lag_vars, dep_var,
valid_season, valid_gw, valid_len)
train_valid_df[team_lag_vars + player_lag_vars] = train_valid_df[team_lag_vars + player_lag_vars].fillna(0)
# split out dependent variable
X, y = train_valid_df[cat_vars + cont_vars + team_lag_vars + player_lag_vars].copy(), train_valid_df[dep_var].copy()
# since position is categorical, it should be a string
X['position'] = X['position'].apply(str)
# need to transform season
enc = LabelEncoder()
X['season'] = enc.fit_transform(X['season'])
X_dict = X.to_dict("records")
# Create the DictVectorizer object: dv
dv = DictVectorizer(sparse=False, separator='_')
# Apply dv on df: df_encoded
X_encoded = dv.fit_transform(X_dict)
X_df = pd.DataFrame(X_encoded, columns=dv.feature_names_)
# split out training and validation sets
X_train = X_df.loc[train_idx]
y_train = y.loc[train_idx]
X_test = X_df.loc[valid_idx]
# we only want look at rows with >0 minutes (i.e. the player played)
# test_mask = (X_test['minutes'] > 0)
# X_test = X_test[test_mask]
# y_test = y.loc[valid_idx][test_mask]
y_test = y.loc[valid_idx]
m = rf(X_train, y_train.values.ravel())
preds, targs = m.predict(X_test), y_test.values.ravel()
gw_mae = mae(preds, targs)
print("GW%d MAE: %f" % (valid_gw, gw_mae))
scores.append(gw_mae)
return scores
scores = rf_season(lag_train_df)
plt.plot(scores)
plt.ylabel('GW MAE')
plt.xlabel('GW')
plt.text(15, 1.55, 'Season Avg MAE: %.2f' % np.mean(scores), bbox={'facecolor':'white', 'alpha':1, 'pad':5})
plt.show()
```
Looking across the whole season we see about a 10% improvement versus the simple model. Also interesting is that the performance again improves as the season progresses - this makes sense, more data about each of teams and players (particularly new ones) means improved ability to predict the next 6 gameweeks.
Let's add these validation scores to our comparison dataset.
```
model_validation_scores = pd.read_csv(path/'charts/model_validation_scores.csv', index_col=0)
model_validation_scores['random_forest'] = scores
model_validation_scores.to_csv(path/'charts/model_validation_scores.csv')
```
A feature of the random forest algorithm is that we can see how often features are being used in trees. This will give us an indication of how important each feature is i.e. is it predictive of todal points scored. Simple models are usually better, so this also gives us a way of seeing if there are any features that are not particularly useful, and can therefore be removed.
```
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
fi = rf_feat_importance(m, X_train)
fi[:32]
def plot_fi(fi):
return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False).invert_yaxis()
plot_fi(fi[:30]);
```
At the moment this algorithm is given minutes played in the gameweek so it's unsurprising that this is by far the most important feature - the more minutes a player plays, the more opportunity to score points. But strictly speaking we don't actually have this information prior to a gameweek (in practice it is estimated using previous minutes and injury status), so we can ignore it for now.
Below that the top features are:
1. minutes_last_1 - number of minutes in the last fixture for the player
2. minutes_last_2 - number of minutes in the last two fixtures for the player
3. total_points_pg_last_all - the player's average points per game in all of history (since start of 2016/17 season)
4. total_points_team_pg_last_all_opponent - the opposition's average points per game in all of history
5. minutes_last_3 - number of minutes in the last three fixtures for the player
6. total_points_team_pg_last_all - the player's team's average points per game in all of history
7. total_points_pg_last_10 - the player's average points per game in the last 10 fixtures
8. total_points_pg_last_1 - the player's average points per game in the last fixture
This is interesting. It seems to be saying that the amount of minutes a player has played recently and their underlying ability to score points in all of history, along with their team's and opponent team's points scoring in all of history, is most important.
Recent performance (i.e. 'form') is also important, but to a lesser extent.
It also shows that the lag features are far more useful than the categorical features such as team, opponent and position. Again not too surprising since information on these categories are already captured in the lag features.
Let's test this... we can remove anything with a feature importance of less than 0.005 and see how the model performs on the original 2019/20 week 20 validation point (going from 94 features to just 32).
```
to_keep = fi[fi.imp>0.005].cols
len(to_keep)
len(X_train.columns)
X_train_imp = X_train[to_keep]
X_test_imp = X_test[to_keep]
m = rf(X_train_imp, y_train.values.ravel())
mae(m.predict(X_test_imp), y_test.values.ravel())
# mae(m.predict(X_train_imp), y_train.values.ravel())
```
Very similar albeit slightly higher error (less than 1% worse performance) than previously, and still a long way ahead of the simple model.
Continuing our thinking about improving/simplifying the model features, we can also look to see if there are any similar features - quite often we will find that some features are so similar that some of them may be redundant.
The following function determines the similarity between columns in a dataset and visualises it using a dendrogram.
```
def cluster_columns(df, figsize=(10,6), font_size=12):
corr = np.round(sp.stats.spearmanr(df).correlation, 4)
corr_condensed = hc.distance.squareform(1-corr)
z = hc.linkage(corr_condensed, method='average')
fig = plt.figure(figsize=figsize)
hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size)
plt.show()
cluster_columns(X_train_imp)
```
We can see that our lagging features are somewhat similar - absolutely expected since, for example, minutes_last_5 is equal to minutes_last_4 + minutes 5 games ago. They are still different enough to be of value separately, but it does make me wonder whether separating out each historic game in some way (up to a point) would be valuable.
A final useful tool we can use is partial dependency plots. These try to look at the impact of single features on the dependent variable (points scored).
```
fig,ax = plt.subplots(figsize=(12, 3))
plot_partial_dependence(m, X_test_imp, ['total_points_pg_last_all',
'total_points_team_pg_last_all_opponent',
'total_points_pg_last_1'],
grid_resolution=20, ax=ax);
```
Again, these make sense. The higher a player's historic points per game (defined as 90 minutes) is, the higher we predict their score will be. Conversely, the higher their opposition's historic points per game, the harder they are as an opponent and the lower their predicted score will be.
Looking at the player's most recent game, again the higher their score, the more it will push up our prediction (the impact of their 'form'), but the relationship is far weaker than the player's underlying per minute scoring stats.
Here we just try to look at features in isolation, there will lots of interactions going on between features that improve performance. For example, a player may have a high 'total_points_pg_last_1' from the previous fixture but only played 5 minutes in total - in this case the algorithm is likely to have learned that a high 'total_points_pg_last_1' coupled with a low 'minutes_last_1' is not an indicator that the player will score higher in the next fixture.
Ok, now we can move onto the next algorithm - xgboost.
|
github_jupyter
|
# Heikin-Ashi PSAR Strategy
_Roshan Mahes_
In this tutorial, we implement the so-called _Parabolic Stop and Reverse (PSAR)_ strategy. Given any stock, currency or commodity, this indicator tells us whether to buy or sell the stock at any given time. The momentum strategy is based on the open, high, low and close price for each time period. This can be represented with a traditional Japanese candlestick chart. Later on, we apply the PSAR strategy on so-called Heikin-Ashi ('average bar') data, which reduces some noise, making it easier to identify trends.
The following packages are required:
```
%pip install pandas
%pip install yfinance
%pip install plotly
```
Now we can import the following modules:
```
import os
import pandas as pd
import yfinance as yf
import plotly.graph_objects as go
```
This strategy works on any stock. In this notebook, we take the stock of Apple, represented by the ticker symbol AAPL. Let's download the pricing data and plot a (Japanese) candlestick chart:
```
symbol = 'AAPL'
df = yf.download(symbol, start='2020-01-01')
df.index = df.index.strftime('%Y-%m-%d') # format index as dates only
candles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close)
# plot figure
fig = go.Figure(candles)
fig.layout.xaxis.type = 'category' # remove weekend days
fig.layout.xaxis.dtick = 20 # show x-axis ticker once a month
fig.layout.xaxis.rangeslider.visible = False
fig.layout.title = f'Japanese Candlestick Chart ({symbol})'
fig.layout.template = 'plotly_white'
fig.show()
```
## The PSAR Indicator
The _Parabolic Stop and Reverse (PSAR) indicator,_ developed by J. Wells Wilder, is a momentum indicator used by traders to determine trend direction and potential reversals in price. It is a trend-following (lagging) indicator that uses a trailing stop and reverse method called SAR (Stop and Reverse), to identify suitable exit and entry points. The concept draws on the idea that 'time is the enemy', i.e., unless a security can continue to generate more profits over time, it should be liquidated.
The PSAR indicator appears on a chart as a series of dots, either above or below an asset's price, depending on the direction the price is moving. A dot is placed below the price when it is trending upward, and above the price when it is trending downward. There is a dot for every price bar, hence the indicator is always producing information.
The parabolic SAR is calculated almost independently for each trend in the price. When the price is in an uptrend, the SAR emerges below the price and converges upwards towards it. Similarly, on a downtrend, the SAR emerges above the price and converges downwards. At each step within a trend, the SAR is calculated one period in advance, i.e., tomorrow's SAR value is built using data available today. The general formula used for this is:
\begin{align*}
SAR_t = SAR_{t-1} + \alpha_t (EP_t - SAR_{t-1}),
\end{align*}
where $SAR_t$ is the SAR value at time $t$.
The _extreme point_ $EP$ is a record kept during each trend that represents the highest value reached by the price during the current uptrend, or lowest value during a downtrend. During each period, if a new maximum (or minimum) is observed, the EP is updated with that value.
The $\alpha$ value is the _acceleration factor._ Usually, this is initially set to a value of $0.02$. The factor is increased by $0.02$ each time a new EP is recorded. The rate will then quicken to a point where the SAR converges towards the price. To prevent it from getting too large, a maximum value for the acceleration factor is normally set to $0.20$. Generally, it is preferable in stocks to set the acceleration factor to $0.01$ so that it is not too sensitive to local decreases, whereas for commodity or currency trading the preferred value is $0.02$.
There are special cases that modify the SAR value:
1. If the next period's SAR value is inside (or beyond) the current period or the previous period's price range, the SAR must be set to the closest price bound. For example, if in an upward trend, the new SAR value is calculated and if it results to be more than today's or yesterday's lowest price, it must be set equal to that lower boundary.
2. If the next period's SAR value is inside (or beyond) the next period's price range, a new trend direction is then signaled. The SAR must then switch sides.
3. Upon a trend switch, the first SAR value for this new trend is set to the last $EP$ recorded on the prior trend. Then, the $EP$ is reset accordingly to this period's maximum, and the acceleration factor is reset to its initial value of $0.01$ (stocks) or $0.02$ (commodities/currencies).
As we can see, it's quite a difficult strategy as the formulas are not that straightforward. We have implemented it in the following function:
```
def PSAR(df, alpha_start=0.01):
"""
Returns the dataframe with the given PSAR indicator for each time period.
"""
trend = 0
alpha = alpha_start
SAR = [df['Open'][0]] + [0] * (len(df) - 1)
isUpTrend = lambda x: x > 0
trendSwitch = lambda x: abs(x) == 1
# initialisation
if df['Close'][1] > df['Close'][0]:
trend = 1
SAR[1] = df['High'][0]
EP = df['High'][1]
else:
trend = -1
SAR[1] = df['Low'][0]
EP = df['Low'][1]
# recursion
for t in range(2,len(df)):
# general formula
SAR_new = SAR[t-1] + alpha * (EP - SAR[t-1])
# case 1 & 2
if isUpTrend(trend):
SAR[t] = min(SAR_new, df['Low'][t-1], df['Low'][t-2])
if SAR[t] > df['Low'][t]:
trend = -1
else:
trend += 1
else:
SAR[t] = max(SAR_new, df['High'][t-1], df['High'][t-2])
if SAR[t] < df['High'][t]:
trend = 1
else:
trend -= 1
# case 3
if trendSwitch(trend):
SAR[t] = EP
alpha = alpha_start
if isUpTrend(trend):
EP_new = df['High'][t]
else:
EP_new = df['Low'][t]
else:
if isUpTrend(trend):
EP_new = max(df['High'][t], EP)
else:
EP_new = min(df['Low'][t], EP)
if EP != EP_new:
alpha = min(alpha + 0.02, 0.20)
# update EP
EP = EP_new
# store values
df['SAR'] = SAR
df['Signal'] = (df['SAR'] < df['Close']).apply(int).diff() # records trend switches
return df
```
After applying the PSAR strategy on Apple's stock, we end up with the following trading decisions:
```
# apply PSAR
df = PSAR(df)
# extract trend switches (buying/selling advice)
buy = df.loc[df['Signal'] == 1]
sell = df.loc[df['Signal'] == -1]
# candles & psar
candles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close, name='candles')
psar = go.Scatter(x=df.index, y=df['SAR'], mode='markers', name='PSAR', line={'width': 10, 'color': 'midnightblue'})
# buy & sell symbols
buys = go.Scatter(x=buy.index, y=buy.Close, mode='markers', marker_size=15, marker_symbol=5,
marker_color='green', name='Buy', marker_line_color='black', marker_line_width=1)
sells = go.Scatter(x=sell.index, y=sell.Close, mode='markers', marker_size=15, marker_symbol=6,
marker_color='red', name='Sell', marker_line_color='black', marker_line_width=1)
# plot figure
fig = go.Figure(data=[candles, psar, buys, sells])
fig.layout.xaxis.type = 'category' # remove weekend days
fig.layout.xaxis.dtick = 20 # show x-axis ticker once a month
fig.layout.xaxis.rangeslider.visible = False
fig.layout.title = f'PSAR indicator ({symbol})'
fig.layout.template = 'plotly_white'
fig.show()
```
We see that most of the times our indicator predicted a correct trend! Instead of using the open, high, low and close data, represented by this traditional candlestick chart, we can also apply the PSAR strategy on so-called _Heikin-Ashi charts_.
## Heikin-Ashi Charts
_Heikin-Ashi_ means 'average bar' in Japanese. Heikin-Ashi charts, developed by Munehisa Homma in the 1700s, display prices that, at a glance, look similar to a traditional Japanese chart. The Heikin-Ashi technique averages price data to create a Japanese candlestick chart that filters out market noise. Instead of using the open, high, low, and close like standard candlestick charts, the Heikin-Ashi technique uses a modified formula based on two-period averages. This gives the chart a smoother appearance, making it easier to spots trends and reversals, but also obscures gaps and some price data.
The formulas are as follows:
\begin{align*}
H_{open,t} &= \frac{H_{open,t-1} + H_{close,t-1}}{2}, \\
H_{close,t} &= \frac{C_{open,t} + C_{high,t} + C_{low,t} + C_{close,t}}{4}, \\
H_{high,t} &= \max\{H_{open,t}, H_{close,t}, C_{high,t}\}, \\
H_{low,t} &= \min\{H_{open,t}, H_{close,t}, C_{low,t}\},
\end{align*}
with initial condition $H_{open, 0} = C_{open,0}$. In here, $H_{open,t}$ is the opening value in the Heikin-Ashi chart at time $t \in \mathbb{N}_0$, and $C_{open,t}$ is the opening value of the stock, which is used in the traditional Japanese candlestick chart etc.
In the following function we transform a given dataframe of stock prices to a Heikin-Ashi one.
```
def heikin_ashi(df):
"""
Converts a dataframe according to the Heikin-Ashi.
"""
df_HA = pd.DataFrame(index=df.index, columns=['Open', 'High', 'Low', 'Close'])
df_HA['Open'][0] = df['Open'][0]
df_HA['Close'] = (df['Open'] + df['High'] + df['Low'] + df['Close']) / 4
for t in range(1,len(df)):
df_HA.iat[t,0] = (df_HA['Open'][t-1] + df_HA['Close'][t-1]) / 2 # change H_open without warnings
df_HA['High'] = df_HA[['Open', 'Close']].join(df['High']).max(axis=1)
df_HA['Low'] = df_HA[['Open', 'Close']].join(df['Low']).min(axis=1)
return df_HA
```
Let's convert the Apple's (Japanese) candlestick chart to a Heikin-Ashi chart:
```
df_HA = heikin_ashi(df)
candle = go.Candlestick(x=df_HA.index, open=df_HA['Open'], high=df_HA['High'], low=df_HA['Low'], close=df_HA['Close'])
# plot figure
fig = go.Figure(candle)
fig.layout.xaxis.type = 'category' # remove weekend days
fig.layout.xaxis.dtick = 20 # show x-axis ticker once a month
fig.layout.xaxis.rangeslider.visible = False
fig.layout.title = f'Heikin-Ashi Chart ({symbol})'
fig.layout.template = 'plotly_white'
fig.show()
```
As we can see, the Heikin-Ashi technique can be used to identify a trend more easily. Because the Heikin-Ashi technique smooths price information over two periods, it makes trends, price patterns, and reversal points easier to spot. Candles on a traditional candlestick chart frequently change from up to down, which can make them difficult to interpret. Heikin-Ashi charts typically have more consecutive colored candles, helping traders to identify past price movements easily.
The Heikin-Ashi technique reduces false trading signals in sideways and choppy markets to help traders avoid placing trades during these times. For example, instead of getting two false reversal candles before a trend commences, a trader who uses the Heikin-Ashi technique is likely only to receive the valid signal.
## Heikin-Ashi PSAR indicator
It is straightforward to apply the PSAR strategy on our Heikin-Ashi data:
```
# apply PSAR
df = PSAR(df_HA)
# extract trend switches (buying/selling advice)
buy = df.loc[df['Signal'] == 1]
sell = df.loc[df['Signal'] == -1]
# candles & psar
candles = go.Candlestick(x=df.index, open=df.Open, high=df.High, low=df.Low, close=df.Close, name='candles')
psar = go.Scatter(x=df.index, y=df['SAR'], mode='markers', name='PSAR', line={'width': 10, 'color': 'midnightblue'})
# buy & sell symbols
buys = go.Scatter(x=buy.index, y=buy.Close, mode='markers', marker_size=15, marker_symbol=5,
marker_color='green', name='Buy', marker_line_color='black', marker_line_width=1)
sells = go.Scatter(x=sell.index, y=sell.Close, mode='markers', marker_size=15, marker_symbol=6,
marker_color='red', name='Sell', marker_line_color='black', marker_line_width=1)
# plot figure
fig = go.Figure(data=[candles, psar, buys, sells])
fig.layout.xaxis.type = 'category' # remove weekend days
fig.layout.xaxis.dtick = 20 # show x-axis ticker once a month
fig.layout.xaxis.rangeslider.visible = False
fig.layout.title = f'Heikin-Ashi PSAR indicator on Heikin-Ashi ({symbol})'
fig.layout.template = 'plotly_white'
fig.show()
```
In this case, there are small differences. In fact, only on one date the Heikin-Ashi SAR value is different from the traditional SAR value. This might change when clear trends are less visible, so feel free to try other stocks!
|
github_jupyter
|
## 1. Introduction to pyLHD
pyLHD is a python implementation of the R package [LHD](https://cran.r-project.org/web/packages/LHD/index.html) by Hongzhi Wang, Qian Xiao, Abhyuday Mandal. As of now, only the algebraic construction of Latin hypercube designs (LHD) are implemented in this package. For search algorithms to construct LHDs such as: Simulated annealing, particle swarm optimization, and genetic algorithms refer to the R package.
In section 2 algebraic construction methods for LHDs are discussed
To evalute the generated LHDs we consider the following criteria
### Maximin distance Criterion
Let $X$ denote an LHD matrix. Define the $L_q$-distance between two run $x_i$ and $x_j$ of $X$ as $d_q(x_i,x_j) = \left( \sum_{k=1}^m |x_{ik}-x_{jk}|^q \right)^{1/q}$ where $q$ is an integer. Define the $L_q$-distance of design $X$ as $d_q(X) = \min \{ d_q(x_i,x_j), 1 \leq i\leq j \leq n \}$. If $q=1$, we are considering the Manhattan $(L_1)$ distance. If $q=2$, the Euclidean $(L_2)$ distance is considered. A design $X$ is called a maximim $L_q$-distance if it has the unique largest $d_q(X)$ value.
Morris and Mitch (1995) and Jin et al. (2005) proposed the $\phi_p$ criterion which is defined as
$$
\phi_p = \left( \sum_{i=1}^{n-1} \sum_{j=i+1}^n d_q (x_i,x_j)^{-p} \right)^{1/p}
$$
The $\phi_p$ criterion is asymptotically equivalent to the Maximin distance criterion as $p \rightarrow \infty$. In practice $p=15$ often suffices.
### Maximum Projection Criterion
Joseph et al (2015) proposed the maximum projection LHDs that consider designs' space-filling properties in all possible dimensional spaces. Such designs minimize the maximum projection criterion, which is defined as
$$
\underset{X}{\min} \psi(X) = \left( \frac{1}{{n \choose 2}} \sum_{i=1}^{n-1} \sum_{j=i+1}^n \frac{1}{ \prod_{l=1}^k (x_{il}-x_{jl})^2} \right)^{1/k}
$$
We can wee that any two design points should be apart from each other in any projection to minimize the value of $\psi(x)$
### Orthogonality Criteria
Two major correlation-based criteria to measure designs' orthogonality is the average absolute correlation criterion and the maximum absolute correlation
$$
ave(|q|) = \frac{2 \sum_{i=1}^{k-1} \sum_{j=i+1}^k |q_{ij}|}{k(k-1)} \quad \text{and} \quad \max |q| = \underset{i,j}{\max} |q_{ij}|
$$
where $q_{ij}$ is the correlation between the $i$th and $j$th columns of the design matrix $X$. Orthogonal design have $ave(|q|)=0$ and $\max|q|=0$, which may not exist for all design sizes. Designs with smaller $ave(|q|)$ or $\max|q|$ are generally preferred in practice.
```
import pyLHD as pl
```
Lets start by generating a random LHD with 5 rows and 3 columns
```
X = pl.rLHD(nrows=5,ncols=3)
X
```
We evaluate the above design with the different optimamlity criteria described earlier:
The maximin distance criterion (Manhattan)
```
pl.phi_p(X,p=15,q=1) # using default parameters
```
The maximin distance criterion (Euclidean)
```
pl.phi_p(X,p=10,q=2) # different p used than above
```
The average absolute correlation
```
pl.AvgAbsCor(X)
```
The maximum absolute correlation
```
pl.MaxAbsCor(X)
```
The maximum projection criterion
```
pl.MaxProCriterion(X)
```
We can apply Williams transformation on X defined as:
$$
W(x) = \begin{cases}
2x & 0 \leq x \leq (p-1)/2 \\
2(p-x)-1 & (p+1)/2 \leq x \leq p-1
\end{cases}
$$
```
W_x = pl.williams_transform(X)
W_x
```
Lets evaluate the new transformed design
```
pl.phi_p(W_x)
```
The $\phi_p$ value of transformed $W_x$ is smaller than the original design $X$
## 2. Algebraic Construction Functions
The algebraic construction methods are demonstrated in the table below
| | Ye98 | Cioppa07 | Sun10 | Tang93 | Lin09 | Butler01 |
|------------|---|---|---|---|---|----|
| Run # $n$ | $2^m +1$ | $2^m +1$ | $r2^{m +1}$ or $r2^{m +1} +1$ | $n$ | $n^2$ | $n$ |
| Factor # $k$ | $2m-2$ | $m + {m-1 \choose 2}$ | $2^c$ | $m$ | $2fp$ | $k \leq n-1$ |
| Note | $m$ is a positive integer $m\geq 2$ | $m$ is a positive integer $m\geq 2$ | $r$ and $c$ are positive integers | $n$ and $m$ are from $OA(n,m,s,r)$ | $n^2,2f$ and $p$ are from $OA(n^2,2f,n,2)$ and $OLHD(n,p)$ | $n$ is an odd prime number |
For theoretical details on the construction methods, a good overview is **Section 4.2: Algebraic Constuctions for Orthogonal LHDs** from [Musings about Constructions of Efficient Latin Hypercube Designs with Flexible Run-sizes](https://arxiv.org/abs/2010.09154)
We start by implementing Ye 1998 construction, the resulting desig will have
$2^m+1$ runs and $2m-2$ factors
```
Ye98 = pl.OLHD_Ye98(m=4)
Ye98
pl.MaxAbsCor(Ye98) # column-wise correlation are 0
```
Cioppa and Lucas 2007 construction, the resulting design will be a $2^m+1$ by $m+ {m-1 \choose 2}$ orthogonal LHD. Note $m \geq 2$
```
Cioppa07 = pl.OLHD_Cioppa07(m=3)
Cioppa07
pl.MaxAbsCor(Cioppa07) # column-wise correlation are 0
```
Sun et al. 2010 construction, the resulting design will be $r2^{c+1}$ by $2^c$ if type='even'. If type='odd'
the resulting design will be $r2^{c+1} + 1$ by $2^c$, where $r$ and $c$ are positive integers.
```
Sun10_odd = pl.OLHD_Sun10(C=2,r=2,type='odd')
Sun10_odd
Sun10_even = pl.OLHD_Sun10(C=2,r=2,type='even')
Sun10_even
```
Line et al. 2009 construction, the resulting design will be $n^2$ by $2fp$. This is obtained by using a
$n$ by $p$ orthogonal LHD with a $n^2$ by $2f$ strength 2 and level $n$ orthogonal array.
Start by generating an orthogonal LHD
```
OLHD_example = pl.OLHD_Cioppa07(m=2)
```
Next, create an orthogonal array with 25 rows, 6 columns, 5 levels, and strength 2 OA(25,6,5,2)
```
import numpy as np
OA_example = np.array([[2,2,2,2,2,1],[2,1,5,4,3,5],
[3,2,1,5,4,5],[1,5,4,3,2,5],
[4,1,3,5,2,3],[1,2,3,4,5,2],
[1,3,5,2,4,3],[1,1,1,1,1,1],
[4,3,2,1,5,5],[5,5,5,5,5,1],
[4,4,4,4,4,1],[3,1,4,2,5,4],
[3,3,3,3,3,1],[3,5,2,4,1,3],
[3,4,5,1,2,2],[5,4,3,2,1,5],
[2,3,4,5,1,2],[2,5,3,1,4,4],
[1,4,2,5,3,4],[4,2,5,3,1,4],
[2,4,1,3,5,3],[5,3,1,4,2,4],
[5,2,4,1,3,3],[5,1,2,3,4,2],
[4,5,1,2,3,2] ])
```
Now using Lin at al. 2009 construction, we couple OLHD and OA to obtain
```
Lin09 = pl.OLHD_Lin09(OLHD=OLHD_example,OA=OA_example)
Lin09
```
We can convert an orthogonal array into a LHD using the function OA2LHD. Consider the
earlier OA_example with 25 rows and 6 columns.
```
pl.OA2LHD(OA_example)
```
Lastly, we consider Butler 2001 construction by generating a $n$ by $k$ OLHD
```
Butler01 = pl.OLHD_Butler01(nrows=11,ncols=5)
Butler01
```
|
github_jupyter
|
# NEXUS tool: case study for the Souss-Massa basin - energy demand calculations
In this notebook a case study for the Souss-Massa basin is covered using the `nexustool` package. The water requirements for agricultural irrigation and domestic use were previously calculated using the Water Evaluation and Planning System (WEAP) model. In this case study, the energy requirements for groundwater pumping, wastewater treatment, desalination of seawater and pumping energy for water conveyance are estimated.
First import the package by running the following block:
```
import sys
sys.path.append("..") #this is to add the avobe folder to the package directory
import os
import nexustool
import pandas as pd
from dashboard.scripts.plotting import water_delivered_plot, unmet_demand_plot, water_supply_plot, wtd_plot, energy_demand_plot, crop_production
```
## 1. Read scenario data
After importing all required packages, the input GIS data is loaded into the variable `df`. Change the `data_folder`, `scenario` and `climate` variables to reflect the name and relative location of your data file. This dataset should already have the water demand for irrigation results.
```
data_folder = os.path.join('data', 'processed results')
scenario = 'Desalination'
climate = 'Climate Change'
input_folder = os.path.join(data_folder, scenario, climate)
```
## 2. Create nexus model
To create a model simply create an instance of the `nexustool.Model()` class and store it in a variable name. The `nexustool.Model()` class requires a dataframe as input data. Several other properties and parameter values can be defined by explicitly passing values to them. To see a full list of parameters and their explaination refer to the documentation of the package. We wil create a model using the `demand_data.gz` data:
```
#Define the path to read the scenario input data and reads it in
file_path = os.path.join(input_folder, 'demand_data.gz')
df = pd.read_csv(file_path)
#Creates the nexus model with the input dataframe
souss_massa = nexustool.Model(df)
```
## 3. Define variable names
The names of the properties of the model can be changed at any time. This is important for the model to know how each property is called withing your input data. To check the current property names run the `.print_properties()` method, a list with the names of each property and its current value will be displayed.
Then you can provide the right names for each property, calling them and assigning a value as:
```python
souss_massa.elevation_diff = 'elevation_delta'
souss_massa.gw_depth = 'name_of_ground_water_depth'
```
In this particular case we will need to change the following default values:
```
souss_massa.elevation_diff = 'elevation_diff' #for the case of GW, the elevation_diff is set to be the wtd
souss_massa.L = 'distance' #for the case of GW, the distance is set to be the wtd
souss_massa.D = 'Pipe_diameter'
#Defines the name of the variable for Peak Water Demand and Seasonal Scheme Water demand (monthly)
souss_massa.pwd = 'pwd' # Peak Water Demand
souss_massa.sswd = 'sswd' # Seassonal Scheme Water Demand
souss_massa.df.rename(columns={'value': 'sswd'}, inplace=True) #Renames the name of the column value to sswd
souss_massa.pp_e = 'pp_e' # Peak Pumping Energy
souss_massa.pa_e = 'pa_e' # Pumping Average Energy
```
## 4. Define pipelines diameters and average pumping hours, pumping efficiency
Now we need to define the specifications of the water network, giving pipeline / canal diameter values:
```
souss_massa.df['Pipe_diameter'] = 1.2
souss_massa.df.loc[souss_massa.df['type'].str.contains('GW'), 'Pipe_diameter'] = 1000
souss_massa.df.loc[souss_massa.df['type'].str.contains('DS'), 'Pipe_diameter'] = 1.2
souss_massa.df.loc[souss_massa.df['type'].str.contains('Pipeline'), 'Pipe_diameter'] = 1.2
souss_massa.pumping_hours_per_day = 10
souss_massa.pump_eff = 0.6
```
## 5. Peak Water Demand (PWD)
The $PWD$ is definfe as the daily peak cubic meters of water pumped per second withing the month. To accomplish that, the $SSWD$ (m<sup>3</sup>/month) is divided by 30 days per month, 3600 seconds per hour and the amount of average pumping hours in a day. This provides the $PWD$ in m<sup>3</sup>/s:
$$
PWD\,(m^3/s) = \frac{SSWD\,(m^3/month)}{30\,(day/month)\cdot PumpHours\,(h/day)\cdot 3600\, (s/h)}
$$
Moreover, the $PWD$ for agricultural irrigation is assumed as double the normal $PWD$. We make this calculations as per the following cell:
```
#Defines the PWD. It is defined as double the seasonal demand for agricultural sites
souss_massa.df[souss_massa.pwd] = souss_massa.df[souss_massa.sswd] / 30 / souss_massa.pumping_hours_per_day / 3600 #to convert to cubic meter per second [m3/s]
souss_massa.df.loc[souss_massa.df['type']=='Agriculture', souss_massa.pwd] *= 2
```
## 6. Calculate pumping energy requirements
To estimate the pumping energy requirements for conveyance, first we need to calculate the Total Dinamic Head (TDH). This, is a measure in meters that accounts for the elevation difference between two points and the pressure loss in distribution.
For that, the area $A$ `.pipe_area()`, the velocity $V$ `.flow_velocity()`, the Reynolds number $Re$ `.reynolds()` and the friction factor $f$ `.friction_factor()` need to be estimated. The `nexustool` provides simple functions that allows us make an easy estimation of these variables, which have the following formulas implemented in the background:
$$
A\,(m^2) = \pi\cdot \frac{D^2}{4}
$$
$$
V\,(m/s) = \frac{SSWD\,(m^3/month)}{PumpHours\,(h/day)\cdot 30\,(day/month)\cdot 3600\,(s/h)\cdot A\,(m^2)}
$$
$$
Re = \frac{V\,(m/s)\cdot D\,(m)}{v\,(m^2/s)}
$$
Where $v$ is the kinematic viscosity of water at around 1.004e-06 m<sup>2</sup>/s. And the frction factor is estimated according to the Swamee–Jain equation:
$$
f = \frac{0.25}{\left[log_{10}\left(\frac{\epsilon}{3.7D}+\frac{5.74}{Re^{0.9}}\right)\right]^2}
$$
Where $\epsilon$ is the roughness of the material.
```
souss_massa.pipe_area()
souss_massa.flow_velocity()
souss_massa.reynolds()
souss_massa.friction_factor()
```
Then, the TDH can be calculated by simply calling the `.get_tdh()` function.
$$
TDH\,(m) = f\cdot \frac{L\,(m)}{D\,(m)}\cdot \frac{V(m/s)^2}{2\cdot g\,(m/s^2)}
$$
Whereas the conveyance pumping energy requirements by calling the `.get_pumping_energy()` method. The equation used to calculate the Electricity Demand ($E_D$) for pumping is as follows:
$$
E_D\,(kW_h) = \frac{SSWD\,(m^3)\cdot \rho\,(kg/m^3)\cdot g\,(m/s^2)\cdot TDH\,(m)}{PP_{eff}\,(\%)\cdot 3600\,(s/h)\cdot 1000\,(W/kW)}
$$
The variable withing the Model for the $E_D$ is the `pa_e` or Pumping Average Electricity requirements.
Moreover, the Power Demand for pumping ($PD$) is denoted by the variable `pp_e` and calculated by the following formula:
$$
PD\,(kW) = \frac{PWD\,(m^3/s)\cdot \rho\,(kg/m^3)\cdot g\,(m/s^2)\cdot TDH\,(m)}{PP_{eff}\,(\%)\cdot 1000\,(W/kW)}
$$
The `.get_pumping_energy()` method calculates both the $E_D$ (`pa_e`) and $PD$ (`pp_e`).
```
souss_massa.get_tdh()
souss_massa.get_pumping_energy()
souss_massa.df.loc[souss_massa.df.pp_e<0, souss_massa.pp_e] = 0 # ensures no negative energy values are considered
souss_massa.df.loc[souss_massa.df.pa_e<0, souss_massa.pa_e] = 0 # ensures no negative power values are considered
# We exclude energy for pumping calculations done for the Complexe Aoulouz Mokhtar Soussi,
# as this pipeline is known to be driven by gravity only
souss_massa.df.loc[souss_massa.df['Supply point'].str.contains('Complexe Aoulouz Mokhtar Soussi'), 'pa_e'] = None
```
## 7. Calculating desalination energy requirements
Desalination energy requirements are estimated by multipliying the monthly average desalinated water (`sswd`), by an energy intensity factor (`desal_energy_int`) based on the characteristics of the desalination plant.
```
#Define energy intensity for seawater desalination project
desal_energy_int = 3.31 # kWh/m3
#Create a new nexus Model with the data relevant to the desalination plant only, filtering by the key work DS (Desalination)
sm_desal = nexustool.Model(souss_massa.df.loc[souss_massa.df['type'].str.contains('DS')].copy())
#Multiply the sswd by the energy intensity for treatment
sm_desal.df[souss_massa.pa_e] = sm_desal.df[souss_massa.sswd] * desal_energy_int
```
## 8. Calculating wastewater treatment energy requirements
Wastewater treatment energy is dependent on the type of treatment required. Wastewater treatment can be subdivided into three stages: primary, secondary and tertiary. The treatment stages used, are then dependent on the final quality requirements of the treated wastewater. Thus, for wastewater that will be treated and returned to the ecosystem, often primary to secondary treatment is enough. On the other hand, treated wastewater intended for agricultural irrigation or drinking purposes, should go through secondary to terciary treatment to ensure proper desinfecton levels.
Depending on the scenario run, we will need then to use a proper wastewater treatment energy intensity. In general, the higher the number of stages, the higher the energy requirements. In this model, we used an energy intensity of **0.1 kWh/m<sup>3</sup>** for treated wastewater that is not being reused, and **0.8 kWh/m<sup>3</sup>** for treated wastewater reused in agricultural irrigation.
```
#Here we load the WWTP inflow data
file_path = os.path.join(input_folder, 'wwtp_inflow.gz')
df_wwtp = pd.read_csv(file_path)
#We define an energy intensity for wastewater treatment and compute the energy demand
wwtp_energy_int = 0.1 # kWh/m3
df_wwtp['pa_e'] = df_wwtp.value * wwtp_energy_int
```
## 9. Saving the results
Finally, we save the resulting dataframes as `.gz` files, which is a compressed version of a `csv` file:
```
#Define and create the output folder
results_folder = os.path.join('dashboard', 'data', scenario, climate)
os.makedirs(results_folder, exist_ok=True)
#Save the results
souss_massa.df.to_csv(os.path.join(results_folder, 'results.gz'), index=False)
sm_desal.df.to_csv(os.path.join(results_folder, 'desal_data.gz'), index=False)
df_wwtp.to_csv(os.path.join(results_folder, 'wwtp_data.gz'), index=False)
```
## 10. Visualizing some results
Using some functions imported from the visualization tool, we can plot some general results for the scenario:
### Water delivered (Mm<sup>3</sup>)
```
water_delivered_plot(souss_massa.df, 'Year', {})
```
### Enery demand (GWh)
```
energy_demand_plot(souss_massa.df, df_wwtp, sm_desal.df, 'Year', {})
```
### Unmet demand (%)
```
unmet_demand_plot(souss_massa.df, 'Year', {})
```
### Water supplied (Mm<sup>3</sup>/year)
```
water_supply_plot(souss_massa.df, 'Year', {})
```
### Groundwater depth (m)
```
wtd_plot(souss_massa.df, 'Date', {})
```
### Crop production (ton/year)
```
crop = pd.read_csv(os.path.join(input_folder, 'production.gz'))
crop_production(crop, 'crop', {})
```
|
github_jupyter
|
Interactive analysis with python
--------------------------------
Before starting this tutorial, ensure that you have set up _tangos_ [as described here](https://pynbody.github.io/tangos/) and the data sources [as described here](https://pynbody.github.io/tangos/data_exploration.html).
We get started by importing the modules we'll need:
```
%matplotlib inline
import tangos
import pylab as p
```
First let's inspect what simulations are available in our database:
```
tangos.all_simulations()
```
For any of these simulations, we can generate a list of available timesteps as follows:
```
tangos.get_simulation("tutorial_changa").timesteps
```
For any timestep, we can access the halos using `.halos` and a specific halo using standard python 0-based indexing:
```
tangos.get_simulation("tutorial_changa").timesteps[3].halos[3]
```
One can skip straight to getting a specific halo as follows:
```
tangos.get_halo("tutorial_changa/%384/halo_4")
```
Note the use of the SQL wildcard % character which avoids us having to type out the entire path. Whatever way you access it, the resulting object allows you to query what properties have been calculated for that specific halo. We can then access those properties using the normal python square-bracket dictionary syntax.
```
halo = tangos.get_halo("tutorial_changa/%960/halo_1")
halo.keys()
halo['Mvir']
p.imshow(halo['uvi_image'])
```
One can also get meta-information about the computed property. It would be nice to know
the physical size of the image we just plotted. We retrieve the underlying property object
and ask it:
```
halo.get_description("uvi_image").plot_extent()
```
This tells us that the image is 15 kpc across. The example properties that come with _tangos_
use _pynbody_'s units system to convert everything to physical kpc, solar masses and km/s. When
you implement your own properties, you can of course store them in whichever units you like.
Getting a time sequence of properties
-------------------------------------
Often we would like to see how a property varies over time. _Tangos_ provides convenient ways to extract this information, automatically finding
major progenitors or descendants for a halo. Let's see this illustrated on the SubFind _mass_ property:
```
halo = tangos.get_halo("tutorial_gadget/snapshot_020/halo_10")
# Calculate on major progenitor branch:
Mvir, t = halo.calculate_for_progenitors("mass","t()")
# Now perform plotting:
p.plot(t,1e10*Mvir)
p.xlabel("t/Gyr")
p.ylabel(r"$M/h^{-1} M_{\odot}$")
p.semilogy()
```
In the example above, `calculate_for_progenitors` retrieves properties on the major progenitor branch of the chosen halo. One can ask for as many properties as you like, each one being returned as a numpy array in order. In this particular example the first property is the mass (as reported by subfind) and the second is the time. In fact the second property isn't really stored - if you check `halo.keys()` you won't find `t` in there. It's a simple example of a _live property_ which means it's calculated on-the-fly from other data. The time is actually stored in the TimeStep rather than the Halo database entry, so the `t()` live property simply retrieves it from the appropriate location.
Live properties are a powerful aspect of _tangos_. We'll see more of them momentarily.
Histogram properties
--------------------
While the approach above is the main way to get time series of data with _tangos_, sometimes one
wants to be able to use finer time bins than the number of outputs available. For example, star
formation rates or black hole accretion rates often vary on short timescales and the output files
from simulations are sufficient to reconstruct these variations in between snapshots.
_Tangos_ implements `TimeChunkedHistogram` for this purpose. As the name suggests, a _chunk_ of
historical data is stored with each timestep. The full history is then reconstructed by combining
the chunks through the merger tree; this process is customizable. Let's start with the simplest
possible request:
```
halo = tangos.get_halo("tutorial_changa_blackholes/%960/halo_1")
SFR = halo["SFR_histogram"]
# The above is sufficient to retrieve the histogram; however you probably also want to check
# the size of the time bins. The easiest approach is to request a suitable time array to go with
# the SF history:
SFR_property_object = halo.get_objects("SFR_histogram")[0]
SFR_time_bins = SFR_property_object.x_values()
p.plot(SFR_time_bins, SFR)
p.xlabel("Time/Gyr")
p.ylabel("SFR/$M_{\odot}\,yr^{-1}$")
```
The advantage of storing the histogram in chunks is that one can reconstruct it
in different ways. The default is to go along the major progenitor branch, but
one can also sum over all progenitors. The following code shows the fraction of
star formation in the major progenitor:
```
SFR_all = halo.calculate('reassemble(SFR_histogram, "sum")')
p.plot(SFR_time_bins, SFR/SFR_all)
p.xlabel("Time/Gyr")
p.ylabel("Frac. SFR in major progenitor")
```
_Technical note_: It's worth being aware that the merger information is, of course, quantized to the
output timesteps even though the SFR information is stored in small chunks. This is rarely an issue
but with coarse timesteps (such as those in the tutorial simulations), the quantization can cause
noticable artefacts – here, the jump to 100% in the major progenitor shortly before _t_ = 3 Gyr
corresponds to the time of the penultimate stored step, after which no mergers are recorded.
For more information, see the [time-histogram properties](https://pynbody.github.io/tangos/histogram_properties.html) page.
Let's see another example of a histogram property: the black hole accretion rate
```
BH_accrate = halo.calculate('BH.BH_mdot_histogram')
p.plot(SFR_time_bins, BH_accrate)
p.xlabel("Time/Gyr")
p.ylabel("BH accretion rate/$M_{\odot}\,yr^{-1}$")
```
This works fine, but you may have noticed the warning that more than one black hole
is in the halo of interest. There is more information about the way that links between
objects work in _tangos_, and disambiguating between them, in the "using links" section
below.
Getting properties for multiple halos
-------------------------------------
Quite often one wants to collect properties from multiple halos simultaneously. Suppose we want to plot the mass against the vmax for all halos at
a specific snapshot:
```
timestep = tangos.get_timestep("tutorial_gadget/snapshot_019")
mass, vmax = timestep.calculate_all("mass","VMax")
p.plot(mass*1e10,vmax,'k.')
p.loglog()
p.xlabel("$M/h^{-1} M_{\odot}$")
p.ylabel(r"$v_{max}/{\rm km s^{-1}}$")
```
Often when querying multiple halos we still want to know something about their history, and live calculations enable that. Suppose we want to know how much the mass has grown since the previous snapshot:
```
mass, fractional_delta_2 = timestep.calculate_all("mass", "(mass-earlier(2).mass)/mass")
p.hlines(0.0,1e10,1e15, colors="gray")
p.plot(mass*1e10, fractional_delta_2,"r.", alpha=0.2)
p.semilogx()
p.ylim(-0.1,0.9)
p.xlim(1e12,1e15)
p.xlabel("$M/h^{-1} M_{\odot}$")
p.ylabel("Fractional growth in mass")
```
This is a much more ambitious use of the live calculation system. Consider the last property retrieved, which is `(mass-earlier(2).mass)/mass`. This combines algebraic operations with _redirection_: `earlier(2)` finds the major progenitor two steps prior to this one, after which `.mass` retrieves the mass at that earlier timestep. This is another example of a "link", as previously used to retrieve
black hole information above.
Using Links
-----------
_Tangos_ has a concept of "links" between objects including halos and black holes. For example,
the merger tree information that you have already used indirectly is stored as links.
Returning to our example of black holes above, we used a link named `BH`; however this issued a
warning that the result was technically ambiguous. Let's see that warning again. For clarity,
we will use the link named `BH_central` this time around -- it's an alternative set of links
which only includes black holes associated with the central galaxy (rather than any satellites).
```
halo = tangos.get_halo("tutorial_changa_blackholes/%960/halo_1")
BH_mass = halo.calculate('BH_central.BH_mass')
```
We still get the warning, so there's more than one black hole in the central galaxy.
To avoid such warnings, you can specify more about which link you are referring to. For example,
we can specifically ask for the black hole with the _largest mass_ and _smallest impact parameters_
using the following two queries:
```
BH_max_mass = halo.calculate('link(BH_central, BH_mass, "max")')
BH_closest = halo.calculate('link(BH_central, BH_central_distance, "min")')
```
The `link` live-calculation function returns the halo with either the maximum or minimum value of an
associated property, here the `BH_mass` and `BH_central_distance` properties respectively.
Either approach disambiguates the black holes we mean (in fact, they unsurprisingly lead to
the same disambiguation):
```
BH_max_mass == BH_closest
```
However one doesn't always have to name a link to make use of it. The mere existence of a link
is sometimes enough. An example is the merger tree information already used. Another useful
example is when two simulations have the same initial conditions, as in the `tutorial_changa`
and `tutorial_changa_blackholes` examples; these two simulations differ only in that the latter
has AGN feedback. We can identify halos between simulations using the following syntax:
```
SFR_in_other_sim = halo.calculate("match('tutorial_changa').SFR_histogram")
p.plot(SFR_time_bins, halo['SFR_histogram'],color='r', label="With AGN feedback")
p.plot(SFR_time_bins, SFR_in_other_sim, color='b',label="No AGN feedback")
p.legend(loc="lower right")
p.semilogy()
p.xlabel("t/Gyr")
p.ylabel("SFR/$M_{\odot}\,yr^{-1}$")
```
The `match` syntax simply tries to follow links until it finds a halo in the named
_tangos_ context. One can use it to match halos across entire timesteps too; let's
compare the stellar masses of our objects:
```
timestep = tangos.get_timestep("tutorial_changa/%960")
Mstar_no_AGN, Mstar_AGN = timestep.calculate_all("star_mass_profile[-1]",
"match('tutorial_changa_blackholes').star_mass_profile[-1]")
# note that we use star_mass_profile[-1] to get the last entry of the star_mass_profile array,
# as a means to get the total stellar mass from a profile
p.plot(Mstar_no_AGN, Mstar_AGN, 'k.')
p.plot([1e6,1e11],[1e6,1e11],'k-',alpha=0.3)
p.loglog()
p.xlabel("$M_{\star}/M_{\odot}$ without AGN")
p.ylabel("$M_{\star}/M_{\odot}$ with AGN")
```
|
github_jupyter
|
n=b
```
# Binary representation ---> Microsoft
# Difficulty: School Marks: 0
'''
Write a program to print Binary representation of a given number N.
Input:
The first line of input contains an integer T, denoting the number of test cases. Each test case contains an integer N.
Output:
For each test case, print the binary representation of the number N in 14 bits.
Constraints:
1 ≤ T ≤ 100
1 ≤ N ≤ 5000
Example:
Input:
2
2
5
Output:
00000000000010
00000000000101
'''
for _ in range(int(input())):
n=int(input())
x=bin(n).split('b')[1]
print('0'*(14-len(x))+x)
# Alone in couple ---> Ola Cabs
# Difficulty: School Marks: 0
'''
In a party everyone is in couple except one. People who are in couple have same numbers. Find out the person who is not in couple.
Input:
The first line contains an integer 'T' denoting the total number of test cases. In each test cases, the first line contains an integer 'N' denoting the size of array. The second line contains N space-separated integers A1, A2, ..., AN denoting the elements of the array. (N is always odd)
Output:
In each seperate line print number of the person not in couple.
Constraints:
1<=T<=30
1<=N<=500
1<=A[i]<=500
N%2==1
Example:
Input:
1
5
1 2 3 2 1
Output:
3
'''
for _ in range(int(input())):
n=int(input())
s=input()
a=''
for i in s:
if s.count(i)%2==1 and i not in a:
a=i
print(i,end=' ')
# Count total set bits ---> Amazon,Adobe
# Difficulty: Basic Marks: 1
'''
You are given a number N. Find the total count of set bits for all numbers from 1 to N(both inclusive).
Input:
The first line of input contains an integer T denoting the number of test cases. T testcases follow. The first line of each test case is N.
Output:
For each testcase, in a new line, print the total count of all bits.
Constraints:
1 ≤ T ≤ 100
1 ≤ N ≤ 103
Example:
Input:
2
4
17
Output:
5
35
Explanation:
Testcase1:
An easy way to look at it is to consider the number, n = 4:
0 0 0 = 0
0 0 1 = 1
0 1 0 = 1
0 1 1 = 2
1 0 0 = 1
Therefore , the total number of bits is 5.
'''
for _ in range(int(input())):
n=int(input())
s=0
for i in range(n+1):
s+=bin(i).split('b')[1].count('1')
print(s)
```
***IMP***
```
# ------------------------------------------IMP---------------------------------------
"https://practice.geeksforgeeks.org/problems/toggle-bits-given-range/0/?track=sp-bit-magic&batchId=152"
# Toggle bits given range
# Difficulty: Basic Marks: 1
'''
Given a non-negative number N and two values L and R. The problem is to toggle the bits in the range L to R in the binary representation of N, i.e, to toggle bits from the rightmost Lth bit to the rightmost Rth bit. A toggle operation flips a bit 0 to 1 and a bit 1 to 0.
Input:
First line of input contains a single integer T which denotes the number of test cases. Then T test cases follows. First line of each test case contains three space separated integers N, L and R.
Output:
For each test case , print the number obtained by toggling bits from the rightmost Lth bit to the rightmost Rth bit in binary representation of N.
Constraints:
1<=T<=100
1<=N<=1000
1<=L<=R
L<=R<= Number of bits(N)
Example:
Input:
2
17 2 3
50 2 5
Output:
23
44
'''
for _ in range(int(input())):
l=list(map(int,input().split()))
c=0
s1=''
s=bin(l[0])[2:]
n=len(s)
for i in s:
if c>=(n-l[2]) and c<=(n-l[1]):
if i=='0':
s1+='1'
else:
s1+='0'
else:
s1+=i
c+=1
print(int(s1,base=2))
"https://practice.geeksforgeeks.org/problems/set-kth-bit/0/?track=sp-bit-magic&batchId=152"
# Set kth bit ---> Cisco, Qualcomm
# Difficulty: Basic Marks: 1
'''
Given a number N and a value K. From the right, set the Kth bit in the binary representation of N. The position of LSB(or last bit) is 0, second last bit is 1 and so on. Also, 0 <= K < X, where X is the number of bits in the binary representation of N.
Input:
First line of input contains a single integer T, which denotes the number of test cases. T test cases follows. First line of each testcase contains two space separated integers N and K.
Output:
For each test case, print the new number after setting the Kth bit of N.
Constraints:
1 <= T <= 100
1 <= N <= 1000
Example:
Input:
2
10 2
15 3
Output:
14
15
Explanation:
Testcase 1: Binary representation of the given number 10 is: 1 0 1 0, number of bits in the binary reprsentation is 4. Thus 2nd bit from right is 0. The number after changing this bit to 1 is: 14(1 1 1 0).
'''
for _ in range(int(input())):
l=list(map(int,input().split()))
s=bin(l[0])[2:]
s1=''
c=0
if (l[1]+1)>len(s):
s1='0'*(l[1]+1-len(s))+s
s=s1
s1=''
for i in s:
if c==(len(s)-(l[1]+1)):
s1+='1'
else:
s1+=i
c+=1
print(int(s1,2))
"https://practice.geeksforgeeks.org/problems/bit-difference/0/?track=sp-bit-magic&batchId=152"
# Bit Difference ---> Amazon Qualcomm, Samsung
# Difficulty: Basic Marks: 1
'''
You are given two numbers A and B. Write a program to count number of bits needed to be flipped to convert A to B.
Input:
The first line of input contains an integer T denoting the number of test cases. T testcases follow. The first line of each test case is A and B separated by a space.
Output:
For each testcase, in a new line, print the number of bits needed to be flipped.
Constraints:
1 ≤ T ≤ 100
1 ≤ A, B ≤ 103
Example:
Input:
1
10 20
Output:
4
Explanation:
Testcase1:
A = 01010
B = 10100
Number of bits need to flipped = 4
'''
for _ in range(int(input())):
a,c=input().split()
a=bin(int(a))[2:]
c=bin(int(c))[2:]
an=len(a)
cn=len(c)
if an!=cn:
if (an-cn)>0:
c='0'*(an-cn)+c
else:
a='0'*(cn-an)+a
count=0
for i,j in zip(a,c):
if i !=j:
count+=1
print(count)
"https://practice.geeksforgeeks.org/problems/swap-two-nibbles-in-a-byte/0/?track=sp-bit-magic&batchId=152"
# Swap two nibbles in a byte ---> Accolite, Cisco, Amazon, Qualcomm
# Difficulty: Basic Marks: 1
'''
Given a byte, swap the two nibbles in it. For example 100 is be represented as 01100100 in a byte (or 8 bits).
The two nibbles are (0110) and (0100). If we swap the two nibbles, we get 01000110 which is 70 in decimal.
Input:
The first line contains 'T' denoting the number of testcases. Each testcase contains a single positive integer X.
Output:
In each separate line print the result after swapping the nibbles.
Constraints:
1 ≤ T ≤ 70
1 ≤ X ≤ 255
Example:
Input:
2
100
129
Output:
70
24
'''
for _ in range(int(input())):
a=bin(int(input()))[2:]
if len(a)%4!=0:
a='0'*(4-len(a)%4)+a
c=[]
for i in range(1,(len(a)//4)+1):
c.append(a[4*(i-1):4*i])
c=c[::-1]
print(int(''.join(c),2))
```
### [Check whether K-th bit is set or not](https://practice.geeksforgeeks.org/problems/check-whether-k-th-bit-is-set-or-not/0/?track=sp-bit-magic&batchId=152)
- Company Tag: Cisco
- Difficulty: Basic
- Marks: 1
***Given a number N and a bit number K, check if Kth bit of N is set or not. A bit is called set if it is 1. Position of set bit '1' should be indexed starting with 0 from RSB side in binary representation of the number. Consider N = 4(100): 0th bit = 0, 1st bit = 0, 2nd bit = 1.***
***Input:***\
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow.\
Each test case consists of two lines. The first line of each test case contain an integer N. \
The second line of each test case contains an integer K.\
\
***Output:***\
Corresponding to each test case, print "Yes" (without quotes) if Kth bit is set else print "No" (without quotes) in a new line.\
\
***Constraints:***\
1 ≤ T ≤ 200\
1 ≤ N ≤ 109\
0 ≤ K ≤ floor(log2(N) + 1)\
\
***Example:***\
***Input:***\
3\
4\
0\
4\
2\
500\
3\
\
***Output:***\
No\
Yes\
No\
\
***Explanation:***\
***Testcase 1:*** Binary representation of 4 is 100, in which 0th bit from LSB is not set. So, answer is No.\
```
for _ in range(int(input())):
a=bin(int(input()))[2:]
k=int(input())
if a[(len(a)-1)-k]=='1':
print('Yes')
else:
print('No')
```
### [Rightmost different bit](https://practice.geeksforgeeks.org/problems/rightmost-different-bit/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Basic
- Marks: 1
***Given two numbers M and N. The task is to find the position of rightmost different bit in binary representation of numbers.***
***Input:***\
The input line contains T, denoting the number of testcases. Each testcase follows. First line of each testcase contains two space separated integers M and N.
***Output:***\
For each testcase in new line, print the position of rightmost different bit in binary representation of numbers. If both M and N are same then print -1 in this case.
***Constraints:***\
1 <= T <= 100\
1 <= M <= 103\
1 <= N <= 103
***Example:***\
***Input:***\
2\
11 9\
52 4
***Output:***\
2\
5
***Explanation:***\
***Tescase 1:*** Binary representaion of the given numbers are: 1011 and 1001, 2nd bit from right is different.
```
for _ in range(int(input())):
a,c=input().split()
a=bin(int(a))[2:]
c=bin(int(c))[2:]
an=len(a)
cn=len(c)
if an!=cn:
if (an-cn)>0:
c='0'*(an-cn)+c
else:
a='0'*(cn-an)+a
k=len(a)
for i in range(k):
if a[k-1-i]!=c[k-1-i]:
print(i+1)
break
else:
print(-1)
```
### [Number is sparse or not](https://practice.geeksforgeeks.org/problems/number-is-sparse-or-not/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Basic
- Marks: 1
***Given a number N, check whether it is sparse or not. A number is said to be a sparse number if in the binary representation of the number no two or more consecutive bits are set.***
***Input:***\
The first line of input contains an integer T denoting the number of test cases. The first line of each test case is number 'N'.
***Output:***\
Print '1' if the number is sparse and '0' if the number is not sparse.
***Constraints:***\
1 <= T <= 100\
1 <= N <= 103
***Example:***\
***Input:***\
2\
2\
3
***Output:***\
1\
0
***Explanation:***\
***Testcase 1:*** Binary Representation of 2 is 10, which is not having consecutive set bits. So, it is sparse number.\
***Testcase 2:*** Binary Representation of 3 is 11, which is having consecutive set bits in it. So, it is not a sparse number.
```
for _ in range(int(input())):
a=bin(int(input()))[2:]
if a.count('11')>0:
print(0)
else:
print(1)
```
### [Gray Code](https://practice.geeksforgeeks.org/problems/gray-code/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Basic
- Marks: 1
***You are given a decimal number n. You need to find the gray code of the number n and convert it into decimal.
To see how it's done, refer here.***
***Input:***\
The first line contains an integer T, the number of test cases. For each test case, there is an integer n denoting the number
***Output:***\
For each test case, the output is gray code equivalent of n.
***Constraints:***\
1 <= T <= 100\
0 <= n <= 108
***Example:***\
***Input***\
2\
7\
10
***Output***\
4\
15
***Explanation:***\
***Testcase1:*** 7 is represented as 111 in binary form. The gray code of 111 is 100, in the binary form whose decimal equivalent is 4.
***Testcase2:*** 10 is represented as 1010 in binary form. The gray code of 1010 is 1111, in the binary form whose decimal equivalent is 15.
```
for _ in range(int(input())):
a=bin(int(input()))[2:]
c=a[0]
for i in range(1,len(a)):
k=(int(a[i])+int(a[i-1]))
if k==0 or k==1:
c+=str(k)
else:
c+='0'
print(int(c,2))
```
### [Gray to Binary equivalent](https://practice.geeksforgeeks.org/problems/gray-to-binary-equivalent/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Basic
- Marks: 1
***Given N in Gray code equivalent. Find its binary equivalent.***
***Input:***\
The first line contains an integer T, number of test cases. For each test cases, there is an integer N denoting the number in gray equivalent.
***Output:***\
For each test case, in a new line, the output is the decimal equivalent number N of binary form.
***Constraints:***\
1 <= T <= 100\
0 <= n <= 108
***Example:***\
***Input***\
2\
4\
15
***Output***\
7\
10
***Explanation:***\
***Testcase1.*** 4 is represented as 100 and its binary equivalent is 111 whose decimal equivalent is 7.\
***Testcase2.*** 15 is represented as 1111 and its binary equivalent is 1010 i.e. 10 in decimal.
```
for _ in range(int(input())):
a=bin(int(input()))[2:]
c=a[0]
for i in range(1,len(a)):
k=(int(a[i])+int(c[i-1]))
if k==0 or k==1:
c+=str(k)
else:
c+='0'
print(int(c,2))
```
### [Check if a Integer is power of 8 or not](https://practice.geeksforgeeks.org/problems/check-if-a-integer-is-power-of-8-or-not/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Easy
- Marks: 2
***Given a positive integer N, The task is to find if it is a power of eight or not.***
***Input:***\
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case contains an integer N.
***Output:***\
In new line print "Yes" if it is a power of 8, else print "No".
***Constraints:***\
1<=T<=100\
1<=N<=1018
***Example:***\
***Input:***\
2\
64\
75
***Output:***\
Yes\
No
```
for _ in range(int(input())):
n=int(input())
i=1
while 8**i<=n:
i+=1
if 8**(i-1)==n:
print('Yes')
else:
print('No')
```
### [Is Binary Number Multiple of 3](https://practice.geeksforgeeks.org/problems/is-binary-number-multiple-of-3/0/?track=sp-bit-magic&batchId=152)
- Company Tags : Adobe, Amazon, Microsoft
- Difficulty: Medium
- Marks: 4
***Given a binary number, write a program that prints 1 if given binary number is a multiple of 3. Else prints 0. The given number can be big upto 2^100. It is recommended to finish the task using one traversal of input binary string.***
***Input:***\
The first line contains T denoting the number of testcases. Then follows description of testcases.
Each case contains a string containing 0's and 1's.
***Output:***\
For each test case, output a 1 if string is multiple of 3, else 0.
***Constraints:***\
1<=T<=100\
1<=Length of Input String<=100
***Example:***\
***Input:***\
2\
011\
100
***Output:***\
1\
0
```
for _ in range(int(input())):
n=int(input(),2)
if n%3==0:
print(1)
else:
print(0)
```
### [Reverse Bits](https://practice.geeksforgeeks.org/problems/reverse-bits/0/?track=sp-bit-magic&batchId=152)
- Company Tags : Amazon, Cisco, HCL, Nvidia, Qualcomm
- Difficulty: Easy
- Marks: 2
***Given a 32 bit number x, reverse its binary form and print the answer in decimal.***
***Input:***\
The first line of input consists T denoting the number of test cases. T testcases follow. Each test case contains a single 32 bit integer
***Output:***\
For each test case, in a new line, print the reverse of integer.
***Constraints:***\
1 <= T <= 100\
0 <= x <= 4294967295
***Example:***\
***Input:***\
2\
1\
5
***Output:***\
2147483648\
2684354560
***Explanation:***\
***Testcase1:***\
00000000000000000000000000000001 =1\
10000000000000000000000000000000 =2147483648
```
for _ in range(int(input())):
a=bin(int(input()))[2:][::-1]
a+='0'*(32-len(a))
print(int(a,2))
```
### [Swap all odd and even bits](https://practice.geeksforgeeks.org/problems/swap-all-odd-and-even-bits/0/?track=sp-bit-magic&batchId=152)
- Difficulty: Easy
- Marks: 2
***Given an unsigned integer N. The task is to swap all odd bits with even bits. For example, if the given number is 23 (00010111), it should be converted to 43(00101011). Here, every even position bit is swapped with adjacent bit on right side(even position bits are highlighted in binary representation of 23), and every odd position bit is swapped with adjacent on left side.***
***Input:***\
The first line of input contains T, denoting the number of testcases. Each testcase contains single line.
***Output:***\
For each testcase in new line, print the converted number.
***Constraints:***\
1 ≤ T ≤ 100\
1 ≤ N ≤ 100
***Example:***\
***Input:***\
2\
23\
2
***Output:***\
43\
1
***Explanation:***\
***Testcase 1:*** BInary representation of the given number; 00010111 after swapping 00101011.
```
for _ in range(int(input())):
a=bin(int(input()))[2:]
if len(a)%4!=0:
a='0'*(4-len(a)%4)+a
s=''
for i,j in zip(a[1::2],a[::2]):
s=s+i+j
print(int(s,2))
def f(a,c):
a=bin(a)[2:]
c=bin(c)[2:]
an=len(a)
cn=len(c)
if an!=cn:
if (an-cn)>0:
c='0'*(an-cn)+c
else:
a='0'*(cn-an)+a
count=0
for i,j in zip(a,c):
if i !=j:
count+=1
return count
for _ in range(int(input())):
count=0
n=int(input())
a=list(map(int,input().split()))
for i in a:
for j in a:
count+=f(i,j)
print(count)
if __name__ == '__main__':
n = int(input())
while n != 0:
p = int(input())
lis = [int(x) for x in input().split()]
bits = 0
for i in range(0, 32):
k = 0
for j in range(0, len(lis)):
if lis[j] & (1 << i):
k = k + 1
bits += k * (len(lis) - k)
print(2 * bits % 1000000007)
n = n-1
```
### [Bleak Numbers](https://practice.geeksforgeeks.org/problems/bleak-numbers/0/?track=sp-bit-magic&batchId=152)
- Company Tags : SAP Labs
- Difficulty: Medium
- Marks: 4
***Given an integer, check whether it is Bleak or not.***
***A number ‘n’ is called Bleak if it cannot be represented as sum of a positive number x and set bit count in x, i.e., x + [countSetBits(x)](http://www.geeksforgeeks.org/count-set-bits-in-an-integer/) is not equal to n for any non-negative number x.***
***Examples :***
3 is not Bleak as it can be represented
as 2 + countSetBits(2).
4 is t Bleak as it cannot be represented
as sum of a number x and countSetBits(x)
for any number x.
***Input:***\
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of a single line. The first line of each test case contains a single integer N to be checked for Bleak.
***Output:***\
Print "1" or "0" (without quotes) depending on whether the number is Bleak or not.
***Constraints:***\
1 <= T <= 1000\
1 <= N <= 10000
***Example:***\
***Input:***\
3\
4\
167\
3
***Output:***\
1\
0\
0
```
for _ in range(int(input())):
n=int(input())
for i in range(0,n+1,2):
if (i+bin(i).count('1'))==n:
print(0)
break
else:
print(1)
a
a[1::2]
''+'2'
a=bin(-2)
a
int('1b10',2)
a=list(map(int,input().split()))
xor=0
for i in range(len(a)):
for j in range(i+1,len(a)):
if a[i]^a[j]>xor:
xor=a[i]^a[j]
print(xor)
a[::2]
32-len(a)
a=bin(52)[2:]
a
k=0
a[(len(a)-1)-k]
```
|
github_jupyter
|
# Generative models - variational auto-encoders
### Author: Philippe Esling ([email protected])
In this course we will cover
1. A [quick recap](#recap) on simple probability concepts (and in TensorFlow)
2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs)
3. An explanation of the [implementation](#implem) of VAEs
4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)**
<a id="recap"> </a>
## Quick recap on probability
The field of probability aims to model random or uncertain events. Hence, a random variable $X$ denotes a quantity that is uncertain, such as the result of an experiment (flipping a coin) or the measurement of an uncertain property (measuring the temperature). If we observe several occurrences of the variable $\{\mathbf{x}_{i}\}_{i=1}$, it might take different values on each occasion, but some values may occur more often than others. This information is captured by the _probability distribution_ $p(\mathbf{x})$ of the random variable.
To understand these concepts graphically, we will rely on the `Tensorflow Probability` package.
```
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
```
### Probability distributions
#### Discrete distributions
Let $\mathbf{x}$ be a discrete random variable with range $R_{X}=\{x_1,\cdots,x_n\}$ (finite or countably infinite). The function
\begin{equation}
p_{X}(x_{i})=p(X=x_{i}), \forall i\in\{1,\cdots,n\}
\end{equation}
is called the probability mass function (PMF) of $X$.
Hence, the PMF defines the probabilities of all possible values for a random variable. The above notation allows to express that the PMF is defined for the random variable $X$, so that $p_{X}(1)$ gives the probability that $X=1$. For discrete random variables, the PMF is also called the \textit{probability distribution}. The PMF is a probability measure, therefore it satisfies all the corresponding properties
- $0 \leq p_{X}(x_i) < 1, \forall x_i$
- $\sum_{x_i\in R_{X}} p_{X}(x_i) = 1$
- $\forall A \subset R_{X}, p(X \in A)=\sum_{x_a \in A}p_{X}(x_a)$
A very simple example of discrete distribution is the `Bernoulli` distribution. With this distribution, we can model a coin flip. If we throw the coin a very large number of times, we hope to see on average an equal amount of _heads_ and _tails_.
```
bernoulli = tfp.distributions.Bernoulli(probs=0.5)
samples = bernoulli.sample(10000)
sns.distplot(samples)
plt.title("Samples from a Bernoulli (coin toss)")
plt.show()
```
However, we can also _sample_ from the distribution to have individual values of a single throw. In that case, we obtain a series of separate events that _follow_ the distribution
```
vals = ['heads', 'tails']
samples = bernoulli.sample(10)
for s in samples:
print('Coin is tossed on ' + vals[s])
```
#### Continuous distributions
The same ideas apply to _continuous_ random variables, which can model for instance the height of human beings. If we try to guess the height of someone that we do not know, there is a higher probability that this person will be around 1m70, instead of 20cm or 3m. For the rest of this course, we will use the shorthand notation $p(\mathbf{x})$ for the distribution $p(\mathbf{x}=x_{i})$, which expresses for a real-valued random variable $\mathbf{x}$, evaluated at $x_{i}$, the probability that $\mathbf{x}$ takes the value $x_i$.
One notorious example of such distributions is the Gaussian (or Normal) distribution, which is defined as
\begin{equation}
p(x)=\mathcal{N}(\mu,\sigma)=\frac{1}{\sqrt{2\pi\sigma^{2}}}e^{-\frac{(x-\mu)^{2}}{2\sigma^{2}}}
\end{equation}
Similarly as before, we can observe the behavior of this distribution with the following code
```
normal = tfp.distributions.Normal(loc=0., scale=1.)
samples = normal.sample(10000)
sns.distplot(samples)
plt.title("Samples from a standard Normal")
plt.show()
```
### Comparing distributions (KL divergence)
$
\newcommand{\R}{\mathbb{R}}
\newcommand{\bb}[1]{\mathbf{#1}}
\newcommand{\bx}{\bb{x}}
\newcommand{\by}{\bb{y}}
\newcommand{\bz}{\bb{z}}
\newcommand{\KL}[2]{\mathcal{D}_{\text{KL}}\left[#1 \| #2\right]}$
Originally defined in the field of information theory, the _Kullback-Leibler (KL) divergence_ (usually noted $\KL{p(\bx)}{q(\bx)}$) is a dissimilarity measure between two probability distributions $p(\bx)$ and $q(\bx)$. In the view of information theory, it can be understood as the cost in number of bits necessary for coding samples from $p(\bx)$ by using a code optimized for $q(\bx)$ rather than the code optimized for $p(\bx)$. In the view of probability theory, it represents the amount of information lost when we use $q(\bx)$ to approximate the true distribution $p(\bx)$. %that explicit the cost incurred if events were generated by $p(\bx)$ but charged under $q(\bx)$
Given two probability distributions $p(\bx)$ and $q(\bx)$, the Kullback-Leibler divergence of $q(\bx)$ _from_ $p(\bx)$ is defined to be
\begin{equation}
\KL{p(\bx)}{q(\bx)}=\int_{\R} p(\bx) \log \frac{p(\bx)}{q(\bx)}d\bx
\end{equation}
Note that this dissimilarity measure is \textit{asymmetric}, therefore, we have
\begin{equation}
\KL{p(\bx)}{q(\bx)}\neq \KL{q(\bx)}{p(\bx)}
\end{equation}
This asymmetry also describes an interesting behavior of the KL divergence, depending on the order to which it is evaluated. The KL divergence can either be a _mode-seeking_ or _mode-coverage} measure.
<a id="vae"></a>
## Variational auto-encoders
As we have seen in the previous AE course, VAEs are also a form generative models. However, they are defined from a more sound probabilistic perspective. to find the underlying probability distribution of the data $p(\mathbf{x})$ based on a set of examples in $\mathbf{x}\in\mathbb{R}^{d_{x}}$. To do so, we consider *latent variables* defined in a lower-dimensional space $\mathbf{z}\in\mathbb{R}^{d_{z}}$ ($d_{z} \ll d_{x}$) with the joint probability distribution $p(\mathbf{x}, \mathbf{z}) = p(\mathbf{x} \vert \mathbf{z})p(\mathbf{z})$. Unfortunately, for complex distributions this integral is too complex and cannot be found in closed form.
### Variational inference
The idea of *variational inference* (VI) allows to solve this problem through *optimization* by assuming a simpler approximate distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})\in\mathcal{Q}$ from a family $\mathcal{Q}$ of approximate densities. Hence, the goal is to minimize the difference between this approximation and the real distribution. Therefore, this turns into the optimization problem of minimizing the Kullback-Leibler (KL) divergence between the parametric approximation and the original density
$$
q_{\phi}^{*}(\mathbf{z}\vert \mathbf{x})=\text{argmin}_{q_{\phi}(\mathbf{z} \vert \mathbf{x})\in\mathcal{Q}} \mathcal{D}_{KL} \big[ q_{\phi}\left(\mathbf{z} \vert \mathbf{x}\right) \parallel p\left(\mathbf{z} \vert \mathbf{x}\right) \big]
\tag{2}
$$
By developing this KL divergence and re-arranging terms (the detailed development can be found in [3](#reference1)), we obtain
$$
\log{p(\mathbf{x})} - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z} \vert \mathbf{x}) \big] =
\mathbb{E}_{\mathbf{z}} \big[ \log{p(\mathbf{x} \vert \mathbf{z})}\big] - D_{KL} \big[ q_{\phi}(\mathbf{z} \vert \mathbf{x}) \parallel p(\mathbf{z}) \big]
\tag{3}
$$
This formulation describes the quantity we want to maximize $\log p(\mathbf{x})$ minus the error we make by using an approximate $q$ instead of $p$. Therefore, we can optimize this alternative objective, called the *evidence lower bound* (ELBO)
$$
\begin{equation}
\mathcal{L}_{\theta, \phi} = \mathbb{E} \big[ \log{ p_\theta (\mathbf{x|z}) } \big] - \beta \cdot D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel p_\theta(\mathbf{z}) \big]
\end{equation}
\tag{4}
$$
We can see that this equation involves $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ which *encodes* the data $\mathbf{x}$ into the latent representation $\mathbf{z}$ and a *decoder* $p(\mathbf{x} \vert \mathbf{z})$, which allows generating a data vector $\mathbf{x}$ given a latent configuration $\mathbf{z}$. Hence, this structure defines the *Variational Auto-Encoder* (VAE).
The VAE objective can be interpreted intuitively. The first term increases the likelihood of the data generated given a configuration of the latent, which amounts to minimize the *reconstruction error*. The second term represents the error made by using a simpler posterior distribution $q_{\phi}(\mathbf{z} \vert \mathbf{x})$ compared to the true prior $p_{\theta}(\mathbf{z})$. Therefore, this *regularizes* the choice of approximation $q$ so that it remains close to the true posterior distribution [3].
### Reparametrization trick
Now, while this formulation has some very interesting properties, it involves sampling operations, where we need to draw the latent point $\mathbf{z}$ from the distribution $q_{\phi}(\mathbf{z}\vert\mathbf{x})$. The simplest choice for this variational approximate posterior is a multivariate Gaussian with a diagonal covariance structure (which leads to independent Gaussians on every dimension, called the *mean-field* family) so that
$$
\text{log}q_\phi(\mathbf{z}\vert\mathbf{x}) = \text{log}\mathcal{N}(\mathbf{z};\mathbf{\mu}^{(i)},\mathbf{\sigma}^{(i)})
\tag{5}
$$
where the mean $\mathbf{\mu}^{(i)}$ and standard deviation $\mathbf{\sigma}^{(i)}$ of the approximate posterior are different for each input point and are produced by our encoder parametrized by its variational parameters $\phi$. Now the KL divergence between this distribution and a simple prior $\mathcal{N}(\mathbf{0}, \mathbf{I})$ can be very simply obtained with
$$
D_{KL} \big[ q_\phi(\mathbf{z|x}) \parallel \mathcal{N}(\mathbf{0}, \mathbf{I}) \big] = \frac{1}{2}\sum_{j=1}^{D}\left(1+\text{log}((\sigma^{(i)}_j)^2)+(\mu^{(i)}_j)^2+(\sigma^{(i)}_j)^2\right)
\tag{6}
$$
While this looks convenient, we will still have to perform gradient descent through a sampling operation, which is non-differentiable. To solve this issue, we can use the *reparametrization trick*, which takes the sampling operation outside of the gradient flow by considering $\mathbf{z}^{(i)}=\mathbf{\mu}^{(i)}+\mathbf{\sigma}^{(i)}\odot\mathbf{\epsilon}^{(l)}$ with $\mathbf{\epsilon}^{(l)}\sim\mathcal{N}(\mathbf{0}, \mathbf{I})$
<a id="implem"> </a>
## VAE implementation
As we have seen, VAEs can be simply implemented by decomposing the above series of operations into an `encoder` which represents the distribution $q_\phi(\mathbf{z}\vert\mathbf{x})$, from which we will sample some values $\tilde{\mathbf{z}}$ (using the reparametrization trick) and compute the Kullback-Leibler (KL) divergence. Then, we use these values as input to a `decoder` which represents the distribution $p_\theta(\mathbf{x}\vert\mathbf{z})$ so that we can produce a reconstruction $\tilde{\mathbf{x}}$ and compute the reconstruction error.
Therefore, we can define the VAE based on our previous implementation of the AE that we recall here
```
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
class AE(Model):
def __init__(self, encoder, decoder, encoding_dim):
super(AE, self).__init__()
self.encoding_dim = encoding_dim
self.encoder = encoder
self.decoder = decoder
def call(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
```
In order to move to a probabilistic version, we need to add the latent space sampling mechanism, and change the behavior of our `call` function. This process is implemented in the following `VAE` class.
Note that we purposedly rely on an implementation of the `encode` function where the `encoder` first produces an intermediate representation of size `encoder_dims`. Then, this representation goes through two separate functions for encoding $\mathbf{\mu}$ and $\mathbf{\sigma}$. This provides a clearer implementation but also the added bonus that we can ensure that $\mathbf{\sigma} > 0$
```
class VAE(AE):
def __init__(self, encoder, decoder, encoding_dims, latent_dims):
super(VAE, self).__init__(encoder, decoder, encoding_dims)
self.latent_dims = latent_dims
self.mu = layers.Dense(self.latent_dims, activation='relu')
self.sigma = layers.Dense(self.latent_dims, activation='softplus')
def encode(self, x):
x = self.encoder(x)
mu = self.mu(x)
sigma = self.sigma(x)
return mu, sigma
def decode(self, z):
return self.decoder(z)
def call(self, x):
# Encode the inputs
z_params = self.encode(x)
# Obtain latent samples and latent loss
z_tilde, kl_div = self.latent(x, z_params)
# Decode the samples
x_tilde = self.decode(z_tilde)
return x_tilde, kl_div
def latent(self, x, z_params):
n_batch = x.shape[0]
# Retrieve mean and var
mu, sigma = z_params
# Re-parametrize
q = tfp.distributions.Normal(np.zeros(mu.shape[1]), np.ones(sigma.shape[1]))
z = (sigma * tf.cast(q.sample(n_batch), 'float32')) + mu
# Compute KL divergence
kl_div = -0.5 * tf.reduce_sum(1 + sigma - tf.pow(mu, 2) - tf.exp(sigma))
kl_div = kl_div / n_batch
return z, kl_div
```
Now the interesting aspect of VAEs is that we can define any parametric function as `encoder` and `decoder`, as long as we can optimize them. Here, we will rely on simple feed-forward neural networks, but these can be largely more complex (with limitations that we will discuss later in the tutorial).
```
def construct_encoder_decoder(nin, n_latent = 16, n_hidden = 512, n_classes = 1):
# Encoder network
encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(n_hidden, activation='relu'),
layers.Dense(n_hidden, activation='relu'),
layers.Dense(n_hidden, activation='relu'),
])
# Decoder network
decoder = tf.keras.Sequential([
layers.Dense(n_hidden, activation='relu'),
layers.Dense(n_hidden, activation='relu'),
layers.Dense(nin * n_classes, activation='sigmoid'),
layers.Reshape((28, 28))
])
return encoder, decoder
```
### Evaluating the error
In the definition of the `VAE` class, we directly included the computation of the $D_{KL}$ term to regularize our latent space. However, remember that the complete loss of equation (4) also contains a *reconstruction loss* which compares our reconstructed output to the original data.
While there are several options to compare the error between two elements, there are usually two preferred choices among the generative literature depending on how we consider our problem
1. If we consider each dimension (pixel) to be a binary unit (following a Bernoulli distribution), we can rely on the `binary cross entropy` between the two distributions
2. If we turn our problem to a set of classifications, where each dimension can belong to a given set of *intensity classes*, then we can compute the `multinomial loss` between the two distributions
In the following, we define both error functions and regroup them in the `reconstruction_loss` call (depending on the `num_classes` considered). However, as the `multinomial loss` requires a large computational overhead, and for the sake of simplicity, we will train all our first models by relying on the `binary cross entropy`
```
optimizer = tf.keras.optimizers.Adam(1e-4)
def compute_loss(model, x):
x_tilde, kl_div = model(x)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_tilde, labels=x)
logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2])
return -tf.reduce_mean(logpx_z + kl_div)
@tf.function
def train_step(model, x, optimizer):
"""Executes one training step and returns the loss."""
with tf.GradientTape() as tape:
loss = compute_loss(model, x)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
```
### Optimizing a VAE on a real dataset
For this tutorial, we are going to take a quick shot at a real-life problem by trying to train our VAEs on the `FashionMNIST` dataset. This dataset can be natively used in PyTorch by relying on the `torchvision.datasets` classes as follows
```
# Load (and eventually download) the dataset
(x_train, _), (x_test, _) = fashion_mnist.load_data()
# Normalize the dataset in the [0, 1] range]
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
```
The `FashionMNIST` dataset is composed of simple 28x28 black and white images of different items of clothings (such as shoes, bags, pants and shirts). We put a simple function here to display one batch of the test set (note that we keep a fixed batch from the test set in order to evaluate the different variations that we will try in this tutorial).
```
def plot_batch(batch, nslices=8):
# Create one big image for plot
img = np.zeros(((batch.shape[1] + 1) * nslices, (batch.shape[2] + 1) * nslices))
for b in range(batch.shape[0]):
row = int(b / nslices); col = int(b % nslices)
r_p = row * batch.shape[1] + row; c_p = col * batch.shape[2] + col
img[r_p:(r_p+batch.shape[1]),c_p:(c_p+batch.shape[2])] = batch[b]
im = plt.imshow(img, cmap='Greys', interpolation='nearest'),
return im
# Select a random set of fixed data
fixed_batch = x_test[:64]
print(x_test.shape)
plt.figure(figsize=(10, 10))
plot_batch(fixed_batch);
```
Now based on our proposed implementation, the optimization aspects are defined in a very usual way
```
# Using Bernoulli or Multinomial loss
num_classes = 1
# Number of hidden and latent
n_hidden = 512
n_latent = 2
# Compute input dimensionality
nin = fixed_batch.shape[1] * fixed_batch.shape[2]
# Construct encoder and decoder
encoder, decoder = construct_encoder_decoder(nin, n_hidden = n_hidden, n_latent = n_latent, n_classes = num_classes)
# Build the VAE model
model = VAE(encoder, decoder, n_hidden, n_latent)
```
Now all that is left to do is train the model. We define here a `train_vae` function that we will reuse along the future implementations and variations of VAEs and flows. Note that this function is set to run for only a very few number of `epochs` and also most importantly, *only considers a subsample of the full dataset at each epoch*. This option is just here so that you can test the different models very quickly on any CPU or laptop.
```
def generate_and_save_images(model, epoch, test_sample):
predictions, _ = model(test_sample)
fig = plt.figure(figsize=(4, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(predictions[i, :, :], cmap='gray')
plt.axis('off')
# tight_layout minimizes the overlap between 2 sub-plots
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
epochs=50
test_sample = x_test[0:16, :, :]
for epoch in range(1, epochs + 1):
for train_x in x_train:
train_step(model, tf.expand_dims(train_x, axis=0), optimizer)
loss = tf.keras.metrics.Mean()
for test_x in x_test:
loss(compute_loss(model, tf.expand_dims(test_x, axis=0)))
elbo = -loss.result()
print('Epoch: {}, Test set ELBO: {}'.format(epoch, elbo))
generate_and_save_images(model, epoch, test_sample)
```
### Evaluating generative models
In order to evaluate our upcoming generative models, we will rely on the computation of the Negative Log-Likelihood. This code for the following `evaluate_nll_bpd` is inspired by the [Sylvester flow repository](https://github.com/riannevdberg/sylvester-flows)
```
from scipy.special import logsumexp
def evaluate_nll_bpd(data_loader, model, batch = 500, R = 5):
# Set of likelihood tests
likelihood_test = []
# Go through dataset
for batch_idx, (x, _) in enumerate(data_loader):
for j in range(x.shape[0]):
a = []
for r in range(0, R):
cur_x = x[j].unsqueeze(0)
# Repeat it as batch
x = cur_x.expand(batch, *cur_x.size()[1:]).contiguous()
x = x.view(batch, -1)
x_tilde, kl_div = model(x)
rec = reconstruction_loss(x_tilde, x, average=False)
a_tmp = (rec + kl_div)
a.append(- a_tmp.cpu().data.numpy())
# calculate max
a = np.asarray(a)
a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
likelihood_x = logsumexp(a)
likelihood_test.append(likelihood_x - np.log(len(a)))
likelihood_test = np.array(likelihood_test)
nll = - np.mean(likelihood_test)
# Compute the bits per dim (but irrelevant for binary data)
bpd = nll / (np.prod(nin) * np.log(2.))
return nll, bpd
```
Now we can evaluate our VAE model more formally as follows.
```
# Plot final loss
plt.figure()
plt.plot(losses_kld[:, 0].numpy());
# Evaluate log-likelihood and bits per dim
nll, _ = evaluate_nll_bpd(test_loader, model)
print('Negative Log-Likelihood : ' + str(nll))
```
### Limitations of VAEs - (**exercise**)
Although VAEs are extremely powerful tools, they still have some limitations. Here we list the three most important and known limitations (all of them are still debated and topics of active research).
1. **Blurry reconstructions.** As can be witnessed directly in the results of the previous vanilla VAE implementation, the reconstructions appear to be blurry. The precise origin of this phenomenon is still debated, but the proposed explanation are
1. The use of the KL regularization
2. High variance regions of the latent space
3. The reconstruction criterion (expectation)
4. The use of simplistic latent distributions
2. **Posterior collapse.** The previous *blurry reconstructions* issue can be mitigated by using a more powerful decoder. However, relying on a decoder with a large capacity causes the phenomenon of *posterior collapse* where the latent space becomes useless. A nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/)
3. **Simplistic Gaussian approximation**. In the derivation of the VAE objective, recall that the KL divergence term needs to be computed analytically. Therefore, this forces us to rely on quite simplistic families. However, the Gaussian family might be too simplistic to model real world data
In the present tutorial, we show how normalizing flows can be used to mostly solve the third limitation, while also adressing the two first problems. Indeed, we will see that normalizing flows also lead to sharper reconstructions and also act on preventing posterior collapse
<a id="improve"></a>
## Improving the quality of VAEs
As we discussed in the previous section, several known issues have been reported when using the vanilla VAE implementation. We listed some of the major issues as being
1. **Blurry reconstructions.**
2. **Posterior collapse.**
3. **Simplistic Gaussian approximation**.
Here, we discuss some recent developments that were proposed in the VAE literature and simple adjustments that can be made to (at least partly) alleviate these issues. However, note that some more advanced proposals such as PixelVAE [5](#reference1) and VQ-VAE [6](#reference1) can lead to wider increases in quality
### Reducing the bluriness of reconstructions
In this tutorial, we relied on extremely simple decoder functions, to show how we could easily define VAEs and normalizing flows together. However, the capacity of the decoder obviously directly influences the quality of the final reconstruction. Therefore, we could address this issue naively by using deep networks and of course convolutional layers as we are currently dealing with images.
First you need to construct a more complex encoder and decoder
```
def construct_encoder_decoder_complex(nin, n_latent = 16, n_hidden = 512, n_params = 0, n_classes = 1):
# Encoder network
encoder = ...
# Decoder network
decoder = ...
return encoder, decoder
```
### Preventing posterior collapse with Wasserstein-VAE-MMD (InfoVAE)
As we discussed earlier, the reason behind posterior collapse mostly relates to the KL divergence criterion (a nice intuitive explanation can be found [here](https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/). This can be mitigated by relying on a different criterion, such as regularizing the latent distribution by using the *Maximum Mean Discrepancy* (MMD) instead of the KL divergence. This model was independently proposed as the *InfoVAE* and later also as the *Wasserstein-VAE*.
Here we provide a simple implementation of the `InfoVAEMMD` class based on our previous implementations.
```
def compute_kernel(x, y):
return ...
def compute_mmd(x, y):
return ...
class InfoVAEMMD(VAE):
def __init__(self, encoder, decoder):
super(InfoVAEMMD, self).__init__(encoder, decoder)
def latent(self, x, z_params):
return ...
```
### Putting it all together
Here we combine all these ideas (except for the MMD, which is not adequate as the flow definition already regularizes the latent space without the KL divergence) to perform a more advanced optimization of the dataset. Hence, we will rely on the complex encoder and decoder with gated convolutions, the multinomial loss and the normalizing flows in order to improve the overall quality of our reconstructions.
```
# Size of latent space
n_latent = 16
# Number of hidden units
n_hidden = 256
# Rely on Bernoulli or multinomial
num_classes = 128
# Construct encoder and decoder
encoder, decoder = ...
# Create VAE or (InfoVAEMMD - WAE) model
model_flow_p = ...
# Create optimizer algorithm
optimizer = ...
# Add learning rate scheduler
scheduler = ...
# Launch our optimization
losses_flow_param = ...
```
*NB*: It seems that the multinomial version have a hard time converging. Although I only let this run for 200 epochs and only for a subsampling of 5000 examples, it might need more time, but this might also come from a mistake somewhere in my code ... If you spot something odd please let me know :)
### References
<a id="reference1"></a>
[1] Rezende, Danilo Jimenez, and Shakir Mohamed. "Variational inference with normalizing flows." _arXiv preprint arXiv:1505.05770_ (2015). [link](http://arxiv.org/pdf/1505.05770)
[2] Kingma, Diederik P., Tim Salimans, and Max Welling. "Improving Variational Inference with Inverse Autoregressive Flow." _arXiv preprint arXiv:1606.04934_ (2016). [link](https://arxiv.org/abs/1606.04934)
[3] Kingma, D. P., & Welling, M. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114. (2013). [link](https://arxiv.org/pdf/1312.6114)
[4] Rezende, D. J., Mohamed, S., & Wierstra, D. Stochastic backpropagation and approximate inference in deep generative models. arXiv preprint arXiv:1401.4082. (2014). [link](https://arxiv.org/pdf/1401.4082)
[5] Gulrajani, I., Kumar, K., Ahmed, F., Taiga, A. A., Visin, F., Vazquez, D., & Courville, A. (2016). Pixelvae: A latent variable model for natural images. arXiv preprint arXiv:1611.05013. [link](https://arxiv.org/pdf/1611.05013)
[6] Van den Oord, A., & Vinyals, O. (2017). Neural discrete representation learning. In NIPS 2017 (pp. 6306-6315). [link](http://papers.nips.cc/paper/7210-neural-discrete-representation-learning.pdf)
### Inspirations and resources
https://blog.evjang.com/2018/01/nf1.html
https://github.com/ex4sperans/variational-inference-with-normalizing-flows
https://akosiorek.github.io/ml/2018/04/03/norm_flows.html
https://github.com/abdulfatir/normalizing-flows
https://github.com/riannevdberg/sylvester-flows
|
github_jupyter
|
```
!wget https://datahack-prod.s3.amazonaws.com/train_file/train_LZdllcl.csv -O train.csv
!wget https://datahack-prod.s3.amazonaws.com/test_file/test_2umaH9m.csv -O test.csv
!wget https://datahack-prod.s3.amazonaws.com/sample_submission/sample_submission_M0L0uXE.csv -O sample_submission.csv
# Import the required packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Read the train and test data
train=pd.read_csv("train.csv")
train.drop('employee_id',inplace=True,axis = 1)
test=pd.read_csv("test.csv")
# Check the variables in train data
train.columns
# Print datatype of each variable
train.dtypes
# Dimension of the train dataset
train.shape
# Print the head of train dataset
train.head()
# Unique values in each variable of train dataset
train.nunique()
```
### Univariate Analysis
#### Target Variable
```
train['is_promoted'].value_counts(normalize=True)
# Around 91% trainee have promoted
# Unbalanced dataset
```
#### Categorical Independent Variables
```
plt.figure(1)
plt.subplot(221)
train['department'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Department')
plt.subplot(222)
train['awards_won?'].value_counts(normalize=True).plot.bar(title= 'Awards won')
plt.subplot(223)
train['education'].value_counts(normalize=True).plot.bar(title= 'Education')
plt.subplot(224)
train['gender'].value_counts(normalize=True).plot.bar(title= 'Gender')
plt.show()
# Most of the trainee are enrolled for Y and T program_type.
# More number of trainee enrolment for offline test than online test.
# Most of the test are easy in terms of difficulty level.
train['KPIs_met >80%'].value_counts(normalize=True).plot.bar(title= 'KPI met greater than 80')
plt.figure(1)
plt.subplot(221)
train['region'].value_counts(normalize=True).plot.bar(figsize=(20,10), title= 'Region')
plt.subplot(222)
train['recruitment_channel'].value_counts(normalize=True).plot.bar(title='Recruitment Channels')
plt.subplot(223)
train['no_of_trainings'].value_counts(normalize=True).plot.bar(title= 'No of Trainings')
plt.subplot(224)
train['previous_year_rating'].value_counts(normalize=True).plot.bar(title= 'Previous year ratings')
plt.show()
# More male trainee as compared to female trainee
# Most of the trainee have diploma
# Most of the trainee belongs to tier 3 city
# 10% of the trainee are handicapped
```
#### Numerical Independent Variables
```
sns.distplot(train['age']);
# Most of the trainee are in the age range of 20-30 and 40-50
sns.distplot(train['length_of_service']);
sns.distplot(train['avg_training_score']);
```
### Bivariate Analysis
```
# Correlation between numerical variables
matrix = train.corr()
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(matrix, vmax=.8, square=True, cmap="BuPu");
# Not much correlation between the variables
# program_id vs is_pass
plt.figure(figsize=(12,4))
sns.barplot(train['department'], train['is_promoted'])
plt.figure(figsize=(20,8))
# program_type vs is_pass
sns.barplot(train['region'], train['is_promoted'])
# Trainee in X and Y program type have higher chances to pass the test
# test_type vs is_pass
sns.barplot(train['recruitment_channel'], train['is_promoted'])
# Trainee attending online mode of test have higher chances to pass the test
# difficulty_level vs is_pass
sns.barplot(train['no_of_trainings'], train['is_promoted'])
# If the difficulty level of the test is easy, chances to pass the test are higher
# Gender vs is_pass
sns.barplot(train['previous_year_rating'], train['is_promoted'])
# Gender does not affect the chances to pass the test
# education vs is_pass
plt.figure(figsize=(12,4))
sns.barplot(train['education'], train['is_promoted'])
# Trainee with Masters education level have more chances to pass the test
plt.figure(figsize=(20,8))
# is_handicapped vs is_pass
sns.barplot(train['length_of_service'], train['is_promoted'])
# Handicapped trainee have less chances to pass the test
# city_tier vs is_pass
sns.barplot(train['KPIs_met >80%'], train['is_promoted'])
# Trainee from city tier 1 have higher chances to pass the test
# trainee_engagement_rating vs is_pass
sns.barplot(train['awards_won?'], train['is_promoted'])
# As the trainee engagement rating increases, chances to pass the test also increases
```
### Missing Values Treatment
```
# Check the number of missing values in each variable
train.isnull().sum()
# age and trainee_engagement_rating variables have missing values in it.
test = pd.read_csv('test.csv')
test.drop('employee_id',inplace=True,axis = 1)
test.head()
test['education'].fillna('other',inplace=True)
test['previous_year_rating'].fillna(99,inplace=True)
train['education'].fillna('other',inplace=True)
train['previous_year_rating'].fillna(99,inplace=True)
```
### Logistic Regression
```
train.head()
# Save target variable in separate dataset
X = train.drop('is_promoted',axis=1)
y = train.is_promoted
test.head()
# Apply dummies to the dataset
X=pd.get_dummies(X)
test=pd.get_dummies(test)
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import cross_validation, metrics #Additional scklearn functions
from sklearn.grid_search import GridSearchCV #Perforing grid search
#same function as xgboost tuning one!
def modelfit(alg, dtrain, predictors, performCV=True, printFeatureImportance=True, cv_folds=5):
#Fit the algorithm on the data
alg.fit(dtrain[predictors],y)
#Predict training set:
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
#Perform cross-validation:
if performCV:
cv_score = cross_validation.cross_val_score(alg, dtrain[predictors],y, cv=cv_folds, scoring='f1')
#Print model report:
print("\nModel Report")
print("F1 Score :",metrics.f1_score(y, dtrain_predictions))
if performCV:
print("CV Score : Mean - %.7g | Std - %.7g | Min - %.7g | Max - %.7g" % (np.mean(cv_score),np.std(cv_score),np.min(cv_score),np.max(cv_score)))
#Print Feature Importance:
if printFeatureImportance:
feat_imp = pd.Series(alg.feature_importances_, predictors).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
#Choose all predictors except target & IDcols
predictors = [x for x in X.columns]
gbm0 = GradientBoostingClassifier(random_state=42,verbose = 1)
modelfit(gbm0,X, predictors)
param_test1 = {'n_estimators':np.arange(180,400,20)}
gsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1,verbose = 1, min_samples_split=500,min_samples_leaf=50,max_depth=5,max_features='sqrt',subsample=0.8,random_state=10),
param_grid = param_test1, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose=1)
gsearch1.fit(X,y)
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
#tuning max depth and min samples split
param_test2 = {'max_depth':np.arange(5,10,2),'min_samples_split':np.arange(500,1001,100)}
gsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1,verbose = 1, n_estimators=600, max_features='sqrt', subsample=0.8, random_state=10),
param_grid = param_test2, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose =1)
gsearch2.fit(X,y)
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
#Tuning min_samples_leaf after updating the latest hyperparameter values i.e max_depth and min_samples_split
param_test3 = {'min_samples_leaf':np.arange(50,100,10)}
gsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=600,min_samples_split=600,max_depth=7,max_features='sqrt',verbose = 1, subsample=0.8, random_state=10),
param_grid = param_test3, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose = 1)
gsearch3.fit(X,y)
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
param_test5 = {'subsample':[0.6,0.7,0.75,0.8,0.85,0.9]}
gsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, verbose = 1 , n_estimators=600,max_depth=7,min_samples_split=600, min_samples_leaf=60, subsample=0.8, random_state=10,max_features=7),
param_grid = param_test5, scoring='f1',n_jobs=-1,iid=False, cv=3,verbose = 1)
gsearch5.fit(X,y)
gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_
gbm_tuned_1 = GradientBoostingClassifier(learning_rate=0.1, n_estimators=600,max_depth=7, min_samples_split=600,min_samples_leaf=60, subsample=0.8, random_state=10, max_features=7,verbose=1 )
modelfit(gbm_tuned_1,X,predictors)
pred = gbm_tuned_1.predict(test)
# Read the submission file
submission=pd.read_csv("sample_submission.csv")
submission.head()
# Fill the is_pass variable with the predictions
submission['is_promoted']=pred
submission['is_promoted'] = submission['is_promoted'].astype(np.int64)
submission.head()
submission['is_promoted'].value_counts()
# Converting the submission file to csv format
submission.to_csv('logistic_submission.csv', index=False)
```
score on leaderboard - 0.71145
|
github_jupyter
|
[View in Colaboratory](https://colab.research.google.com/github/3catz/DeepLearning-NLP/blob/master/Time_Series_Forecasting_with_EMD_and_Fully_Convolutional_Neural_Networks_on_the_IRX_data_set.ipynb)
# TIME SERIES FORECASTING -- using Empirical Mode Decomposition with Fully Convolutional Networks for One-step ahead forecasting on the IRX time series.
# Summary:#
A noisy time series is additively decomposed into Intrinsic Mode Functions--oscillating, orthogonal basis functions, using the Empirical Mode Decomposition method pionered by Norden Huang. The IMF components are then used as features for a deep convolutional neural network, which can "learn" the decomposition--divide and conquer--and thereby improve forecasting performance and offer not only forecasting for the series but also the IMF components going into the future. This allows us to focus on forecasting physically significant or interesting IMFs. Note: This is additive, not multiplicative decomposition, which means that you consider the time series to be the sum of various components, rather than the product of various component functions. What it is--or rather, which is the better model--is something you have to explore. It helps to have domain knowledge, though more advanced forms of spectral analysis can also be used to glean insights in this regard.
In this notebook, I demonstrate that using the IMFs a features alongside the original time series can do very well in out-of-sample forecasting, in this case, forecasting 1 step ahead. We used a lookback window of 10 lags from the signal as well as the IMFs to help us predict 1-step ahead in the future. Using the R2 coefficient of determination, we can see that the model can account for over 98% of the variation when applied to an out-of-sample forecast.
# Data#
**IRX opening prices**
IRX is the stock ticker for the [13 Week Treasury Bill](https://finance.yahoo.com/quote/%5EIRX/history/).
I downloaded the data from [Comp-engine.org, a self-organizing database of time series](https://www.comp-engine.org/#!visualize/25c6285e-3872-11e8-8680-0242ac120002) fully accessible to the public.
# Architecture and Process#
1. 4 Conv layers, all from the original input, each with 128 hidden units, filter size of 3, dilation rates exponential powers of 2.
2. Concatenate these 4 layers with the original input--no adding or multiplying, just concatenate on axis = -1.
3. Deconv with hidden units equal to number of IMF-components, in this case 11.
4. Add the predicted IMF-components together to reconstruct the signal, which is your yhat prediction for a step ahead.
5. Compare with ground truth to see how you did.
```
!pip install pyhht
!pip install PeakUtils
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
#import pandas_datareader.data as web
from pandas import Series
from pandas import DataFrame
from pandas import concat
import matplotlib.pyplot as plt
import os
from scipy.integrate import odeint
#keras
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import *
from keras import backend as K
from keras.engine.topology import Layer
import peakutils
#!pip install pyramid-arima
#from pyramid.arima import auto_arima
```
# Utilities: series to supervised
```
def series_to_supervised(data, n_in, n_out, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
```
# Loading Data
```
from google.colab import files
files.upload()
import numpy as np
data = np.fromfile("yourfilehere.dat", sep = "\n")
print(data)
len(data)
import numpy as np
data = np.genfromtxt("FLYahooop_IRX.csv", delimiter = ","); data = np.asarray(data); data.shape
#Plot of Time Series
from scipy.interpolate import interp1d
plt.figure(figsize=(20,6))
plt.plot(data)
plt.tight_layout()
plt.xlim([0,len(data)])
plt.show()
#Scale the Data to -1,1
scaler = MinMaxScaler(feature_range = (-1,1))
scaled_data = scaler.fit_transform(data.reshape(-1,1))
scaled_data.shape
scaled_data = np.squeeze(scaled_data)
scaled_data.shape
scaled_data = np.transpose(scaled_data)
# before you do the EMD, cut out the out of sample part so that the EMDs are not constructed with those future values and information contained within them
in_sample = scaled_data[:-1000]; out_sample = scaled_data[-1000:]
print(in_sample.shape)
```
#Empirical Mode Decomposition
From Yang et. al (2015), a summary:
**Empirical mode decomposition (EMD)** technique to decompose the nonstationary signal into a series of intrinsic mode functions (IMFs) [9–11]. This ability makes HHT competitive in processing various composite signals [12–14]. With HHT, complex signals can be decomposed into multiple single-frequency signals that can further be processed by intrinsic mode function of EMD. *After the nonstationary signals have been decomposed into IMFs through EMD, these signals can easily be obtained by Hilbert transform of each mode function*. By doing so, researchers can obtain the instantaneous frequency and amplitude of each IMF. With the Hilbert spectrum and Hilbert marginal spectrum of IMFs, people can accurately get the joint distribution of energy with frequency and time and further predict whether IoT equipment is normal or not. Compared with FFT and VT, HHT is a strong adaptive time frequency analysis method.
```
from pyhht.emd import EMD
from pyhht.visualization import plot_imfs
decomposer1 = EMD(in_sample, maxiter = 10000)
imfs1 = decomposer1.decompose()
print("There are a total of %s IMFs" % len(imfs1))
#Plot the IMFs, from highest frequency to lowest. The last one should be a monotonic trend function. It is known as the residue,
#the irreducible trend left after the detrending of the EMD process.
for i in range(len(imfs1)):
fig, ax = plt.subplots(figsize=(25,2))
fig = plt.plot(imfs1[i])
plt.show()
import numpy as np
import pylab as plt
from scipy.signal import hilbert
#from PyEMD import EMD
def instant_phase(imfs):
"""Extract analytical signal through Hilbert Transform."""
analytic_signal = hilbert(imfs) # Apply Hilbert transform to each row
phase = np.unwrap(np.angle(analytic_signal)) # Compute angle between img and real
return phase
t = np.linspace(0,len(scaled_data),len(scaled_data))
dt = 1
# Extract instantaneous phases and frequencies using Hilbert transform
instant_phases = instant_phase(imfs1)
instant_freqs = np.diff(instant_phases)/(2*np.pi*dt)
# Create a figure consisting of 3 panels which from the top are the input signal, IMFs and instantaneous frequencies
fig, axes = plt.subplots(3, figsize=(20,18))
# The top panel shows the input signal
ax = axes[0]
ax.plot(t, scaled_data)
ax.set_ylabel("Amplitude [arb. u.]")
ax.set_title("Input signal Channel 1")
# The middle panel shows all IMFs
ax = axes[1]
for num, imf in enumerate(imfs1):
ax.plot(t, imf, label='IMF %s' %(num+1))
# Label the figure
ax.legend()
ax.set_ylabel("Amplitude [arb. u.]")
ax.set_title("IMFs")
# The bottom panel shows all instantaneous frequencies
ax = axes[2]
for num, instant_freq in enumerate(instant_freqs):
ax.plot(t[:-1], instant_freq, label='IMF %s'%(num+1))
# Label the figure
ax.legend()
ax.set_xlabel("Time [s]")
ax.set_ylabel("Inst. Freq. [Hz]")
ax.set_title("Huang-Hilbert Transform")
plt.tight_layout()
plt.savefig('hht_example', dpi=120)
plt.show()
```
# Creating Datasets
*Raw Data, using a certian number of lags; most of my experimentation has beeen with either 10 or 20.
```
in_sample = in_sample.reshape(-1,1); print(in_sample.shape)
lookback = 10
data_f = series_to_supervised(in_sample, n_in = lookback, n_out = 1, dropnan = True)
print(data_f.shape)
data_f = np.asarray(data_f)
Xr = data_f[:,:-1]
Y = data_f[:,-1]
print(Xr.shape, Y.shape)
```
# Use the IMFs--which are time series of equal length as the original signal, as features for convolutional/recurrent network.
```
imfs1.shape
imfs1 = np.transpose(imfs1, (1,0)); imfs1.shape
imf_df = series_to_supervised(imfs1, n_in = lookback, n_out = 1, dropnan = True)
imf_df = np.expand_dims(imf_df, axis = 1)
print(imf_df.shape)
imf_df = np.reshape(imf_df, (imf_df.shape[0], (lookback +1), imfs1.shape[-1]))
print(imf_df.shape)
targets = imf_df[:,-1,:]
print(targets.shape)
print(Xr.shape)
#so reshape everything properly
input_data = np.reshape(Xr, (targets.shape[0],1,lookback))
targets = np.reshape(targets,(targets.shape[0],1,targets.shape[1]))
print(input_data.shape, targets.shape)
#test Y values--completely out of sample. The calculation of the IMFs
#was not influenced by these values. No information contamination from future to past.
out_df = series_to_supervised(out_sample.reshape(-1,1), n_in = lookback, n_out = 1, dropnan = True)
print(out_df.shape); out_df = np.asarray(out_df)
testY = out_df[:,-1]
testX = out_df[:,:-1]
testX = np.expand_dims(testX, axis = 1)
print(testX.shape,testY.shape)
```
# Partial autocorrelation
If you were doing SARIMA analysis, you would want to know if this series is autoregressive and to what extent. this helps when calculating a good lag for prediction, that is, how many past values you need to accurately predict a future value.
```
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
fig, axes = plt.subplots(2, figsize=(20,6))
fig1 = plot_acf(scaled_data,lags = 60, ax = axes[0])
fig2 = plot_pacf(scaled_data, lags = 100, ax = axes[1])
plt.show()
```
# Network Architecture and Model fitting
```
from keras.layers.advanced_activations import *
from keras.regularizers import l1, l2
from sklearn.metrics import r2_score
import keras.backend as K
from keras.layers import ConvLSTM2D
from keras.layers import LeakyReLU
np.random.seed(2018) #inputs are (1, 20) and outputs are #(1 time step,17 features)
def convs(x, n, f, rate, bn = False):
x = Conv1D(n, f, padding = "causal", dilation_rate = rate, activation="tanh")(x)
if bn == False:
x = x
else:
x = BatchNormalization()(x)
return x
inputs = Input(shape = (1, lookback))
x = convs(x = inputs, n = 128, f = 3, rate = 2, bn = False)
y = convs(x = inputs, n = 128, f = 3, rate = 4, bn = False)
u = convs(x = inputs, n = 128, f = 3, rate = 8, bn = False)
v = convs(x = inputs, n = 128, f = 3, rate = 16, bn = False)
z = concatenate([inputs, x, y, u, v], axis = -1)
z = Activation("tanh")(z)
z = Dropout(0.3)(z)
predictions = Conv1D(11, 3, padding = "causal", dilation_rate = 1)(z)
model = Model(inputs = inputs, outputs = predictions)
opt = adam(lr = 1e-3, clipnorm = 1.)
reduce_lr = ReduceLROnPlateau(monitor='loss', factor = 0.9, patience = 3, min_lr = 1e-5, verbose = 1)
checkpointer = ModelCheckpoint(filepath = "timeseries_weights.hdf5", verbose = 1, save_best_only = True)
early = EarlyStopping(monitor = 'loss', min_delta = 1e-4, patience = 10, verbose = 1)
model.compile(optimizer=opt, loss='mse', metrics = [])
model.summary()
history = model.fit(input_data, targets,
epochs = 20,
batch_size = 128,
verbose = 1,
#validation_data = (validX, validY),
callbacks = [reduce_lr, early],
shuffle = False)
```
```
preds = model.predict(testX, batch_size = 1)
summed = np.sum(preds, axis = -1); print(summed.shape)
test_preds = summed[:,0]
plt.plot(test_preds)
```
# R2 analysis#
In statistics, the coefficient of determination, denoted R2 or r2 and pronounced "R squared", is the proportion of the variance in the dependent variable that is predictable from the independent variable(s).
It is a statistic used in the context of statistical models whose main purpose is either the prediction of future outcomes or the testing of hypotheses, on the basis of other related information. It provides a measure of how well observed outcomes are replicated by the model, based on the proportion of total variation of outcomes explained by the model.[1][2][3]
```
print("Final R2 Score is: {}".format(r2_score(testY, test_preds)))
fig = plt.figure(figsize = (20,6))
fig = plt.plot(test_preds, label = "PREDICTIONS")
fig = plt.plot(testY, label = "TRUE DATA")
plt.xlim([0,990])
plt.legend()
plt.show()
plt.clf()
plt.cla()
plt.close()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/unicamp-dl/IA025_2022S1/blob/main/ex07/Guilherme_Pereira/Aula_7_Guilherme_Pereira.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
nome = 'Guilherme Pereira'
print(f'Meu nome é {nome}')
```
# Exercício: Modelo de Linguagem (Bengio 2003) - MLP + Embeddings
Neste exercício iremos treinar uma rede neural simples para prever a proxima palavra de um texto, data as palavras anteriores como entrada. Esta tarefa é chamada de "Modelagem da Língua".
Este dataset já possui um tamanho razoável e é bem provável que você vai precisar rodar seus experimentos com GPU.
Alguns conselhos úteis:
- **ATENÇÃO:** o dataset é bem grande. Não dê comando de imprimí-lo.
- Durante a depuração, faça seu dataset ficar bem pequeno, para que a depuração seja mais rápida e não precise de GPU. Somente ligue a GPU quando o seu laço de treinamento já está funcionando
- Não deixe para fazer esse exercício na véspera. Ele é trabalhoso.
```
# iremos utilizar a biblioteca dos transformers para ter acesso ao tokenizador do BERT.
!pip install transformers
```
## Importação dos pacotes
```
import collections
import itertools
import functools
import math
import random
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm_notebook
# Check which GPU we are using
!nvidia-smi
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print('Using {}'.format(device))
```
## Implementação do MyDataset
```
from typing import List
def tokenize(text: str, tokenizer):
return tokenizer(text, return_tensors=None, add_special_tokens=False).input_ids
class MyDataset():
def __init__(self, texts: List[str], tokenizer, context_size: int):
# Escreva seu código aqui
self.tokens, self.target = [], []
for text in texts:
ids = tokenize(text, tokenizer)
for i in range(len(ids)-context_size):
self.tokens.append(ids[i:i + context_size])
self.target.append(ids[i + context_size])
self.tokens = torch.tensor(self.tokens)
self.target = torch.tensor(self.target)
def __len__(self):
# Escreva seu código aqui
return len(self.target)
def __getitem__(self, idx):
# Escreva seu código aqui
return self.tokens[idx], self.target[idx]
```
## Teste se sua implementação do MyDataset está correta
```
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
dummy_texts = ['Eu gosto de correr', 'Ela gosta muito de comer pizza']
dummy_dataset = MyDataset(texts=dummy_texts, tokenizer=tokenizer, context_size=3)
dummy_loader = DataLoader(dummy_dataset, batch_size=6, shuffle=False)
assert len(dummy_dataset) == 5
print('passou no assert de tamanho do dataset')
first_batch_input, first_batch_target = next(iter(dummy_loader))
correct_first_batch_input = torch.LongTensor(
[[ 3396, 10303, 125],
[ 1660, 5971, 785],
[ 5971, 785, 125],
[ 785, 125, 1847],
[ 125, 1847, 13779]])
correct_first_batch_target = torch.LongTensor([13239, 125, 1847, 13779, 15616])
assert torch.equal(first_batch_input, correct_first_batch_input)
print('Passou no assert de input')
assert torch.equal(first_batch_target, correct_first_batch_target)
print('Passou no assert de target')
```
# Carregamento do dataset
Iremos usar uma pequena amostra do dataset [BrWaC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) para treinar e avaliar nosso modelo de linguagem.
```
!wget -nc https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula7/sample_brwac.txt
# Load datasets
context_size = 9
valid_examples = 100
test_examples = 100
texts = open('sample_brwac.txt').readlines()
# print('Truncating for debugging purposes.')
# texts = texts[:500]
training_texts = texts[:-(valid_examples + test_examples)]
valid_texts = texts[-(valid_examples + test_examples):-test_examples]
test_texts = texts[-test_examples:]
training_dataset = MyDataset(texts=training_texts, tokenizer=tokenizer, context_size=context_size)
valid_dataset = MyDataset(texts=valid_texts, tokenizer=tokenizer, context_size=context_size)
test_dataset = MyDataset(texts=test_texts, tokenizer=tokenizer, context_size=context_size)
print(f'training examples: {len(training_dataset)}')
print(f'valid examples: {len(valid_dataset)}')
print(f'test examples: {len(test_dataset)}')
class LanguageModel(torch.nn.Module):
def __init__(self, vocab_size, context_size, embedding_dim, hidden_size):
"""
Implements the Neural Language Model proposed by Bengio et al."
Args:
vocab_size (int): Size of the input vocabulary.
context_size (int): Size of the sequence to consider as context for prediction.
embedding_dim (int): Dimension of the embedding layer for each word in the context.
hidden_size (int): Size of the hidden layer.
"""
# Escreva seu código aqui.
super(LanguageModel, self).__init__()
self.context_size = context_size
self.embeddings_dim = embedding_dim
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.hidden_layer1 = nn.Linear(self.context_size*self.embeddings_dim, hidden_size*4)
self.hidden_layer2 = nn.Linear(hidden_size*4, hidden_size*2)
self.hidden_layer3 = nn.Linear(hidden_size*2, hidden_size)
self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)
self.relu = nn.ReLU()
def forward(self, inputs):
"""
Args:
inputs is a LongTensor of shape (batch_size, context_size)
"""
# Escreva seu código aqui.
out = self.embeddings(inputs).view(-1, self.context_size*self.embeddings_dim)
out = self.relu(self.hidden_layer1(out))
out = self.relu(self.hidden_layer2(out))
out = self.relu(self.hidden_layer3(out))
return self.output_layer(out)
```
## Teste o modelo com um exemplo
```
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
context_size=context_size,
embedding_dim=64,
hidden_size=128,
).to(device)
sample_train, _ = next(iter(DataLoader(training_dataset)))
sample_train_gpu = sample_train.to(device)
model(sample_train_gpu).shape
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Number of model parameters: {num_params}')
```
## Assert da Perplexidade
```
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
def perplexity(logits, target):
"""
Computes the perplexity.
Args:
logits: a FloatTensor of shape (batch_size, vocab_size)
target: a LongTensor of shape (batch_size,)
Returns:
A float corresponding to the perplexity.
"""
# Escreva seu código aqui.
return torch.exp(nn.functional.cross_entropy(logits,target))
n_examples = 1000
sample_train, target_token_ids = next(iter(DataLoader(training_dataset, batch_size=n_examples)))
sample_train_gpu = sample_train.to(device)
target_token_ids = target_token_ids.to(device)
logits = model(sample_train_gpu)
my_perplexity = perplexity(logits=logits, target=target_token_ids)
print(f'my perplexity: {int(my_perplexity)}')
print(f'correct initial perplexity: {tokenizer.vocab_size}')
assert math.isclose(my_perplexity, tokenizer.vocab_size, abs_tol=2000)
print('Passou o no assert da perplexidade')
```
## Laço de Treinamento e Validação
```
max_examples = 200_000_000
eval_every_steps = 5000
lr = 3.5e-5
batch_size = 1024
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
context_size=context_size,
embedding_dim=128,
hidden_size=256,
).to(device)
train_loader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
validation_loader = DataLoader(valid_dataset, batch_size=batch_size)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
def train_step(input, target):
model.train()
model.zero_grad()
logits = model(input.to(device))
loss = nn.functional.cross_entropy(logits, target.to(device))
loss.backward()
optimizer.step()
return loss.item()
def validation_step(input, target):
model.eval()
logits = model(input)
loss = nn.functional.cross_entropy(logits, target)
return loss.item()
train_losses = []
n_examples = 0
step = 0
ver = 0
while n_examples < max_examples:
for input, target in train_loader:
loss = train_step(input.to(device), target.to(device))
train_losses.append(loss)
if step % eval_every_steps == 0:
train_ppl = np.exp(np.average(train_losses))
with torch.no_grad():
valid_ppl = np.exp(np.average([
validation_step(input.to(device), target.to(device))
for input, target in validation_loader]))
print(f'{step} steps; {n_examples} examples so far; train ppl: {train_ppl:.2f}, valid ppl: {valid_ppl:.2f}')
train_losses = []
n_examples += len(input) # Increment of batch size
step += 1
if n_examples >= max_examples:
break
```
## Avaliação final no dataset de teste
Bonus: o modelo com menor perplexidade no dataset de testes ganhará 0.5 ponto na nota final.
```
test_loader = DataLoader(test_dataset, batch_size=64)
with torch.no_grad():
test_ppl = np.exp(np.average([
validation_step(input.to(device), target.to(device))
for input, target in test_loader
]))
print(f'test perplexity: {test_ppl}')
```
## Teste seu modelo com uma sentença
Escolha uma sentença gerada pelo modelo que ache interessante.
```
prompt = 'Eu estou sozinho, sinto muita falta da minha namorada'
max_output_tokens = 10
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
input_ids_truncated = input_ids[-context_size:] # Usamos apenas os últimos <context_size> tokens como entrada para o modelo.
logits = model(torch.LongTensor([input_ids_truncated]).to(device))
# Ao usarmos o argmax, a saída do modelo em cada passo é token de maior probabilidade.
# Isso se chama decodificação gulosa (greedy decoding).
predicted_id = torch.argmax(logits).item()
input_ids += [predicted_id] # Concatenamos a entrada com o token escolhido nesse passo.
prompt = tokenizer.decode(input_ids)
print(prompt)
```
|
github_jupyter
|
# Network Training
## Includes
```
# mass includes
import os, sys, warnings
import ipdb
import torch as t
import torchnet as tnt
from tqdm.notebook import tqdm
# add paths for all sub-folders
paths = [root for root, dirs, files in os.walk('.')]
for item in paths:
sys.path.append(item)
from ipynb.fs.full.config import r2rNetConf
from ipynb.fs.full.monitor import Visualizer
from ipynb.fs.full.network import r2rNet
from ipynb.fs.full.dataLoader import r2rSet
from ipynb.fs.full.util import *
```
## Initialization
```
# for debugging only
%pdb off
warnings.filterwarnings('ignore')
# choose GPU if available
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
device = t.device('cuda' if t.cuda.is_available() else 'cpu')
# define model
opt = r2rNetConf()
model = r2rNet().to(device)
# load pre-trained model if necessary
if opt.save_root:
last_epoch = model.load(opt.save_root)
last_epoch += opt.save_epoch
else:
last_epoch = 0
# dataloader for training
train_dataset = r2rSet(opt, mode='train')
train_loader = t.utils.data.DataLoader(train_dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True)
# dataloader for validation
val_dataset = r2rSet(opt, mode='val')
val_loader = t.utils.data.DataLoader(val_dataset)
# optimizer
last_lr = opt.lr * opt.lr_decay**(last_epoch // opt.upd_freq)
optimizer = t.optim.Adam(model.parameters(), lr=last_lr)
scheduler = t.optim.lr_scheduler.StepLR(optimizer,
step_size=opt.upd_freq,
gamma=opt.lr_decay)
# visualizer
vis = Visualizer(env='r2rNet', port=8686)
loss_meter = tnt.meter.AverageValueMeter()
```
## Validation
```
def validate():
# set to evaluation mode
model.eval()
psnr = 0.0
for (raw_patch, srgb_patch, cam_wb) in val_loader:
with t.no_grad():
# copy to device
raw_patch = raw_patch.to(device)
srgb_patch = srgb_patch.to(device)
rggb_patch = toRGGB(srgb_patch)
cam_wb = cam_wb.to(device)
# inference
pred_patch = model(rggb_patch, cam_wb)
pred_patch = t.clamp(pred_patch, 0.0, 1.0)
# compute psnr
mse = t.mean((pred_patch - raw_patch)**2)
psnr += 10 * t.log10(1 / mse)
psnr /= len(val_loader)
# set to training mode
model.train(mode=True)
return psnr
```
## Training entry
```
for epoch in tqdm(range(last_epoch, opt.max_epoch),
desc='epoch',
total=opt.max_epoch - last_epoch):
# reset meter and update learning rate
loss_meter.reset()
scheduler.step()
for (raw_patch, srgb_patch, cam_wb) in train_loader:
# reset gradient
optimizer.zero_grad()
# copy to device
raw_patch = raw_patch.to(device)
srgb_patch = srgb_patch.to(device)
rggb_patch = toRGGB(srgb_patch)
cam_wb = cam_wb.to(device)
# inference
pred_patch = model(rggb_patch, cam_wb)
# compute loss
loss = t.mean(t.abs(pred_patch - raw_patch))
# backpropagation
loss.backward()
optimizer.step()
# add to loss meter for logging
loss_meter.add(loss.item())
# show training status
vis.plot('loss', loss_meter.value()[0])
gt_img = raw2Img(raw_patch[0, :, :, :],
wb=opt.d65_wb,
cam_matrix=opt.cam_matrix)
pred_img = raw2Img(pred_patch[0, :, :, :],
wb=opt.d65_wb,
cam_matrix=opt.cam_matrix)
vis.img('gt/pred/mask', t.cat([gt_img, pred_img], dim=2).cpu() * 255)
# save model and do validation
if (epoch + 1) > opt.save_epoch or (epoch + 1) % 50 == 0:
model.save()
psnr = validate()
vis.log('epoch: %d, psnr: %.2f' % (epoch, psnr))
```
|
github_jupyter
|
```
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
%matplotlib inline
#Importamos nuestros módulos y clases necesarias
import Image_Classifier as img_clf
import Labeled_Image as li
import classifiers as clfs
from skimage import io
from skimage.color import rgb2gray
from skimage.transform import rescale
import matplotlib.pyplot as plt
from IPython.display import display
import fileupload
import os
import PIL.Image
import io as io2
import numpy as np
# Inicializamos la clase que se encarga de clasificar imagenes
clf = img_clf.Image_Classifier(clfs.classifiers.get('svm'))
lbl_img = li.Labeled_Image(clf)
''' Función que se encarga de aplicar las operaciones
necesarias para convertir los datos obtenidos del FileUpload
en una imagen'''
def imageConverter(change):
ch = change['owner']
image = io2.BytesIO(ch.data)
image = PIL.Image.open(image)
image = np.array(image)
return rgb2gray(image)
'''Función mediante la que indicamos el clasificador
con el que clasificaremos la imagen'''
def set_classifier_wrapper(classifier_index):
clf.set_classifier(clfs.classifiers[classifier_index][0],
is_probs_classifier = clfs.classifiers[classifier_index][1])
'''Función que nos permite mostrar la imagen'''
def plotter_wrapper():
lbl_img.boxes_generator_with_nms()
lbl_img.plotter()
''' Función mediante la que escogemos la imagen'''
def _upload(lbl_img):
_upload_widget = fileupload.FileUploadWidget()
def _cb(change):
image = imageConverter(change)
lbl_img.set_image(image)
#lbl_img.predict()
_upload_widget.observe(_cb, names='data')
display(_upload_widget)
'''Función que nos permite mostrar la imagen'''
def rescale_image_selector(lbl_img, rescale_coef):
if lbl_img.get_original_image() is not None:
lbl_img.image_rescale(rescale_coef)
def patch_size_selector(Ni, Nj):
clf.set_patch_size((Ni,Nj))
clf_button = widgets.Button(description="Clasificar")
def on_button_clicked(b):
# Etiquetamos imagen
lbl_img.predict()
# Y la mostramos
plotter_wrapper()
#clf_button.on_click(on_button_clicked)#, clf)
def step_size_selector(istep, jstep):
clf.set_istep(istep)
clf.set_jstep(jstep)
def probabilities_selector(probs):
lbl_img.set_probs(probs)
lbl_img.predict()
plotter_wrapper()
def alfa_selector(alfa):
lbl_img.set_alfa(alfa)
# Mostramos el widget que permita elegir el clasificador
interact(set_classifier_wrapper, classifier_index = list(clfs.classifiers.keys()));
# Mostramos el widget que permita elegir la imagen a clasificar
_upload(lbl_img)
# Permitimos escoger el rescalado de la imagen, por defecto 1
interact(rescale_image_selector, rescale_coef=(0.3,1,0.001), lbl_img=fixed(lbl_img))
# Permitimos escoger el tamaño de alto y ancho para
# las subdivisiones de la ventana
#interact(patch_size_selector, Ni=(0,100), Nj=(0,100))
# Permitimos escoger el tamaño del salto
# en las subdivisiones de la imagen
interact(step_size_selector, istep=(0,100), jstep=(0,100))
interact(alfa_selector, alfa=(0,1,0.001))
# Por ultimo, mostramos la imagen y permitimos que muestre las ventanas
# en función de las probabilidades
interact_manual(probabilities_selector, probs=(0.5,1,0.001))
# LLamar al clasificador
#display(clf_button)
```
|
github_jupyter
|
# COMP90051 Workshop 3
## Logistic regression
***
In this workshop we'll be implementing L2-regularised logistic regression using `scipy` and `numpy`.
Our key objectives are:
* to become familiar with the optimisation problem that sits behind L2-regularised logistic regression;
* to apply polynomial basis expansion and recognise when it's useful; and
* to experiment with the effect of L2 regularisation.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
### 1. Binary classification data
Let's begin by generating some binary classification data.
To make it easy for us to visualise the results, we'll stick to a two-dimensional feature space.
```
from sklearn.datasets import make_circles
X, Y = make_circles(n_samples=300, noise=0.1, factor=0.7, random_state=90051)
plt.plot(X[Y==0,0], X[Y==0,1], 'o', label = "y=0")
plt.plot(X[Y==1,0], X[Y==1,1], 's', label = "y=1")
plt.legend()
plt.xlabel("$x_0$")
plt.ylabel("$x_1$")
plt.show()
```
**Question:** What's interesting about this data? Do you think logistic regression will perform well?
**Answer:** *This question is answered in section 3.*
In preparation for fitting and evaluating a logistic regression model, we randomly partition the data into train/test sets. We use the `train_test_split` function from `sklearn`.
```
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=90051)
print("Training set has {} instances. Test set has {} instances.".format(X_train.shape[0], X_test.shape[0]))
```
### 2. Logistic regression objective function
Recall from lectures, that logistic regression models the distribution of the binary class $y$ *conditional* on the feature vector $\mathbf{x}$ as
$$
y | \mathbf{x} \sim \mathrm{Bernoulli}[\sigma(\mathbf{w}^T \mathbf{x} + b)]
$$
where $\mathbf{w}$ is the weight vector, $b$ is the bias term and $\sigma(z) = 1/(1 + e^{-z})$ is the logistic function.
To simplify the notation, we'll collect the model parameters $\mathbf{w}$ and $b$ in a single vector $\mathbf{v} = [b, \mathbf{w}]$.
Fitting this model amounts to choosing $\mathbf{v}$ that minimises the sum of cross-entropies over the instances ($i = 1,\ldots,n$) in the training set
$$
f_\mathrm{cross-ent}(\mathbf{v}; \mathbf{X}, \mathbf{Y}) = - \sum_{i = 1}^{n} \left\{ y_i \log \sigma(\mathbf{w}^T \mathbf{x}_i + b) + (1 - y_i) \log (1 - \sigma(\mathbf{w}^T \mathbf{x}_i + b)) \right\}
$$
Often a regularisation term of the form $f_\mathrm{reg}(\mathbf{w}; \lambda) = \frac{1}{2} \lambda \mathbf{w}^T \mathbf{w}$ is added to the objective to penalize large weights (this can help to prevent overfitting). Note that $\lambda \geq 0$ controls the strength of the regularisation term.
Putting this together, our goal is to minimise the following objective function with respect to $\mathbf{w}$ and $b$:
$$
f(\mathbf{v}; \mathbf{X}, \mathbf{Y}, \lambda) = f_\mathrm{reg}(\mathbf{w}; \lambda) + f_\mathrm{cross-ent}(\mathbf{v}; \mathbf{X}, \mathbf{Y})
$$
**Question:** Why aren't we regularising the entire parameter vector $\mathbf{v}$? Notice that only $\mathbf{w}$ is included in $f_\mathrm{reg}$—in other words $b$ is excluded from regularisation.
**Answer:** *If we were to replace $\mathbf{w}$ with $\mathbf{v}$ in the regularisation term, we'd be penalising large $b$. This is not a good idea, because a large bias may be required for some data sets—and restricting the bias doesn't help with generalisation.*
We're going to find a solution to this minimisation problem using the BFGS algorithm (named after the inventors Broyden, Fletcher, Goldfarb and Shanno). BFGS is a "hill-climbing" algorithm like gradient descent, however it additionally makes use of second-order derivative information (by approximating the Hessian). It converges in fewer iterations than gradient descent (it's convergence rate is *superlinear* whereas gradient descent is only *linear*).
We'll use an implementation of BFGS provided in `scipy` called `fmin_bfgs`. The algorithm requires two functions as input: (i) a function that evaluates the objective $f(\mathbf{v}; \ldots)$ and (ii) a function that evalutes the gradient $\nabla_{\mathbf{v}} f(\mathbf{v}; \ldots)$.
Let's start by writing a function to compute $f(\mathbf{v}; \ldots)$.
```
from scipy.special import expit # this is the logistic function
# v: parameter vector
# X: feature matrix
# Y: class labels
# Lambda: regularisation constant
def obj_fn(v, X, Y, Lambda):
prob_1 = expit(np.dot(X,v[1::]) + v[0])
reg_term = 0.5 * Lambda * np.dot(v[1::],v[1::]) # fill in
cross_entropy_term = - np.dot(Y, np.log(prob_1)) - np.dot(1. - Y, np.log(1. - prob_1))
return reg_term + cross_entropy_term # fill in
```
Now for the gradient, we use the following result (if you're familiar with vector calculus, you may wish to derive this yourself):
$$
\nabla_{\mathbf{v}} f(\mathbf{v}; \ldots) = \left[\frac{\partial f(\mathbf{w}, b;\ldots)}{\partial b}, \nabla_{\mathbf{w}} f(\mathbf{w}, b; \ldots) \right] = \left[\sum_{i = 1}^{n} \sigma(\mathbf{w}^T \mathbf{x}_i + b) - y_i, \lambda \mathbf{w} + \sum_{i = 1}^{n} (\sigma(\mathbf{w}^T \mathbf{x}_i + b) - y_i)\mathbf{x}_i\right]
$$
The function below implements $\nabla_{\mathbf{v}} f(\mathbf{v}; \ldots)$.
```
# v: parameter vector
# X: feature matrix
# Y: class labels
# Lambda: regularisation constant
def grad_obj_fn(v, X, Y, Lambda):
prob_1 = expit(np.dot(X, v[1::]) + v[0])
grad_b = np.sum(prob_1 - Y)
grad_w = Lambda * v[1::] + np.dot(prob_1 - Y, X)
return np.insert(grad_w, 0, grad_b)
```
### 3. Solving the minimization problem using BFGS
Now that we've implemented functions to compute the objective and the gradient, we can plug them into `fmin_bfgs`.
Specifically, we define a function `my_logistic_regression` which calls `fmin_bfgs` and returns the optimal weight vector.
```
from scipy.optimize import fmin_bfgs
# X: feature matrix
# Y: class labels
# Lambda: regularisation constant
# v_initial: initial guess for parameter vector
def my_logistic_regression(X, Y, Lambda, v_initial, disp=True):
# Function for displaying progress
def display(v):
print('v is', v, 'objective is', obj_fn(v, X, Y, Lambda))
return fmin_bfgs(f=obj_fn, fprime=grad_obj_fn,
x0=v_initial, args=(X, Y, Lambda), disp=disp,
callback=display)
```
Let's try it out!
```
Lambda = 1
v_initial = np.zeros(X_train.shape[1] + 1) # fill in a vector of zeros of appropriate length
v_opt = my_logistic_regression(X_train, Y_train, Lambda, v_initial)
# Function to plot the data points and decision boundary
def plot_results(X, Y, v, trans_func = None):
# Scatter plot in feature space
plt.plot(X[Y==0,0], X[Y==0,1], 'o', label = "y=0")
plt.plot(X[Y==1,0], X[Y==1,1], 's', label = "y=1")
# Compute axis limits
x0_lower = X[:,0].min() - 0.1
x0_upper = X[:,0].max() + 0.1
x1_lower = X[:,1].min() - 0.1
x1_upper = X[:,1].max() + 0.1
# Generate grid over feature space
x0, x1 = np.mgrid[x0_lower:x0_upper:.01, x1_lower:x1_upper:.01]
grid = np.c_[x0.ravel(), x1.ravel()]
if (trans_func is not None):
grid = trans_func(grid) # apply transformation to features
arg = (np.dot(grid, v[1::]) + v[0]).reshape(x0.shape)
# Plot decision boundary (where w^T x + b == 0)
plt.contour(x0, x1, arg, levels=[0], cmap="Greys", vmin=-0.2, vmax=0.2)
plt.legend()
plt.show()
plot_results(X, Y, v_opt)
```
**Question:** Is the solution what you expected? Is it a good fit for the data?
**Answer:** *It's not a good fit because logistic regression is a linear classifier, and the data is not linearly seperable.*
**Question:** What's the accuracy of this model? Fill in the code below assuming the following decision function
$$
\hat{y} = \begin{cases}
1, &\mathrm{if} \ p(y = 1|\mathbf{x}) \geq \tfrac{1}{2}, \\
0, &\mathrm{otherwise}.
\end{cases}
$$
```
from sklearn.metrics import accuracy_score
Y_test_pred = ((np.dot(X_test, v_opt[1::]) + v_opt[0]) >= 0)*1 # fill in
accuracy_score(Y_test, Y_test_pred)
```
### 4. Adding polynomial features
We've seen that ordinary logistic regression does poorly on this data set, because the data is not linearly separable in the $x_0,x_1$ feature space.
We can get around this problem using basis expansion. In this case, we'll augment the feature space by adding polynomial features of degree 2. In other words, we replace the original feature matrix $\mathbf{X}$ by a transformed feature matrix $\mathbf{\Phi}$ which contains additional columns corresponding to $x_0^2$, $x_0 x_1$ and $x_1^2$. This is done using the function `add_quadratic_features` defined below.
**Note:** There's a built-in function in `sklearn` for adding polynomial features located at `sklearn.preprocessing.PolynomialFeatures`.
```
# X: original feature matrix
def add_quadratic_features(X):
return np.c_[X, X[:,0]**2, X[:,0]*X[:,1], X[:,1]**2]
Phi_train = add_quadratic_features(X_train)
Phi_test = add_quadratic_features(X_test)
```
Let's apply our custom logistic regression function again on the augmented feature space.
```
Lambda = 1
v_initial = np.zeros(Phi_train.shape[1] + 1) # fill in a vector of zeros of appropriate length
v_opt = my_logistic_regression(Phi_train, Y_train, Lambda, v_initial)
plot_results(X, Y, v_opt, trans_func=add_quadratic_features)
```
This time we should get a better result for the accuracy on the test set.
```
from sklearn.metrics import accuracy_score
Y_test_pred = ((np.dot(Phi_test, v_opt[1::]) + v_opt[0]) >= 0)*1 # fill in
accuracy_score(Y_test, Y_test_pred)
```
### 5. Effect of regularisation
So far, we've fixed the regularisation constant so that $\lambda = 1$. (Note it's possible to choose an "optimal" value for $\lambda$ by applying cross-validation.)
**Question:** What do you think will happen if we switch the regularisation off? Try setting $\lambda$ to a small value (say $10^{-3}$) and check whether the accuracy of the model is affected.
**Answer:** *Generally speaking, we risk overfitting if the regularisation constant is too small (or switched off entirely). You should observe that the accuracy on the test set reduces slightly with $\lambda = 10^{-3}$ vs. $\lambda = 1$.*
### 6. Logistic regression using sklearn
Now that you have some insight into the optimisation problem behind logistic regression, you should feel confident in using the built-in implementation in `sklearn` (or other packages).
Note that the `sklearn` implementation handles floating point underflow/overflow more carefully than we have done, and uses faster numerical optimisation algorithms.
```
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(C=1)
clf.fit(Phi_train, Y_train)
from sklearn.metrics import accuracy_score
Y_test_pred = clf.predict(Phi_test)
accuracy_score(Y_test, Y_test_pred)
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
from sklearn import *
import warnings; warnings.filterwarnings("ignore")
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
train.shape, test.shape, sub.shape
```
Wordplay in Column Names
==============================
```
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph()
col = [c for c in train.columns if c not in ['id', 'target']]
G.add_node('Start')
for i in range(4):
G.add_node('Column Section '+ str(i))
G.add_edge('Start','Column Section '+ str(i))
for c in train[col].columns:
if c.split('-')[i] not in G.nodes():
G.add_node(c.split('-')[i])
G.add_edge('Column Section '+ str(i), c.split('-')[i])
if c not in G.nodes():
G.add_node(c)
G.add_edge(c.split('-')[i],c)
plt.figure(1,figsize=(12,12))
nx.draw_networkx(G, node_size=1,font_size=6)
plt.axis('off'); plt.show()
```
How unique are the column values
==========
```
df = []
for c in train.columns:
if c not in ['target', 'id', 'wheezy-copper-turtle-magic']:
l1 = test[c].unique()
l2 = train[c].unique()
df.append([c, len(l1), len(l2), len(l1)- 131073, len(l2) - 262144])
df = pd.DataFrame(df, columns=['col', 'test_unique', 'train_unique', 'test_diff', 'train_diff'])
for c in ['test_unique', 'train_unique', 'test_diff', 'train_diff']:
print(df[c].min(), df[c].max())
#col = list(df[((df['test_diff']<-1900) & (df['train_diff']<-7500))]['col'].values)
df.head()
```
Getting wheezy
=====
```
col = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
df_all = pd.concat((train,test), axis=0, ignore_index=True).reset_index(drop=True)
df_all['wheezy-copper-turtle-magic'] = df_all['wheezy-copper-turtle-magic'].astype('category')
train = df_all[:train.shape[0]].reset_index(drop=True)
test = df_all[train.shape[0]:].reset_index(drop=True)
del df_all
train.shape, test.shape
```
Lets Race
======
```
test_ = []
kn = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)
sv = svm.NuSVC(kernel='poly', degree=4, random_state=4, probability=True, coef0=0.08)
for s in sorted(train['wheezy-copper-turtle-magic'].unique()):
train2 = train[train['wheezy-copper-turtle-magic']==s].reset_index(drop=True).copy()
test2 = test[test['wheezy-copper-turtle-magic']==s].reset_index(drop=True).copy()
kn.fit(train2[col], train2['target'])
sv.fit(train2[col], train2['target'])
test2['target'] = (kn.predict_proba(test2[col])[:,1] * 0.2) + (sv.predict_proba(test2[col])[:,1] * 0.8)
test_.append(test2)
test_ = pd.concat(test_).reset_index(drop=True)
test_[['id','target']].to_csv("submission.csv", index=False)
```
|
github_jupyter
|
# Accessing the Trigger
In ATLAS all access to event trigger decision is via the Trigger Decision Tool (TDT). There is quite a bit of information attached to the trigger, and its layout is quite complex - for that reason one should use the TDT to access the data. It is not really possible for a human to navigate the data structures quickly!
```
import matplotlib.pyplot as plt
from config import ds_zee as ds
from func_adl_servicex_xaodr21 import tdt_chain_fired, tmt_match_object
```
## Looking for events that fired a chain
Lets look at $Z \rightarrow ee$ Monte Carlo for a single electron trigger in the event.
```
n_electrons = (ds.Select(lambda e:
{
"n_ele": e.Electrons().Where(lambda e: abs(e.eta()) < 2.5).Count(),
"fired": tdt_chain_fired("HLT_e60_lhmedium_nod0"),
})
.AsAwkwardArray()
.value()
)
plt.hist(n_electrons.n_ele, bins=4, range=(0, 4), label='All Events')
plt.hist(n_electrons.n_ele[n_electrons.fired], bins=4, range=(0, 4), label='Fired Events')
plt.xlabel('Number of Electrons')
plt.ylabel('Number of Events')
plt.title('Electron Trigger and Number of Electrons in the Event')
_ = plt.legend()
```
## Trigger Matching
Next, let's find the electrons that matched that trigger that fired above. We'll do this by looking only at events where the trigger has fired, and then asking each electron if it matches withing a $\Delta R$.
```
matched_electrons = (
ds.Where(lambda e: tdt_chain_fired("HLT_e60_lhmedium_nod0"))
.SelectMany(lambda e: e.Electrons())
.Select(
lambda e: {
"pt": e.pt() / 1001.0,
"eta": e.eta(),
"is_trig": tmt_match_object("HLT_e60_lhmedium_nod0", e, 0.7),
}
)
.AsAwkwardArray()
.value()
)
```
To know the `tnt_match_object` arguments, you'll need to look up its definition below on the atlas twiki.
```
plt.hist(matched_electrons.pt, bins=100, range=(0, 100), label='All Electrons')
trigger_electrons = matched_electrons[matched_electrons.is_trig]
plt.hist(trigger_electrons.pt, bins=100, range=(0, 100), label='Trigger Electrons')
plt.xlabel('Electron $p_T$ [GeV]')
plt.ylabel('Number of Electrons')
_ = plt.legend()
```
## Further Information
* Tutorial on [trigger for analysis](https://indico.cern.ch/event/860971/contributions/3626403/attachments/1973400/3283452/200122_TriggerTutorial.pdf).
* Trigger Group's [Trigger Analysis Tool](https://twiki.cern.ch/twiki/bin/view/Atlas/TriggerAnalysisTools) twiki page (with a [page devoted to the TDT](https://twiki.cern.ch/twiki/bin/view/Atlas/TrigDecisionTool)).
* [Lowest un-prescaled triggers](https://twiki.cern.ch/twiki/bin/view/Atlas/LowestUnprescaled) per data-taking period twiki.
|
github_jupyter
|
# Additive Secret Sharing
Author:
- Carlos Salgado - [email](mailto:[email protected]) - [linkedin](https://www.linkedin.com/in/eng-socd/) - [github](https://github.com/socd06)
## Additive Secret Sharing
Additive Secret Sharing is a mechanism to share data among parties and to perform computation on it.

## Sharing
A secret `s` is uniformly split into `n` shares, one per shareholder (also known as worker, node, user or party) using some randomness `r`, also known as some **very high random prime** number `Q`.
$ F_s (s, r, n) = ( s_1, s_2, ..., s_n ) $
## Reconstruction
`s` can be reconstructed (decrypted) by adding up **all the shares** and taking the [*modulo*](https://en.wikipedia.org/wiki/Modulo_operation) of the random prime number `Q`, used to encrypt the shares originally.
$ s = ( \: \sum \limits _{i=1} ^n s_i \: ) \; mod \; Q $
## 32-bit Integer Secrets
A secret is the data or message that a party wants to secure. In additive secret sharing, secrets (and therefore, shares) must be members of a fixed [finite field](https://en.wikipedia.org/wiki/Finite_field). Particularly, the literature mentions shares should be members of the $ {\mathbb{Z}_{2^{32}}} $ [ring](https://en.wikipedia.org/wiki/Ring_(mathematics)), which is the [ring of integers](https://en.wikipedia.org/wiki/Ring_of_integers) that fit within [32-bits](https://en.wikipedia.org/wiki/32-bit_computing).

Rings are [sets](https://en.wikipedia.org/wiki/Set_(mathematics)) with two operations, addition and multiplication, which allow the rationale of secret sharing and reconstruction to work.
Plainly, secrets and secret shares **must** be integers within -2,147,483,647 to +2,147,483,647
## Governance
Additive secret sharing provides shared governance. The threshold `t` to reconstruct `s` is equal to `n`, which means **no party can recover the data** alone because all the shares are required to decrypt the secret *(t = n)*. This scheme allows us to do computation on the shares while each shareholder is only aware of their **own** share.
### [Quiz] Find the secret `s`
In practice, we use a **very high prime number** Q to add a **big deal of uniform randomness** to our shares. Here we will use a very small Q, so you can try to solve the quiz without programming yet.
Let $ s_1 = 10 \; and \; s_2 = 74 \; and \; Q = 59 $
What is the original secret `s`? Fill the ____ space below with your answer.
Try **not** to use a calculator or programming.
```
# Run this cell to import the quizzes
from quiz import q0, q1, q2
# run to check your answer
q0.check(___)
# Uncomment the line below to see a hint
# q0.hint
# Uncomment the line below to see the solution
# q0.solution
```
### [Quiz] Find the final share s<sub>2</sub>
Using a small `Q` to facilitate calculation (it needs to be a **very high prime number** in production), let
$ s = 7, n = 2 $ with $ Q = 59 $ and $ s_1 = 9 $
plugged in on the secret reconstruction equation, find the final share s<sub>2</sub>.
Fill the ____ space below with your answer. Feel free to implement the equation in a new cell or use whatever tool you'd like (e.g. a calculator), it's your call.
```
# Fill the ____ space below with your answer
final_share =
# run to check your answer
q1.check(final_share)
# Uncomment the line below to see a hint
# q1.hint
# Uncomment the line below to see the solution
# q1.solution
```
## In Practice
Just as an educational example, we can generate a list of prime numbers using [sympy](https://www.sympy.org/en/index.html)
```
# Verify we have all the tools we need to run the notebook
!pip install -r requirements.txt
import sympy
# An arbitrary constant, feel free to play with it
CONST = 999
BIT_DEPTH = 31
# Range start
start = 2**BIT_DEPTH-CONST
# Maximum in Z2**32 ring
end = 2**BIT_DEPTH
prime_lst = list(sympy.primerange(start,end+1))
print("Prime numbers in range: " , prime_lst)
```
And **randomly** choose one every time using [NumPy](https://numpy.org/devdocs/contents.html)'s [randint](https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html)
```
from numpy.random import randint
Q = prime_lst[randint(len(prime_lst))]
Q
```
As an additional note, the [Secrets module](https://docs.python.org/3/library/secrets.html), introduced in Python 3.6, provides randomness as secure as your operating system.
```
import secrets
Q = secrets.choice(prime_lst)
Q
```
## The Final Share and 2-party Additive Secret Sharing
Knowing that $ s_n = Q - (\; \sum \limits _{i=1} ^{n-1} s_i \; mod \; Q \; ) + s $
How do we implement 2-party ($ n=2 $) additive secret sharing using Python?
Keep reading and fing out!
```
def dual_share(s, r):
'''
s = secret
r = randomness
'''
share_lst = list()
share_lst.append(randint(0,r))
final_share = r - (share_lst[0] % r) + s
share_lst.append(final_share)
return share_lst
# Let's generate a couple of shares
secret = 5
dual_shares = dual_share(secret, Q)
dual_shares
```
Now go back to the previous cell and **run it again**. Notice anything?
...
...
...
See it yet? The shares are never the same because they are **randomly generated**.
Now let's implement the reconstruction (or decryption) function.
```
def decrypt(shares, r):
'''
shares = iterable made of additive secret shares
r = randomness
'''
return sum(shares) % r
# And let's decrypt our secret for the first time
decrypt(dual_shares, Q)
```
## Exercise: Implement n-party additive secret sharing
Fill the function below with your code.
```
def n_share(s, r, n):
'''
s = secret
r = randomness
n = number of nodes, workers or participants
returns a tuple of n-shares
'''
# replace with your code
pass
five_shares = n_share(s=686,r=Q,n=5)
five_shares
# run this cell to check your solution
q2.check(decrypt(five_shares, Q))
# Uncomment the line below to see a hint
# q2.hint
# Uncomment the line below to see the solution
# q2.solution
```
## Addition
Given two shared values $a$ and $b$, a party $P_i$ can compute the added shares as:
$ c_i = ( a_i + b_i ) \; mod \; Q$
In Python, we can implement this type of addition like this:
```
def addition(a, b, r):
'''
a = iterable of the same length of b
b = iterable of the same length of a
r = randomness AKA randomly generated very high prime number
'''
c = list()
for i in range(len(a)):
c.append((a[i] + b[i]) % r)
return tuple(c)
```
Considering Alice and Bob are our parties, with secrets $s_a$ and $s_b$ to be shared (2-way) and wanting to compute addition.
Let $s_a = 5 $ and $s_b = 11 $
Alice's shares would be something like:
```
# Alice's secret
sa = 5
alice_shares = dual_share(sa, Q)
alice_shares
```
While Bob's shares would be
```
# Bob's secret
sb = 11
bob_shares = dual_share(sb, Q)
bob_shares
secret_sum = addition(alice_shares, bob_shares, Q)
secret_sum
```
Doesn't make a lot of sense, does it?
Secret shares must only reveal information about their secrets when they are all combined. Otherwise all data must be hidden, which defines the **privacy** property.
These are still secret shares so there is one more step to get the sum of the original secrets.
```
decrypt(secret_sum, Q)
```
Et Voilà!
## Public (scalar) Multiplication
Given a list of shared values $a$ and a **scalar** $b$, a party $P_i$ can compute the multiplied shares as:
$ c_i = a_i \times b \; mod \; Q$
In Python, we can implement this type of multiplication like this:
```
def public_mul(a, b, r):
'''
a = iterable of the same length of b
b = scalar to multiply a by
r = randomness AKA randomly generated very high prime number
'''
c = list()
for i in range(len(a)):
c.append((a[i] * b) % r)
return tuple(c)
```
Let's say another party wants to multiply Alice's shares by the **scalar** value of 3.
```
alice_times3 = public_mul(alice_shares, 3, Q)
```
Then we can decrypt (with Alice's permission) to double check we did multiply what we intended.
```
decrypt(alice_times3,Q)
```
And this is `True` because Alice's secret $sa = 5$, remember?
```
decrypt(alice_times3,Q) == sa * 3
```
## PyTorch + PySyft implementation
Now that you know how additive secret sharing works under the hood, let's see how we can leverage PyTorch and PySyft to do it for us.
```
import torch
import syft as sy
hook = sy.TorchHook(torch)
```
Let's say Alice, Bob and Charlie are all enrolled on the **Foundations of Privacy** course and we, as instructors, want to know on average, how far in the course they are. We don't want to breach their privacy so each percentage of completion will be their own secret (a, b and c).
For educational purposes, we will define our parties (nodes, workers, etc) using `VirtualWorker` PySyft objects.
```
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
charlie = sy.VirtualWorker(hook, id="charlie")
```
We also need a "secure worker", also known as the `Crypto Provider` to provide us with random prime numbers.
```
secure_worker = sy.VirtualWorker(hook, "secure_worker")
```
We define our secrets using `torch.tensor` PyTorch tensor objects and we `Additive Share` them with our fellow workers.
```
# Let a, b and c be our students' completion percentage
a = torch.tensor([35])
b = torch.tensor([77])
c = torch.tensor([10])
# And we additive share with our parties
a = a.share(alice, bob, charlie, crypto_provider=secure_worker)
b = b.share(alice, bob, charlie, crypto_provider=secure_worker)
c = c.share(alice, bob, charlie, crypto_provider=secure_worker)
# And we compute the mean of our tensor
mean = torch.mean(torch.stack(list([a,b,c])))
mean
```
Also, see that the object type is **[AdditiveSharingTensor]**.
For this example, we can decrypt our computation result using the get() method
```
decrypted_mean = mean.get()
decrypted_mean
```
And get the scalar using the item() method (Only works for 1-dimensional tensors).
```
scalar_mean = decrypted_mean.item()
scalar_mean
```
Now, the average completion should actually be 40 and $ \frac{1}{3} $ (or 40.6666666666... ) but this is something we will learn about in the next lessons.
Let’s now tackle private multiplication!
|
github_jupyter
|
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
file = os.path.join(dirname, filename)
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
```
Importing necessary libraries.
```
from tensorflow.keras.layers import Input, Dense, Flatten
from keras import Model
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.models import Sequential
```
As we are using VGG16 architecture, it expects the size of 224 by 224(Although, you can set your own size). We will set image size.
```
image_size = [224, 224]
vgg = VGG16(input_shape = image_size + [3], weights = 'imagenet', include_top = False)
```
The first argument is the shape of input image plus **3**(as image is colured[RBG], for black_and_white add **1**).
The second one is the weights eqaul to imagenet. And,
as we know it gives 1000 outputs. Third one excludes the top layer.
```
for layer in vgg.layers:
layer.trainable = False
```
Some of the layers of VGG16 are already trained. To train them again is not a good practice. Thereby making it False
```
from glob import glob
folders = glob('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/*')
folders
```
Flattening the output layer
```
x = Flatten()(vgg.output)
prediction = Dense(len(folders), activation = 'softmax')(x)
model = Model(inputs = vgg.input, outputs = prediction)
model.summary()
```
Compiling the model
```
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
```
Generating more images
```
from keras.preprocessing.image import ImageDataGenerator
train_data_gen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_data_gen = ImageDataGenerator(rescale = 1./255)
train_set = train_data_gen.flow_from_directory('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/train/', target_size = (224,224), batch_size = 32, class_mode = 'categorical')
test_set = test_data_gen.flow_from_directory('/kaggle/input/tomato/New Plant Diseases Dataset(Augmented)/valid/', target_size = (224,224), batch_size = 32, class_mode = 'categorical')
```
Fitting the model
```
mod = model.fit_generator(
train_set,
validation_data=test_set,
epochs=10,
steps_per_epoch=len(train_set),
validation_steps=len(test_set)
)
import matplotlib.pyplot as plt
plt.plot(mod.history['loss'], label='train loss')
plt.plot(mod.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.plot(mod.history['accuracy'], label='train accuracy')
plt.plot(mod.history['val_accuracy'], label='val_accuracy')
plt.legend()
plt.show()
```
|
github_jupyter
|
# Optimization of a Dissipative Quantum Gate
```
# NBVAL_IGNORE_OUTPUT
%load_ext watermark
import sys
import os
import qutip
import numpy as np
import scipy
import matplotlib
import matplotlib.pylab as plt
import krotov
import copy
from functools import partial
from itertools import product
%watermark -v --iversions
```
$\newcommand{tr}[0]{\operatorname{tr}}
\newcommand{diag}[0]{\operatorname{diag}}
\newcommand{abs}[0]{\operatorname{abs}}
\newcommand{pop}[0]{\operatorname{pop}}
\newcommand{aux}[0]{\text{aux}}
\newcommand{int}[0]{\text{int}}
\newcommand{opt}[0]{\text{opt}}
\newcommand{tgt}[0]{\text{tgt}}
\newcommand{init}[0]{\text{init}}
\newcommand{lab}[0]{\text{lab}}
\newcommand{rwa}[0]{\text{rwa}}
\newcommand{bra}[1]{\langle#1\vert}
\newcommand{ket}[1]{\vert#1\rangle}
\newcommand{Bra}[1]{\left\langle#1\right\vert}
\newcommand{Ket}[1]{\left\vert#1\right\rangle}
\newcommand{Braket}[2]{\left\langle #1\vphantom{#2}\mid{#2}\vphantom{#1}\right\rangle}
\newcommand{ketbra}[2]{\vert#1\rangle\!\langle#2\vert}
\newcommand{op}[1]{\hat{#1}}
\newcommand{Op}[1]{\hat{#1}}
\newcommand{dd}[0]{\,\text{d}}
\newcommand{Liouville}[0]{\mathcal{L}}
\newcommand{DynMap}[0]{\mathcal{E}}
\newcommand{identity}[0]{\mathbf{1}}
\newcommand{Norm}[1]{\lVert#1\rVert}
\newcommand{Abs}[1]{\left\vert#1\right\vert}
\newcommand{avg}[1]{\langle#1\rangle}
\newcommand{Avg}[1]{\left\langle#1\right\rangle}
\newcommand{AbsSq}[1]{\left\vert#1\right\vert^2}
\newcommand{Re}[0]{\operatorname{Re}}
\newcommand{Im}[0]{\operatorname{Im}}$
This example illustrates the optimization for a quantum gate in an open quantum system, where the dynamics is governed by the Liouville-von Neumann equation. A naive extension of a gate optimization to Liouville space would seem to imply that it is necessary to optimize over the full basis of Liouville space (16 matrices, for a two-qubit gate). However, [Goerz et al., New J. Phys. 16, 055012 (2014)][1] showed that is not necessary, but that a set of 3 density matrices is sufficient to track the optimization.
This example reproduces the "Example II" from that paper, considering the optimization towards a $\sqrt{\text{iSWAP}}$ two-qubit gate on a system of two transmons with a shared transmission line resonator.
[1]: https://michaelgoerz.net/research/Goerz_NJP2014.pdf
**Note**: This notebook uses some parallelization features (`parallel_map`/`multiprocessing`). Unfortunately, on Windows (and macOS with Python >= 3.8), `multiprocessing` does not work correctly for functions defined in a Jupyter notebook (due to the [spawn method](https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods) being used on Windows, instead of Unix-`fork`, see also https://stackoverflow.com/questions/45719956). We can use the third-party [loky](https://loky.readthedocs.io/) library to fix this, but this significantly increases the overhead of multi-process parallelization. The use of parallelization here is for illustration only and makes no guarantee of actually improving the runtime of the optimization.
```
if sys.platform != 'linux':
krotov.parallelization.set_parallelization(use_loky=True)
from krotov.parallelization import parallel_map
```
## The two-transmon system
We consider the Hamiltonian from Eq (17) in the paper, in the rotating wave approximation, together with spontaneous decay and dephasing of each qubit. Alltogether, we define the Liouvillian as follows:
```
def two_qubit_transmon_liouvillian(
ω1, ω2, ωd, δ1, δ2, J, q1T1, q2T1, q1T2, q2T2, T, Omega, n_qubit
):
from qutip import tensor, identity, destroy
b1 = tensor(identity(n_qubit), destroy(n_qubit))
b2 = tensor(destroy(n_qubit), identity(n_qubit))
H0 = (
(ω1 - ωd - δ1 / 2) * b1.dag() * b1
+ (δ1 / 2) * b1.dag() * b1 * b1.dag() * b1
+ (ω2 - ωd - δ2 / 2) * b2.dag() * b2
+ (δ2 / 2) * b2.dag() * b2 * b2.dag() * b2
+ J * (b1.dag() * b2 + b1 * b2.dag())
)
H1_re = 0.5 * (b1 + b1.dag() + b2 + b2.dag()) # 0.5 is due to RWA
H1_im = 0.5j * (b1.dag() - b1 + b2.dag() - b2)
H = [H0, [H1_re, Omega], [H1_im, ZeroPulse]]
A1 = np.sqrt(1 / q1T1) * b1 # decay of qubit 1
A2 = np.sqrt(1 / q2T1) * b2 # decay of qubit 2
A3 = np.sqrt(1 / q1T2) * b1.dag() * b1 # dephasing of qubit 1
A4 = np.sqrt(1 / q2T2) * b2.dag() * b2 # dephasing of qubit 2
L = krotov.objectives.liouvillian(H, c_ops=[A1, A2, A3, A4])
return L
```
We will use internal units GHz and ns. Values in GHz contain an implicit factor 2π, and MHz and μs are converted to GHz and ns, respectively:
```
GHz = 2 * np.pi
MHz = 1e-3 * GHz
ns = 1
μs = 1000 * ns
```
This implicit factor $2 \pi$ is because frequencies ($\nu$) convert to energies as $E = h \nu$, but our propagation routines assume a unit $\hbar = 1$ for energies. Thus, the factor $h / \hbar = 2 \pi$.
We will use the same parameters as those given in Table 2 of the paper:
```
ω1 = 4.3796 * GHz # qubit frequency 1
ω2 = 4.6137 * GHz # qubit frequency 2
ωd = 4.4985 * GHz # drive frequency
δ1 = -239.3 * MHz # anharmonicity 1
δ2 = -242.8 * MHz # anharmonicity 2
J = -2.3 * MHz # effective qubit-qubit coupling
q1T1 = 38.0 * μs # decay time for qubit 1
q2T1 = 32.0 * μs # decay time for qubit 2
q1T2 = 29.5 * μs # dephasing time for qubit 1
q2T2 = 16.0 * μs # dephasing time for qubit 2
T = 400 * ns # gate duration
tlist = np.linspace(0, T, 2000)
```
While in the original paper, each transmon was cut off at 6 levels, here we truncate at 5 levels. This makes the propagation faster, while potentially introducing a slightly larger truncation error.
```
n_qubit = 5 # number of transmon levels to consider
```
In the Liouvillian, note the control being split up into a separate real and imaginary part. As a guess control we use a real-valued constant pulse with an amplitude of 35 MHz, acting over 400 ns, with a switch-on and switch-off in the first 20 ns (see plot below)
```
def Omega(t, args):
E0 = 35.0 * MHz
return E0 * krotov.shapes.flattop(t, 0, T, t_rise=(20 * ns), func='sinsq')
```
The imaginary part start out as zero:
```
def ZeroPulse(t, args):
return 0.0
```
We can now instantiate the Liouvillian:
```
L = two_qubit_transmon_liouvillian(
ω1, ω2, ωd, δ1, δ2, J, q1T1, q2T1, q1T2, q2T2, T, Omega, n_qubit
)
```
The guess pulse looks as follows:
```
def plot_pulse(pulse, tlist, xlimit=None):
fig, ax = plt.subplots()
if callable(pulse):
pulse = np.array([pulse(t, None) for t in tlist])
ax.plot(tlist, pulse/MHz)
ax.set_xlabel('time (ns)')
ax.set_ylabel('pulse amplitude (MHz)')
if xlimit is not None:
ax.set_xlim(xlimit)
plt.show(fig)
plot_pulse(L[1][1], tlist)
```
## Optimization objectives
Our target gate is $\Op{O} = \sqrt{\text{iSWAP}}$:
```
SQRTISWAP = qutip.Qobj(np.array(
[[1, 0, 0, 0],
[0, 1 / np.sqrt(2), 1j / np.sqrt(2), 0],
[0, 1j / np.sqrt(2), 1 / np.sqrt(2), 0],
[0, 0, 0, 1]]),
dims=[[2, 2], [2, 2]]
)
```
The key idea explored in the paper is that a set of three density matrices is sufficient to track the optimization
$$
\begin{align}
\Op{\rho}_1
&= \sum_{i=1}^{d} \frac{2 (d-i+1)}{d (d+1)} \ketbra{i}{i} \\
\Op{\rho}_2
&= \sum_{i,j=1}^{d} \frac{1}{d} \ketbra{i}{j} \\
\Op{\rho}_3
&= \sum_{i=1}^{d} \frac{1}{d} \ketbra{i}{i}
\end{align}
$$
In our case, $d=4$ for a two qubit-gate, and the $\ket{i}$, $\ket{j}$ are the canonical basis states $\ket{00}$, $\ket{01}$, $\ket{10}$, $\ket{11}$
```
ket00 = qutip.ket((0, 0), dim=(n_qubit, n_qubit))
ket01 = qutip.ket((0, 1), dim=(n_qubit, n_qubit))
ket10 = qutip.ket((1, 0), dim=(n_qubit, n_qubit))
ket11 = qutip.ket((1, 1), dim=(n_qubit, n_qubit))
basis = [ket00, ket01, ket10, ket11]
```
The three density matrices play different roles in the optimization, and, as shown in the paper, convergence may improve significantly by weighing the states relatively to each other. For this example, we place a strong emphasis on the optimization $\Op{\rho}_1 \rightarrow \Op{O}^\dagger \Op{\rho}_1 \Op{O}$, by a factor of 20. This reflects that the hardest part of the optimization is identifying the basis in which the gate is diagonal. We will be using the real-part functional ($J_{T,\text{re}}$) to evaluate the success of $\Op{\rho}_i \rightarrow \Op{O}\Op{\rho}_i\Op{O}^\dagger$. Because $\Op{\rho}_1$ and $\Op{\rho}_3$ are mixed states, the Hilbert-Schmidt overlap will take values smaller than one in the optimal case. To compensate, we divide the weights by the purity of the respective states.
```
weights = np.array([20, 1, 1], dtype=np.float64)
weights *= len(weights) / np.sum(weights) # manual normalization
weights /= np.array([0.3, 1.0, 0.25]) # purities
```
The `krotov.gate_objectives` routine can initialize the density matrices $\Op{\rho}_1$, $\Op{\rho}_2$, $\Op{\rho}_3$ automatically, via the parameter `liouville_states_set`. Alternatively, we could also use the `'full'` basis of 16 matrices or the extended set of $d+1 = 5$ pure-state density matrices.
```
objectives = krotov.gate_objectives(
basis,
SQRTISWAP,
L,
liouville_states_set='3states',
weights=weights,
normalize_weights=False,
)
objectives
```
The use of `normalize_weights=False` is because we have included the purities in the weights, as discussed above.
## Dynamics under the Guess Pulse
For numerical efficiency, both for the analysis of the guess/optimized controls, we will use a stateful density matrix propagator:
A true physical measure for the success of the optimization is the "average gate fidelity". Evaluating the fidelity requires to simulate the dynamics of the full basis of Liouville space:
```
full_liouville_basis = [psi * phi.dag() for (psi, phi) in product(basis, basis)]
```
We propagate these under the guess control:
```
def propagate_guess(initial_state):
return objectives[0].mesolve(
tlist,
rho0=initial_state,
).states[-1]
full_states_T = parallel_map(
propagate_guess, values=full_liouville_basis,
)
print("F_avg = %.3f" % krotov.functionals.F_avg(full_states_T, basis, SQRTISWAP))
```
Note that we use $F_{T,\text{re}}$, not $F_{\text{avg}}$ to steer the optimization, as the Krotov boundary condition $\frac{\partial F_{\text{avg}}}{\partial \rho^\dagger}$ would be non-trivial.
Before doing the optimization, we can look the population dynamics under the guess pulse. For this purpose we propagate the pure-state density matrices corresponding to the canonical logical basis in Hilbert space, and obtain the expectation values for the projection onto these same states:
```
rho00, rho01, rho10, rho11 = [qutip.ket2dm(psi) for psi in basis]
def propagate_guess_for_expvals(initial_state):
return objectives[0].propagate(
tlist,
propagator=krotov.propagators.DensityMatrixODEPropagator(),
rho0=initial_state,
e_ops=[rho00, rho01, rho10, rho11]
)
def plot_population_dynamics(dyn00, dyn01, dyn10, dyn11):
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(16, 8))
axs = np.ndarray.flatten(axs)
labels = ['00', '01', '10', '11']
dyns = [dyn00, dyn01, dyn10, dyn11]
for (ax, dyn, title) in zip(axs, dyns, labels):
for (i, label) in enumerate(labels):
ax.plot(dyn.times, dyn.expect[i], label=label)
ax.legend()
ax.set_title(title)
plt.show(fig)
plot_population_dynamics(
*parallel_map(
propagate_guess_for_expvals,
values=[rho00, rho01, rho10, rho11],
)
)
```
## Optimization
We now define the optimization parameters for the controls, the Krotov step size $\lambda_a$ and the update-shape that will ensure that the pulse switch-on and switch-off stays intact.
```
pulse_options = {
L[i][1]: dict(
lambda_a=1.0,
update_shape=partial(
krotov.shapes.flattop, t_start=0, t_stop=T, t_rise=(20 * ns))
)
for i in [1, 2]
}
```
Then we run the optimization for 2000 iterations
```
opt_result = krotov.optimize_pulses(
objectives,
pulse_options,
tlist,
propagator=krotov.propagators.DensityMatrixODEPropagator(reentrant=True),
chi_constructor=krotov.functionals.chis_re,
info_hook=krotov.info_hooks.print_table(J_T=krotov.functionals.J_T_re),
iter_stop=3,
)
```
(this takes a while)...
```
dumpfile = "./3states_opt_result.dump"
if os.path.isfile(dumpfile):
opt_result = krotov.result.Result.load(dumpfile, objectives)
else:
opt_result = krotov.optimize_pulses(
objectives,
pulse_options,
tlist,
propagator=krotov.propagators.DensityMatrixODEPropagator(reentrant=True),
chi_constructor=krotov.functionals.chis_re,
info_hook=krotov.info_hooks.print_table(J_T=krotov.functionals.J_T_re),
iter_stop=5,
continue_from=opt_result
)
opt_result.dump(dumpfile)
opt_result
```
## Optimization result
```
optimized_control = opt_result.optimized_controls[0] + 1j * opt_result.optimized_controls[1]
plot_pulse(np.abs(optimized_control), tlist)
def propagate_opt(initial_state):
return opt_result.optimized_objectives[0].propagate(
tlist,
propagator=krotov.propagators.DensityMatrixODEPropagator(),
rho0=initial_state,
).states[-1]
opt_full_states_T = parallel_map(
propagate_opt, values=full_liouville_basis,
)
print("F_avg = %.3f" % krotov.functionals.F_avg(opt_full_states_T, basis, SQRTISWAP))
def propagate_opt_for_expvals(initial_state):
return opt_result.optimized_objectives[0].propagate(
tlist,
propagator=krotov.propagators.DensityMatrixODEPropagator(),
rho0=initial_state,
e_ops=[rho00, rho01, rho10, rho11]
)
```
Plotting the population dynamics, we see the expected behavior for the $\sqrt{\text{iSWAP}}$ gate.
```
plot_population_dynamics(
*parallel_map(
propagate_opt_for_expvals,
values=[rho00, rho01, rho10, rho11],
)
)
def plot_convergence(result):
fig, ax = plt.subplots()
ax.semilogy(result.iters, result.info_vals)
ax.set_xlabel('OCT iteration')
ax.set_ylabel(r'optimization error $J_{T, re}$')
plt.show(fig)
plot_convergence(opt_result)
```
|
github_jupyter
|
# Working with Scikit-learn
This notebook shows how PySINDy objects interface with some useful tools from [Scikit-learn](https://scikit-learn.org/stable/).
## Setup
```
import numpy as np
from scipy.integrate import odeint
import pysindy as ps
```
Let's generate some training data from the [Lorenz system](https://en.wikipedia.org/wiki/Lorenz_system) with which to experiment.
```
def lorenz(z, t):
return [
10 * (z[1] - z[0]),
z[0] * (28 - z[2]) - z[1],
z[0] * z[1] - (8 / 3) * z[2]
]
# Generate training data
dt = .002
t_train = np.arange(0, 10, dt)
x0_train = [-8, 8, 27]
x_train = odeint(lorenz, x0_train, t_train)
# Evolve the Lorenz equations in time using a different initial condition
t_test = np.arange(0, 15, dt)
x0_test = np.array([8, 7, 15])
x_test = odeint(lorenz, x0_test, t_test)
```
## Cross-validation
PySINDy supports Scikit-learn-type cross-validation with a few caveats.
1. We must use **uniform timesteps** using the `t_default` parameter. This is because the `fit` and `score` methods of `SINDy` differ from those used in Scikit-learn in the sense that they both have an optional `t` parameter. Setting `t_default` is a workaround.
2. We have to be careful about the way we split up testing and training data during cross-validation. Because the `SINDy` object needs to differentiate the data, we need the training and test data to consist of sequential intervals of time. If we randomly sample the data, then the computed derivatives will be horribly inaccurate. Luckily, Scikit-learn has a `TimeSeriesSplit` object for such situations. If we really want to randomly sample the data during cross-validation, there is a way to do so. However, it's more complicated.
Note that we need to prepend `optimizer__`, `feature_library__`, or `differentiation_method__` to the parameter names.
### Cross-validation with TimeSeriesSplit
```
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import TimeSeriesSplit
model = ps.SINDy(t_default=dt)
param_grid = {
"optimizer__threshold": [0.001, 0.01, 0.1],
"optimizer__alpha": [0.01, 0.05, 0.1],
"feature_library": [ps.PolynomialLibrary(), ps.FourierLibrary()],
"differentiation_method__order": [1, 2]
}
search = GridSearchCV(
model,
param_grid,
cv=TimeSeriesSplit(n_splits=5)
)
search.fit(x_train)
print("Best parameters:", search.best_params_)
search.best_estimator_.print()
```
### Cross-validation without TimeSeriesSplit
If we want to use another cross-validation splitter, we'll need to (a) define a wrapper class which uses the argument "y" instead of "x_dot" and (b) precompute the derivatives. Note that (b) means that we will not be able to perform cross-validation on the parameters of the differentiation method.
```
from sklearn.metrics import r2_score
class SINDyCV(ps.SINDy):
def __init__(
self,
optimizer=None,
feature_library=None,
differentiation_method=None,
feature_names=None,
t_default=1,
discrete_time=False,
n_jobs=1
):
super(SINDyCV, self).__init__(
optimizer=optimizer,
feature_library=feature_library,
differentiation_method=differentiation_method,
feature_names=feature_names,
t_default=t_default,
discrete_time=discrete_time,
n_jobs=n_jobs
)
def fit(self, x, y, **kwargs):
return super(SINDyCV, self).fit(x, x_dot=y, **kwargs)
def score(
self,
x,
y,
t=None,
u=None,
multiple_trajectories=False,
metric=r2_score,
**metric_kws
):
return super(SINDyCV, self).score(
x,
x_dot=y,
t=t,
u=u,
multiple_trajectories=multiple_trajectories,
metric=metric,
**metric_kws
)
from sklearn.model_selection import ShuffleSplit
model = SINDyCV()
x_dot = model.differentiate(x_train, t=t_train)
param_grid = {
"optimizer__threshold": [0.002, 0.01, 0.1],
"optimizer__alpha": [0.01, 0.05, 0.1],
"feature_library__degree": [1, 2, 3],
}
search = GridSearchCV(
model,
param_grid,
cv=ShuffleSplit(n_splits=3, test_size=0.25)
)
search.fit(x_train, y=x_dot)
print("Best parameters:", search.best_params_)
search.best_estimator_.print()
```
## Sparse optimizers
Any of Scikit-learn's [linear models ](https://scikit-learn.org/stable/modules/linear_model.html) can be used for the `optimizer` parameter of a `SINDy` object, though we only recommend using those designed for sparse regression.
In the examples below we set `fit_intercept` to `False` since the default feature library (polynomials of degree up to two) already includes constant functions.
```
from sklearn.linear_model import ElasticNet
model = ps.SINDy(optimizer=ElasticNet(l1_ratio=0.9, fit_intercept=False), t_default=dt)
model.fit(x_train)
model.print()
from sklearn.linear_model import OrthogonalMatchingPursuit
model = ps.SINDy(
optimizer=OrthogonalMatchingPursuit(n_nonzero_coefs=8, fit_intercept=False),
t_default=dt
)
model.fit(x_train)
model.print()
```
|
github_jupyter
|
# An Introduction to Natural Language in Python using spaCy
## Introduction
This tutorial provides a brief introduction to working with natural language (sometimes called "text analytics") in Pytho, using [spaCy](https://spacy.io/) and related libraries.
Data science teams in industry must work with lots of text, one of the top four categories of data used in machine learning.
Usually that's human-generated text, although not always.
Think about it: how does the "operating system" for business work? Typically, there are contracts (sales contracts, work agreements, partnerships), there are invoices, there are insurance policies, there are regulations and other laws, and so on.
All of those are represented as text.
You may run across a few acronyms: _natural language processing_ (NLP), _natural language understanding_ (NLU), _natural language generation_ (NLG) — which are roughly speaking "read text", "understand meaning", "write text" respectively.
Increasingly these tasks overlap and it becomes difficult to categorize any given feature.
The _spaCy_ framework — along with a wide and growing range of plug-ins and other integrations — provides features for a wide range of natural language tasks.
It's become one of the most widely used natural language libraries in Python for industry use cases, and has quite a large community — and with that, much support for commercialization of research advances as this area continues to evolve rapidly.
## Getting Started
Check out the excellent _spaCy_ [installation notes](https://spacy.io/usage) for a "configurator" which generates installation commands based on which platforms and natural languages you need to support.
Some people tend to use `pip` while others use `conda`, and there are instructions for both. For example, to get started with _spaCy_ working with text in English and installed via `conda` on a Linux system:
```
conda install -c conda-forge spacy
python -m spacy download en_core_web_sm
```
BTW, the second line above is a download for language resources (models, etc.) and the `_sm` at the end of the download's name indicates a "small" model. There's also "medium" and "large", albeit those are quite large. Some of the more advanced features depend on the latter, although we won't quite be diving to the bottom of that ocean in this (brief) tutorial.
Now let's load _spaCy_ and run some code:
```
import spacy
nlp = spacy.load("en_core_web_sm")
```
That `nlp` variable is now your gateway to all things _spaCy_ and loaded with the `en_core_web_sm` small model for English.
Next, let's run a small "document" through the natural language parser:
```
text = "The rain in Spain falls mainly on the plain."
doc = nlp(text)
for token in doc:
print(token.text, token.lemma_, token.pos_, token.is_stop)
```
First we created a [doc](https://spacy.io/api/doc) from the text, which is a container for a document and all of its annotations. Then we iterated through the document to see what _spaCy_ had parsed.
Good, but it's a lot of info and a bit difficult to read. Let's reformat the _spaCy_ parse of that sentence as a [pandas](https://pandas.pydata.org/) dataframe:
```
import pandas as pd
cols = ("text", "lemma", "POS", "explain", "stopword")
rows = []
for t in doc:
row = [t.text, t.lemma_, t.pos_, spacy.explain(t.pos_), t.is_stop]
rows.append(row)
df = pd.DataFrame(rows, columns=cols)
df
```
Much more readable!
In this simple case, the entire document is merely one short sentence.
For each word in that sentence _spaCy_ has created a [token](https://spacy.io/api/token), and we accessed fields in each token to show:
- raw text
- [lemma](https://en.wikipedia.org/wiki/Lemma_(morphology)) – a root form of the word
- [part of speech](https://en.wikipedia.org/wiki/Part_of_speech)
- a flag for whether the word is a _stopword_ – i.e., a common word that may be filtered out
Next let's use the [displaCy](https://ines.io/blog/developing-displacy) library to visualize the parse tree for that sentence:
```
from spacy import displacy
displacy.render(doc, style="dep", jupyter=True)
```
Does that bring back memories of grade school? Frankly, for those of us coming from more of a computational linguistics background, that diagram sparks joy.
But let's backup for a moment. How do you handle multiple sentences?
There are features for _sentence boundary detection_ (SBD) – also known as _sentence segmentation_ – based on the builtin/default [sentencizer](https://spacy.io/api/sentencizer):
```
text = "We were all out at the zoo one day, I was doing some acting, walking on the railing of the gorilla exhibit. I fell in. Everyone screamed and Tommy jumped in after me, forgetting that he had blueberries in his front pocket. The gorillas just went wild."
doc = nlp(text)
for sent in doc.sents:
print(">", sent)
```
When _spaCy_ creates a document, it uses a principle of _non-destructive tokenization_ meaning that the tokens, sentences, etc., are simply indexes into a long array. In other words, they don't carve the text stream into little pieces. So each sentence is a [span](https://spacy.io/api/span) with a _start_ and an _end_ index into the document array:
```
for sent in doc.sents:
print(">", sent.start, sent.end)
```
We can index into the document array to pull out the tokens for one sentence:
```
doc[48:54]
```
Or simply index into a specific token, such as the verb `went` in the last sentence:
```
token = doc[51]
print(token.text, token.lemma_, token.pos_)
```
At this point we can parse a document, segment that document into sentences, then look at annotations about the tokens in each sentence. That's a good start.
## Acquiring Text
Now that we can parse texts, where do we get texts?
One quick source is to leverage the interwebs.
Of course when we download web pages we'll get HTML, and then need to extract text from them.
[Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) is a popular package for that.
First, a little housekeeping:
```
import sys
import warnings
warnings.filterwarnings("ignore")
```
### Character Encoding
The following shows examples of how to use [codecs](https://docs.python.org/3/library/codecs.html) and [normalize unicode](https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize). NB: the example text comes from the article "[Metal umlat](https://en.wikipedia.org/wiki/Metal_umlaut)".
```
x = "Rinôçérôse screams flow not unlike an encyclopædia, \
'TECHNICIÄNS ÖF SPÅCE SHIP EÅRTH THIS IS YÖÜR CÄPTÅIN SPEÄKING YÖÜR ØÅPTÅIN IS DEA̋D' to Spın̈al Tap."
type(x)
```
The variable `x` is a *string* in Python:
```
repr(x)
```
Its translation into [ASCII](http://www.asciitable.com/) is unusable by parsers:
```
ascii(x)
```
Encoding as [UTF-8](http://unicode.org/faq/utf_bom.html) doesn't help much:
```
x.encode('utf8')
```
Ignoring difficult characters is perhaps an even worse strategy:
```
x.encode('ascii', 'ignore')
```
However, one can *normalize* text, then encode…
```
import unicodedata
unicodedata.normalize('NFKD', x).encode('ascii','ignore')
```
Even before this normalization and encoding, you may need to convert some characters explicitly **before** parsing. For example:
```
x = "The sky “above” the port … was the color of ‘cable television’ – tuned to the Weather Channel®"
ascii(x)
```
Consider the results for that line:
```
unicodedata.normalize('NFKD', x).encode('ascii', 'ignore')
```
...which still drops characters that may be important for parsing a sentence.
So a more advanced approach could be:
```
x = x.replace('“', '"').replace('”', '"')
x = x.replace("‘", "'").replace("’", "'")
x = x.replace('…', '...').replace('–', '-')
x = unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('utf-8')
print(x)
```
### Parsing HTML
In the following function `get_text()` we'll parse the HTML to find all of the `<p/>` tags, then extract the text for those:
```
from bs4 import BeautifulSoup
import requests
import traceback
def get_text (url):
buf = []
try:
soup = BeautifulSoup(requests.get(url).text, "html.parser")
for p in soup.find_all("p"):
buf.append(p.get_text())
return "\n".join(buf)
except:
print(traceback.format_exc())
sys.exit(-1)
```
Now let's grab some text from online sources.
We can compare open source licenses hosted on the [Open Source Initiative](https://opensource.org/licenses/) site:
```
lic = {}
lic["mit"] = nlp(get_text("https://opensource.org/licenses/MIT"))
lic["asl"] = nlp(get_text("https://opensource.org/licenses/Apache-2.0"))
lic["bsd"] = nlp(get_text("https://opensource.org/licenses/BSD-3-Clause"))
for sent in lic["bsd"].sents:
print(">", sent)
```
One common use case for natural language work is to compare texts. For example, with those open source licenses we can download their text, parse, then compare [similarity](https://spacy.io/api/doc#similarity) metrics among them:
```
pairs = [
["mit", "asl"],
["asl", "bsd"],
["bsd", "mit"]
]
for a, b in pairs:
print(a, b, lic[a].similarity(lic[b]))
```
That is interesting, since the [BSD](https://opensource.org/licenses/BSD-3-Clause) and [MIT](https://opensource.org/licenses/MIT) licenses appear to be the most similar documents.
In fact they are closely related.
Admittedly, there was some extra text included in each document due to the OSI disclaimer in the footer – but this provides a reasonable approximation for comparing the licenses.
## Natural Language Understanding
Now let's dive into some of the _spaCy_ features for NLU.
Given that we have a parse of a document, from a purely grammatical standpoint we can pull the [noun chunks](https://spacy.io/usage/linguistic-features#noun-chunks), i.e., each of the noun phrases:
```
text = "Steve Jobs and Steve Wozniak incorporated Apple Computer on January 3, 1977, in Cupertino, California."
doc = nlp(text)
for chunk in doc.noun_chunks:
print(chunk.text)
```
Not bad. The noun phrases in a sentence generally provide more information content – as a simple filter used to reduce a long document into a more "distilled" representation.
We can take this approach further and identify [named entities](https://spacy.io/usage/linguistic-features#named-entities) within the text, i.e., the proper nouns:
```
for ent in doc.ents:
print(ent.text, ent.label_)
```
The _displaCy_ library provides an excellent way to visualize named entities:
```
displacy.render(doc, style="ent", jupyter=True)
```
If you're working with [knowledge graph](https://www.akbc.ws/) applications and other [linked data](http://linkeddata.org/), your challenge is to construct links between the named entities in a document and other related information for the entities – which is called [entity linking](http://nlpprogress.com/english/entity_linking.html).
Identifying the named entities in a document is the first step in this particular kind of AI work.
For example, given the text above, one might link the `Steve Wozniak` named entity to a [lookup in DBpedia](http://dbpedia.org/page/Steve_Wozniak).
In more general terms, one can also link _lemmas_ to resources that describe their meanings.
For example, in an early section we parsed the sentence `The gorillas just went wild` and were able to show that the lemma for the word `went` is the verb `go`. At this point we can use a venerable project called [WordNet](https://wordnet.princeton.edu/) which provides a lexical database for English – in other words, it's a computable thesaurus.
There's a _spaCy_ integration for WordNet called
[spacy-wordnet](https://github.com/recognai/spacy-wordnet) by [Daniel Vila Suero](https://twitter.com/dvilasuero), an expert in natural language and knowledge graph work.
Then we'll load the WordNet data via NLTK (these things happen):
```
import nltk
nltk.download("wordnet")
```
Note that _spaCy_ runs as a "pipeline" and allows means for customizing parts of the pipeline in use.
That's excellent for supporting really interesting workflow integrations in data science work.
Here we'll add the `WordnetAnnotator` from the _spacy-wordnet_ project:
```
!pip install spacy-wordnet
from spacy_wordnet.wordnet_annotator import WordnetAnnotator
print("before", nlp.pipe_names)
if "WordnetAnnotator" not in nlp.pipe_names:
nlp.add_pipe(WordnetAnnotator(nlp.lang), after="tagger")
print("after", nlp.pipe_names)
```
Within the English language, some words are infamous for having many possible meanings. For example, click through the results online in a [WordNet](http://wordnetweb.princeton.edu/perl/webwn?s=star&sub=Search+WordNet&o2=&o0=1&o8=1&o1=1&o7=&o5=&o9=&o6=&o3=&o4=&h=) search to find the meanings related to the word `withdraw`.
Now let's use _spaCy_ to perform that lookup automatically:
```
token = nlp("withdraw")[0]
token._.wordnet.synsets()
token._.wordnet.lemmas()
token._.wordnet.wordnet_domains()
```
Again, if you're working with knowledge graphs, those "word sense" links from WordNet could be used along with graph algorithms to help identify the meanings for a particular word. That can also be used to develop summaries for larger sections of text through a technique called _summarization_. It's beyond the scope of this tutorial, but an interesting application currently for natural language in industry.
Going in the other direction, if you know _a priori_ that a document was about a particular domain or set of topics, then you can constrain the meanings returned from _WordNet_. In the following example, we want to consider NLU results that are within Finance and Banking:
```
domains = ["finance", "banking"]
sentence = nlp("I want to withdraw 5,000 euros.")
enriched_sent = []
for token in sentence:
# get synsets within the desired domains
synsets = token._.wordnet.wordnet_synsets_for_domain(domains)
if synsets:
lemmas_for_synset = []
for s in synsets:
# get synset variants and add to the enriched sentence
lemmas_for_synset.extend(s.lemma_names())
enriched_sent.append("({})".format("|".join(set(lemmas_for_synset))))
else:
enriched_sent.append(token.text)
print(" ".join(enriched_sent))
```
That example may look simple but, if you play with the `domains` list, you'll find that the results have a kind of combinatorial explosion when run without reasonable constraints.
Imagine having a knowledge graph with millions of elements: you'd want to constrain searches where possible to avoid having every query take days/weeks/months/years to compute.
Sometimes the problems encountered when trying to understand a text – or better yet when trying to understand a _corpus_ (a dataset with many related texts) – become so complex that you need to visualize it first.
Here's an interactive visualization for understanding texts: [scattertext](https://spacy.io/universe/project/scattertext), a product of the genius of [Jason Kessler](https://twitter.com/jasonkessler).
To install:
```
conda install -c conda-forge scattertext
```
Let's analyze text data from the party conventions during the 2012 US Presidential elections. It may take a minute or two to run, but the results from all that number crunching is worth the wait.
```
!pip install scattertext
import scattertext as st
if "merge_entities" not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe("merge_entities"))
if "merge_noun_chunks" not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe("merge_noun_chunks"))
convention_df = st.SampleCorpora.ConventionData2012.get_data()
corpus = st.CorpusFromPandas(convention_df,
category_col="party",
text_col="text",
nlp=nlp).build()
```
Once you have the `corpus` ready, generate an interactive visualization in HTML:
```
html = st.produce_scattertext_explorer(
corpus,
category="democrat",
category_name="Democratic",
not_category_name="Republican",
width_in_pixels=1000,
metadata=convention_df["speaker"]
)
```
Now we'll render the HTML – give it a minute or two to load, it's worth the wait...
```
from IPython.display import IFrame
from IPython.core.display import display, HTML
import sys
IN_COLAB = "google.colab" in sys.modules
print(IN_COLAB)
```
**NB: use the following cell on Google Colab:**
```
if IN_COLAB:
display(HTML("<style>.container { width:98% !important; }</style>"))
display(HTML(html))
```
**NB: use the following cell instead on Jupyter in general:**
```
file_name = "foo.html"
with open(file_name, "wb") as f:
f.write(html.encode("utf-8"))
IFrame(src=file_name, width = 1200, height=700)
```
Imagine if you had text from the past three years of customer support for a particular product in your organization. Suppose your team needed to understand how customers have been talking about the product? This _scattertext_ library might come in quite handy! You could cluster (k=2) on _NPS scores_ (a customer evaluation metric) then replace the Democrat/Republican dimension with the top two components from the clustering.
## Summary
Five years ago, if you’d asked about open source in Python for natural language, a default answer from many people working in data science would've been [NLTK](https://www.nltk.org/).
That project includes just about everything but the kitchen sink and has components which are relatively academic.
Another popular natural language project is [CoreNLP](https://stanfordnlp.github.io/CoreNLP/) from Stanford.
Also quite academic, albeit powerful, though _CoreNLP_ can be challenging to integrate with other software for production use.
Then a few years ago everything in this natural language corner of the world began to change.
The two principal authors for _spaCy_ -- [Matthew Honnibal](https://twitter.com/honnibal) and [Ines Montani](https://twitter.com/_inesmontani) -- launched the project in 2015 and industry adoption was rapid.
They focused on an _opinionated_ approach (do what's needed, do it well, no more, no less) which provided simple, rapid integration into data science workflows in Python, as well as faster execution and better accuracy than the alternatives.
Based on those priorities, _spaCy_ become sort of the opposite of _NLTK_.
Since 2015, _spaCy_ has consistently focused on being an open source project (i.e., depending on its community for directions, integrations, etc.) and being commercial-grade software (not academic research).
That said, _spaCy_ has been quick to incorporate the SOTA advances in machine learning, effectively becoming a conduit for moving research into industry.
It's important to note that machine learning for natural language got a big boost during the mid-2000's as Google began to win international language translation competitions.
Another big change occurred during 2017-2018 when, following the many successes of _deep learning_, those approaches began to out-perform previous machine learning models.
For example, see the [ELMo](https://arxiv.org/abs/1802.05365) work on _language embedding_ by Allen AI, followed by [BERT](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) from Google, and more recently [ERNIE](https://medium.com/syncedreview/baidus-ernie-tops-google-s-bert-in-chinese-nlp-tasks-d6a42b49223d) by Baidu -- in other words, the search engine giants of the world have gifted the rest of us with a Sesame Street repertoire of open source embedded language models based on deep learning, which is now _state of the art_ (SOTA).
Speaking of which, to keep track of SOTA for natural language, keep an eye on [NLP-Progress](http://nlpprogress.com/) and [Papers with Code](https://paperswithcode.com/sota).
The use cases for natural language have shifted dramatically over the past two years, after deep learning techniques arose to the fore.
Circa 2014, a natural language tutorial in Python might have shown _word count_ or _keyword search_ or _sentiment detection_ where the target use cases were relatively underwhelming.
Circa 2019 we're talking about analyzing thousands of documents for vendor contracts in an industrial supply chain optimization ... or hundreds of millions of documents for policy holders of an insurance company, or gazillions of documents regarding financial disclosures.
More contemporary natural language work tends to be in NLU, often to support construction of _knowledge graphs,_ and increasingly in NLG where large numbers of similar documents can be summarized at human scale.
The [spaCy Universe](https://spacy.io/universe) is a great place to check for deep-dives into particular use cases, and to see how this field is evolving. Some selections from this "universe" include:
- [Blackstone](https://spacy.io/universe/project/blackstone) – parsing unstructured legal texts
- [Kindred](https://spacy.io/universe/project/kindred) – extracting entities from biomedical texts (e.g., Pharma)
- [mordecai](https://spacy.io/universe/project/mordecai) – parsing geographic information
- [Prodigy](https://spacy.io/universe/project/prodigy) – human-in-the-loop annotation to label datasets
- [spacy-raspberry](https://spacy.io/universe/project/spacy-raspberry) – Raspberry PI image for running _spaCy_ and deep learning on edge devices
- [Rasa NLU](https://spacy.io/universe/project/rasa) – Rasa integration for voice apps
Also, a couple super new items to mention:
- [spacy-pytorch-transformers](https://explosion.ai/blog/spacy-pytorch-transformers) to fine tune (i.e., use _transfer learning_ with) the Sesame Street characters and friends: BERT, GPT-2, XLNet, etc.
- [spaCy IRL 2019](https://irl.spacy.io/2019/) conference – check out videos from the talks!
There's so much more that can be done with _spaCy_ – hopefully this tutorial provides an introduction. We wish you all the best in your natural language work.
|
github_jupyter
|
```
%load_ext watermark
%watermark -d -u -a 'Andreas Mueller, Kyle Kastner, Sebastian Raschka' -v -p numpy,scipy,matplotlib,scikit-learn
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
# SciPy 2016 Scikit-learn Tutorial
# In Depth - Support Vector Machines
SVM stands for "support vector machines". They are efficient and easy to use estimators.
They come in two kinds: SVCs, Support Vector Classifiers, for classification problems, and SVRs, Support Vector Regressors, for regression problems.
## Linear SVMs
The SVM module contains LinearSVC, which we already discussed briefly in the section on linear models.
Using ``SVC(kernel="linear")`` will also yield a linear predictor that is only different in minor technical aspects.
## Kernel SVMs
The real power of SVMs lies in using kernels, which allow for non-linear decision boundaries. A kernel defines a similarity measure between data points. The most common are:
- **linear** will give linear decision frontiers. It is the most computationally efficient approach and the one that requires the least amount of data.
- **poly** will give decision frontiers that are polynomial. The order of this polynomial is given by the 'order' argument.
- **rbf** uses 'radial basis functions' centered at each support vector to assemble a decision frontier. The size of the RBFs ultimately controls the smoothness of the decision frontier. RBFs are the most flexible approach, but also the one that will require the largest amount of data.
Predictions in a kernel-SVM are made using the formular
$$
\hat{y} = \text{sign}(\alpha_0 + \sum_{j}\alpha_j y_j k(\mathbf{x^{(j)}}, \mathbf{x}))
$$
where $\mathbf{x}^{(j)}$ are training samples, $\mathbf{y}^{(j)}$ the corresponding labels, $\mathbf{x}$ is a test-sample to predict on, $k$ is the kernel, and $\alpha$ are learned parameters.
What this says is "if $\mathbf{x}$ is similar to $\mathbf{x}^{(j)}$ then they probably have the same label", where the importance of each $\mathbf{x}^{(j)}$ for this decision is learned. [Or something much less intuitive about an infinite dimensional Hilbert-space]
Often only few samples have non-zero $\alpha$, these are called the "support vectors" from which SVMs get their name.
These are the most discriminant samples.
The most important parameter of the SVM is the regularization parameter $C$, which bounds the influence of each individual sample:
- Low C values: many support vectors... Decision frontier = mean(class A) - mean(class B)
- High C values: small number of support vectors: Decision frontier fully driven by most discriminant samples
The other important parameters are those of the kernel. Let's look at the RBF kernel in more detail:
$$k(\mathbf{x}, \mathbf{x'}) = \exp(-\gamma ||\mathbf{x} - \mathbf{x'}||^2)$$
```
from sklearn.metrics.pairwise import rbf_kernel
line = np.linspace(-3, 3, 100)[:, np.newaxis]
kernel_value = rbf_kernel(line, [[0]], gamma=1)
plt.plot(line, kernel_value);
```
The rbf kernel has an inverse bandwidth-parameter gamma, where large gamma mean a very localized influence for each data point, and
small values mean a very global influence.
Let's see these two parameters in action:
```
from figures import plot_svm_interactive
plot_svm_interactive()
```
## Exercise: tune a SVM on the digits dataset
```
from sklearn.datasets import load_digits
from sklearn.svm import SVC
digits = load_digits()
X_digits, y_digits = digits.data, digits.target
# split the dataset, apply grid-search
#%load solutions/18_svc_grid.py
```
|
github_jupyter
|
# Think Bayes
This notebook presents example code and exercise solutions for Think Bayes.
Copyright 2018 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import classes from thinkbayes2
from thinkbayes2 import Hist, Pmf, Suite, Beta
import thinkplot
```
## Unreliable observation
Suppose that instead of observing coin tosses directly, you measure the outcome using an instrument that is not always correct. Specifically, suppose there is a probability `y` that an actual heads is reported as tails, or actual tails reported as heads.
Write a class that estimates the bias of a coin given a series of outcomes and the value of `y`.
How does the spread of the posterior distribution depend on `y`?
```
# Solution
# Here's a class that models an unreliable coin
class UnreliableCoin(Suite):
def __init__(self, prior, y):
"""
prior: seq or map
y: probability of accurate measurement
"""
super().__init__(prior)
self.y = y
def Likelihood(self, data, hypo):
"""
data: outcome of unreliable measurement, either 'H' or 'T'
hypo: probability of heads, 0-100
"""
x = hypo / 100
y = self.y
if data == 'H':
return x*y + (1-x)*(1-y)
else:
return x*(1-y) + (1-x)*y
# Solution
# Now let's initialize an UnreliableCoin with `y=0.9`:
prior = range(0, 101)
suite = UnreliableCoin(prior, y=0.9)
thinkplot.Pdf(suite)
# Solution
# And update with 3 heads and 7 tails.
for outcome in 'HHHTTTTTTT':
suite.Update(outcome)
thinkplot.Pdf(suite)
# Solution
# Now let's try it out with different values of `y`:
def plot_posterior(y, data):
prior = range(0, 101)
suite = UnreliableCoin(prior, y=y)
for outcome in data:
suite.Update(outcome)
thinkplot.Pdf(suite, label='y=%g' % y)
# Solution
# The posterior distribution gets wider as the measurement gets less reliable.
data = 'HHHTTTTTTT'
plot_posterior(1, data)
plot_posterior(0.8, data)
plot_posterior(0.6, data)
thinkplot.decorate(xlabel='Probability of heads (x)',
ylabel='PMF')
# Solution
# At `y=0.5`, the measurement provides no information, so the posterior equals the prior:
plot_posterior(0.5, data)
thinkplot.decorate(xlabel='Probability of heads (x)',
ylabel='PMF')
# Solution
# As the coin gets less reliable (below `y=0.5`) the distribution gets narrower again.
# In fact, a measurement with `y=0` is just as good as one with `y=1`,
# provided that we know what `y` is.
plot_posterior(0.4, data)
plot_posterior(0.2, data)
plot_posterior(0.0, data)
thinkplot.decorate(xlabel='Probability of heads (x)',
ylabel='PMF')
```
|
github_jupyter
|
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# YahooFinance - Get Stock Update
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/YahooFinance/YahooFinance_Get_Stock_Update.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
**Tags:** #yahoofinance #usdinr #plotly #investors #analytics #automation #plotly
**Author:** [Megha Gupta](https://github.com/megha2907)
Description: With this template you will get INR USD rate visualized on a chart
## Input
### Import Libraries
```
import naas
from naas_drivers import yahoofinance, plotly
import markdown2
from IPython.display import Markdown as md
```
### Setup Yahoo parameters
👉 Here you can input:<br>
- yahoo ticker : get tickers <a href='https://finance.yahoo.com/trending-tickers?.tsrc=fin-srch'>here</a>
- date from
- date to
```
TICKER = 'INR=X'
date_from = -30
date_to = 'today'
```
### Setup your email parameters
👉 Here you can input your sender email and destination email
Note: emails are sent from [email protected] by default
```
email_to = ["[email protected]"]
email_from = None
```
## Model
### Get the data from yahoo finance using naas drivers
```
#data cleaning
df = yahoofinance.get(TICKER, date_from=date_from, date_to = date_to)
df = df.dropna()# drop the na values from the dataframe
df.reset_index(drop=True)
df = df.sort_values("Date", ascending=False).reset_index(drop=True)
df.head()
```
### Extract value from data
```
LASTOPEN = round(df.loc[0, "Open"], 2)
LASTCLOSE = round(df.loc[0, "Close"], 2)
YESTERDAYOPEN = round(df.loc[1, "Open"], 2)
YESTERDAYCLOSE = round(df.loc[1, "Close"], 2)
MAXRATE = round(df['Open'].max(),2)
MXDATEOPEN = df.loc[df['Open'].idxmax(), "Date"].strftime("%Y-%m-%d")
MINRATE = round(df['Open'].min(),2)
MNDATEOPEN = df.loc[df['Open'].idxmin(), "Date"].strftime("%Y-%m-%d")
```
### Plot the data
```
last_date = df.loc[df.index[0], "Date"].strftime("%Y-%m-%d")
output = plotly.linechart(df,
x="Date",
y=['Open','Close'],
title=f"<b>INR USD rates of last month</b><br><span style='font-size: 13px;'>Last value as of {last_date}: Open={LASTOPEN}, Close={LASTCLOSE}</span>")
```
## Output
### Save the dataset in csv
```
df.to_csv(f"{TICKER}_LastMonth.csv", index=False)
```
### Create markdown template
```
%%writefile message.md
Hello world,
The **TICKER** price is Open LASTOPEN and Close LASTCLOSE right now. <br>
**Yesterday Open**: YESTERDAYOPEN <br>
**Yesterday Close**: YESTERDAYCLOSE <br>
The Max Open rate of **TICKER** was on MXDATEOPEN which was MAXRATE. <br>
The Min Open rate of **TICKER** was on MNDATEOPEN which was MINRATE. <br>
Attached is the excel file for your reference. <br>
Have a nice day.
<br>
PS: You can [send the email again](link_webhook) if you need a fresh update.<br>
<div><strong>Full Name</strong></div>
<div>Open source lover | <a href="http://www.naas.ai/" target="_blank">Naas</a></div>
<div>+ 33 1 23 45 67 89</div>
<div><small>This is an automated email from my Naas account</small></div>
```
### Add email template as dependency
```
naas.dependency.add("message.md")
```
### Replace values in template
```
markdown_file = "message.md"
content = open(markdown_file, "r").read()
md = markdown2.markdown(content)
md
post = md.replace("LASTOPEN", str(LASTOPEN))
post = post.replace("LASTCLOSE", str(LASTCLOSE))
post = post.replace("YESTERDAYOPEN", str(YESTERDAYOPEN))
post = post.replace("YESTERDAYCLOSE", str(YESTERDAYCLOSE))
post = post.replace("MXDATEOPEN", str(MXDATEOPEN))
post = post.replace("MAXRATE", str(MAXRATE))
post = post.replace("MNDATEOPEN", str(MNDATEOPEN))
post = post.replace("MINRATE", str(MINRATE))
post = post.replace("TICKER", str(TICKER))
post
```
### Add webhook to run your notebook again
```
link_webhook = naas.webhook.add()
```
### Send by email
```
subject = f"📈 {TICKER} Open and close rates as of today"
content = post
files = [f"{TICKER}_LastMonth.csv"]
naas.notification.send(email_to=email_to,
subject=subject,
html=content,
email_from=email_from,
files=files)
```
### Schedule your notebook
Please uncomment and run the cell below to schedule your notebook everyday at 8:00 during business days
```
# import naas
# naas.scheduler.add("0 8 1-5 * *")
```
|
github_jupyter
|
Before we begin, let's execute the cell below to display information about the CUDA driver and GPUs running on the server by running the `nvidia-smi` command. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.
```
!nvidia-smi
```
## Learning objectives
The **goal** of this lab is to:
- Dig deeper into kernels by analyzing it with Nsight Compute
In the previous section, we learned to optimize the parallel [RDF](../serial/rdf_overview.ipynb) application using OpenACC. Moreover, we used NVIDIA Nsight Systems to get a system-wide performance analysis. Now, let's dig deeper and profile the kernel with the Nsight Compute profiler to get detailed performance metrics and find out how the OpenACC is mapped at the Compute Unified Device Architecture(CUDA) hardware level. Note: You will get a better understanding of the GPU architecture in the CUDA notebooks.
To do this, let's use the [solution](../../source_code/openacc/SOLUTION/rdf_collapse.f90) as a reference to get a similar report from Nsight Compute. Run the application, and profile it with the Nsight Systems first.
Now, let's compile, and profile it with Nsight Systems first.
```
#compile the solution for Tesla GPU
!cd ../../source_code/openacc && nvfortran -acc -ta=tesla,lineinfo -Minfo=accel -o rdf nvtx.f90 SOLUTION/rdf_collapse.f90 -L/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/cuda/11.2/lib64 -lnvToolsExt
#profile the solution with Nsight Systems
!cd ../../source_code/openacc && nsys profile -t nvtx,openacc --stats=true --force-overwrite true -o rdf_collapse_solution ./rdf
```
Let's checkout the profiler's report. Download and save the report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../../source_code/openacc/rdf_collapse_solution.qdrep) and open it via the GUI. Now, right click on the kernel `rdf_98_gpu` and click on "Analyze the Selected Kernel with NVIDIA Nsight Compute" (see below screenshot).
<img src="../images/f_compute_analyz.png">
Then, make sure to tick the radio button next to "Display the command line to user NVIDIA Nsight Compute CLI".
<img src="../images/compute_command_line.png" width="50%" height="50%">
Then, you simply copy the command, run it and analyze the selected kernel.
<img src="../images/f_compute_command.png" width="50%" height="50%">
To profile the selected kernel, run the below cell (by adding `--set full` we make sure to capture all the sections in Nsight Compute profiler):
```
#profile the selected kernel in the solution with Nsight compute
!cd ../../source_code/openacc && ncu --set full --launch-skip 1 --launch-count 1 -o rdf_collapse_solution ./rdf
```
Let's checkout the Nsight Compute profiler's report together. Download and save the report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../../source_code/openacc/rdf_collapse_solution.ncu-rep) and open it via the GUI. Let's checkout the first section called "GPU Speed Of Light". This section gives an overview of the utilization for compute and memory resources on the GPU. As you can see from the below screenshot, the Speed of Light (SOL) reports the achieved percentage of utilization of 30.04% for SM and 70.10% for memory.
<img src="../images/f_sol.png">
**Extra**: If you can use the baseline feature on the Nsight Compute and compare the analysis of the kernel from this version of the RDF (which uses data directives and collapse clause) with the very first parallel version where we only added parallel directives and used managed memory, you can see how much improvements we got (see the below screenshot for reference):
<img src="../images/f_sol_baseline.png">
It is clear that we were able to reduce the execution time to half(red rectangle) and increase the SM and memory utilization (green rectangle). However, as you see the device is still underutilized. Let's look at the roofline analysis which indicates that the application is bandwith bound and the kernel exhibits low compute throughput and memory is more heavily utilized than Compute and it is clear the the memory is the bottleneck.
<img src="../images/f_roofline_collapse.png">
The Nsight Compute profiler suggests us to checkout the "Memory Workload Analysis" report sections to see where the memory system bottleneck is. There are 9.85 M instructions loading from or storing to the global memory space. The link going from L1/TEX Cache to Global shows 8.47 M requests generated due to global load instructions.
<img src="../images/f_memory_collapse.png">
Let's have a look at the table showing L1/TEX Cache. The "Sectors/Req" column shows the average ratio of sectors to requests for the L1 cache. For the same number of active threads in a warp, smaller numbers imply a more efficient memory access pattern. For warps with 32 active threads, the optimal ratios per access size are: `32-bit: 4`, `64-bit: 8`, `128-bit: 16`. Smaller ratios indicate some degree of uniformity or overlapped loads within a cache line. Checkout the [GPU Architecture Terminologies](../GPU_Architecture_Terminologies.ipynb) notebook to learn more about threads and warps.
In the example screenshot, we can see that this number is higher. This implies uncoalesced memory accesses and will result in increased memory traffic. We are not efficiently utilizing the bytes transferred.
<img src="../images/f_memory_sec.png">
Now, let's have a look at the "Source Counters" section located at the end of "Details" page of the profiler report. The section contains tables indicating the N highest or lowest values of one or more metrics in the selected kernel source code. Hotspot tables point out performance problems in the source.
<img src="../images/f_source_loc.png">
We can select the location links to navigate directly to this location in the "Source" page. Moreover, you can hover the mouse over a value to see which metrics contribute to it.
<img src="../images/f_source_hover.png">
The "Source" page displays metrics that can be correlated with source code. It is filtered to only show (SASS) functions that were executed in the kernel launch.
<!--<img src="../images/source_sass_collapse.png">-->
<img src="../images/f_source_sass.png">
The "Source" section in the "Details" page indicates that the issue is *uncoalesced Global memory access*.
<img src="../images/uncoalesced_hint.png">
**Memory Coalescing**
On GPUs, threads are executed in warps. When we have a group of 32 contiguous threads called *warp* accessing adjacent locations in memory, we have *Coalesced memory* access and as a result we have few transactions and higher utilization. However, if a warp of 32 threads accessing scattered memory locations, then we have *Uncoalesced memory* access and this results in high number of transactions and low utilization.
<img src="../images/coalesced_mem.png">
Without changing the data structure and refactoring the code, we cannot fix this issue and improve the performance further using OpenACC in a straightforward easier way. The next step would be to look into how to optimize this application further with CUDA and perhaps take advantage of shared memory.
## Post-Lab Summary
If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below.
```
%%bash
cd ..
rm -f nways_files.zip
zip -r nways_files.zip *
```
**After** executing the above zip command, you should be able to download and save the zip file [here] by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../nways_files.zip).
Let us now go back to parallelizing our code using other approaches.
**IMPORTANT**: Please click on **HOME** to go back to the main notebook for *N ways of GPU programming for MD* code.
-----
# <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../../nways_MD_start.ipynb>HOME</a></p>
-----
# Links and Resources
[NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/)
[NVIDIA Nsight Compute](https://developer.nvidia.com/nsight-compute)
[CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads)
**NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems).
Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.
---
## Licensing
This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
|
github_jupyter
|
# Recruitment Across Datasets
In this notebook, we further examine the capability of ODIF to transfer across datasets, building upon the prior FTE/BTE experiments on MNIST and Fashion-MNIST. Using the datasets found in [this repo](https://github.com/neurodata/LLF_tidy_images), we perform a series of experiments to evaluate the transfer efficiency and recruitment capabilities of ODIF across five different datasets. The datasets and their content are as follows:
- Caltech-101: contains images of objects in 101 categories
- CIFAR-10: contains 32x32 color images of objects in 10 classes
- CIFAR-100: contains 32x32 color images of objects in 100 classes
- Food-101: contains images of dishes in 101 categories
- DTD: contains images of describable textures
```
import functions.recruitacrossdatasets_functions as fn
```
**Note:** This notebook tutorial uses functions stored externally within `functions/recruitacrossdatasets_functions.py` to simplify presentation of code. These functions are imported above, along with other libraries.
## FTE/BTE Experiment
We begin our examination of ODIF's transfer capabilities across datasets with the FTE/BTE experiment, which provides background metrics for what the expected performance should be. This helps inform the later recruitment experiment.
### Base Experiment
#### Import and Process Data
Let's first import the data and perform some preprocessing so that it is in the correct format for feeding to ODIF. The following function does so for us:
```
data, classes = fn.import_data(normalize=False)
```
#### Define Hyperparameters
We then define the hyperparameters to be used for the experiment:
- `model`: model to be used for FTE/BTE experiment
- `num_tasks`: number of tasks
- `num_trees`: nuber of trees
- `reps`: number of repetitions, fewer than actual figures to reduce running time
```
##### MAIN HYPERPARAMS ##################
model = "odif"
num_tasks = 5
num_trees = 10
reps = 4
#########################################
```
Taking each dataset as a separate task, we have `5` tasks, and we also set a default of `10` trees, with the experiment being run for `30` reps.
Note, in comparison to previous FTE/BTE experiments, the lack of the `num_points_per_task` parameter. Here, we sample based on the label with the least number of samples and take 31 samples from each label.
#### Run Experiment and Plot Results
First, we call the function to run the experiment:
```
accuracy_all_task = fn.ftebte_exp(
data, classes, model, num_tasks, num_trees, reps, shift=0
)
```
Using the accuracies over all tasks, we can calculate the error, the forwards transfer efficiency (FTE), the backwards transfer efficiency (BTE), and the overall transfer efficiency (TE).
```
err, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)
```
These results are therefore plotted using the function as follows:
```
fn.plot_ftebte(num_tasks, err, bte, fte, te)
```
As can be seen from above, there is generally positive forwards and backwards transfer efficiency when evaluating transfer across datasets, even though the datasets contained very different content.
### Varying the Number of Trees
We were also curious how changing the number of trees would affect the results of the FTE/BTE experiment across datasets, and therefore also reran the experiment using `50` trees:
```
##### MAIN HYPERPARAMS ##################
model = "odif"
num_tasks = 5
num_trees = 50
reps = 4
#########################################
```
Running the experiment, we find the following results:
```
accuracy_all_task = fn.ftebte_exp(
data, classes, model, num_tasks, num_trees, reps, shift=0
)
err, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)
fn.plot_ftebte(num_tasks, err, bte, fte, te)
```
It seems as if more trees leads to lower transfer efficiency.
We use `10` trees for the remainder of the experiments to save on computing power.
## Recruitment Experiment
Now that we have roughly assessed the performance of ODIF via the FTE/BTE experiment, we are also interested in which recruitment scheme works the best for this set of data.
### Base Experiment
To quickly reiterate some of the background on the recruitment experiment, there are generally two main schemes for developing lifelong learning algorithms: building and reallocating. The former involves adding new resources as new data comes in, whereas the latter involves compressing current representations to make room for new ones. We want to examine whether current resources could be better leveraged by testing a range of approaches:
1. **Building (default for Omnidirectional Forest):** train `num_trees` new trees
2. **Uncertainty forest:** ignore all prior trees
3. **Recruiting:** select `num_trees` (out of all 450 existing trees) that perform best on the newly introduced 10th task
4. **Hybrid:** builds `num_trees/2` new trees AND recruits `num_trees/2` best-forming trees
We compare the results of these approaches based on varying training sample sizes, in the range of `[1, 5, 10, 25]` samples per label.
#### Define Hyperparameters
As always, we define the hyperparameters:
- `num_tasks`: number of tasks
- `num_trees`: nuber of trees
- `reps`: number of repetitions
- `estimation_set`: size of set used to train for the last task, as a proportion (`1-estimation_set` is the size of the set used for validation, aka the selection of best trees)
```
############################
### Main hyperparameters ###
############################
num_tasks = 5
num_trees = 10
reps = 4
estimation_set = 0.63
```
#### Run Experiment and Plot Results
We call our experiment function and input the main hyperparameters:
```
# run recruitment experiment
means, stds, last_task_sample = fn.recruitment_exp(
data, classes, num_tasks, num_trees, reps, estimation_set, shift=0
)
```
And then we plot the results:
```
# plot results
fn.recruitment_plot(means, stds, last_task_sample, num_tasks)
```
We therefore see that though generalization error remains high on the final task, the lifelong learning algorithm still outperforms the other recruitment schemes overall.
### Shifting Dataset Order
Since the above experiment involves fixing DTD as the final dataset, a further experiment involves shifting the order of datasets, so that there is a different dataset as task 5 each time. This allows us to see whether different dataset content would significantly impact the results on the final task.
To do so, we define the `shift` parameter in our call to the `recruitment_exp` function. This, in turn, calls the `shift_data` function, which moves the first task to the end and thus reorders the sequence of tasks.
More specifically, if we define `shift=1`, as done below, we would get the following order of datasets:
1. CIFAR-10
2. CIFAR-100
3. Food-101
4. DTD
5. Caltech-101
```
# run recruitment experiment
means, stds, last_task_sample = fn.recruitment_exp(
data, classes, num_tasks, num_trees, reps, estimation_set, shift=1
)
# plot results
fn.recruitment_plot(means, stds, last_task_sample, num_tasks)
```
A `shift=2` results in a dataset order of:
1. CIFAR-100
2. Food-101
3. DTD
4. Caltech-101
5. CIFAR-10
```
# run recruitment experiment
means, stds, last_task_sample = fn.recruitment_exp(
data, classes, num_tasks, num_trees, reps, estimation_set, shift=2
)
# plot results
fn.recruitment_plot(means, stds, last_task_sample, num_tasks)
```
`shift=3` gives us:
1. Food-101
2. DTD
3. Caltech-101
4. CIFAR-10
5. CIFAR-100
```
# run recruitment experiment
means, stds, last_task_sample = fn.recruitment_exp(
data, classes, num_tasks, num_trees, reps, estimation_set, shift=3
)
# plot results
fn.recruitment_plot(means, stds, last_task_sample, num_tasks)
```
And finally, `shift=4` yields:
1. DTD
2. Caltech-101
3. CIFAR-10
4. CIFAR-100
5. Food-101
```
# run recruitment experiment
means, stds, last_task_sample = fn.recruitment_exp(
data, classes, num_tasks, num_trees, reps, estimation_set, shift=4
)
# plot results
fn.recruitment_plot(means, stds, last_task_sample, num_tasks)
```
Throughout all the above experiments, even though generalization error remains high due to the sheer amount of different labels across all the different datsets, our lifelong learning algorithm still outperforms the other recruitment methods.
## Other Experiments
### Effect of Normalization
When examining data across different datasets, normalization and standardization of data is often of interest. However, this can also lead to loss of information, as we are placing all the images on the same scale. As a final experiment, we also look into the effect of normalization on the FTE/BTE results.
#### Import and Process Data
The `import_data` function has a `normalize` parameter, where one can specify whether they want to normalize the data, normalize across the dataset, or just normalize across each image. Previously, for the original FTE/BTE experiment, we set `normalize=False`.
Here, we look at the other two options.
```
# normalize across dataset
data1, classes1 = fn.import_data(normalize="dataset")
# normalize across each image
data2, classes2 = fn.import_data(normalize="image")
```
#### Define Hyperparameters
We use the same parameters as before:
```
##### MAIN HYPERPARAMS ##################
model = "odif"
num_tasks = 5
num_trees = 10
reps = 4
#########################################
```
#### Run Experiment and Plot Results
We first run the FTE/BTE experiment by normalizing across each dataset, such that the images in each dataset have a range of [0,1] in each channel.
```
accuracy_all_task = fn.ftebte_exp(
data1, classes1, model, num_tasks, num_trees, reps, shift=0
)
err, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)
fn.plot_ftebte(num_tasks, err, bte, fte, te)
```
We then run the FTE/BTE experiment with normalizing per image, so that each channel in each image is scaled to a range of [0,1].
```
accuracy_all_task = fn.ftebte_exp(
data2, classes2, model, num_tasks, num_trees, reps, shift=0
)
err, bte, fte, te = fn.get_metrics(accuracy_all_task, num_tasks)
fn.plot_ftebte(num_tasks, err, bte, fte, te)
```
It seems as if normalizing both across the dataset and within each image yield relatively similar results to not normalizing, so we did not perform further experiments to explore this area more at the current point in time.
|
github_jupyter
|
$\newcommand{\xv}{\mathbf{x}}
\newcommand{\wv}{\mathbf{w}}
\newcommand{\Chi}{\mathcal{X}}
\newcommand{\R}{\rm I\!R}
\newcommand{\sign}{\text{sign}}
\newcommand{\Tm}{\mathbf{T}}
\newcommand{\Xm}{\mathbf{X}}
\newcommand{\Im}{\mathbf{I}}
\newcommand{\Ym}{\mathbf{Y}}
$
### ITCS8010
# G_np Simulation Experiment
In this experiment I like to replicate the behaviour of `Fraction of node in largest CC` and `Fraction of isolated nodes` over the `p*log(n)` in `Erdös-Renyi random graph model`.
```
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import collections as collec
%matplotlib inline
# Fraction of node in largest CC Vs. {p*log(n)}
n = 100000
x1 = []
y1 = []
for kave in np.arange(0.5, 3.0, 0.1):
G = nx.fast_gnp_random_graph(n, kave / (n - 1))
largest_cc = max(nx.connected_components(G), key=len)
x1.append(kave)
y1.append(len(largest_cc)/n)
# print(kave)
# print(len(largest_cc)/n)
fig, ax = plt.subplots()
ax.plot(x1, y1)
ax.set(xlabel='p*(n-1)', ylabel='Fraction of node in largest CC',
title='Fraction of node in largest CC Vs. p*(n-1)')
ax.grid()
# fig.savefig("test.png")
plt.show()
# Fraction of isolated nodes Vs. {p*log(n)}
x2 = []
y2 = []
for kave in np.arange(0.3, 1.5, 0.1):
p = kave / (n - 1)
G = nx.fast_gnp_random_graph(n, p)
isolates = len(list(nx.isolates(G)))
x2.append(p * np.log10(n))
y2.append(isolates/n)
# print(kave)
# print(isolates/n)
fig, ax = plt.subplots()
ax.plot(x2, y2)
ax.set(xlabel='p*log(n)', ylabel='Fraction of isolated nodes',
title='Fraction of isolated nodes Vs. p*log(n)')
ax.grid()
# fig.savefig("test.png")
plt.show()
# Fraction of isolated nodes Vs. {p*log(n)}
x2 = []
y2 = []
for kave in np.arange(0.3, 10, 0.1):
p = kave / (n - 1)
G = nx.fast_gnp_random_graph(n, p)
isolates = len(list(nx.isolates(G)))
x2.append(p * np.log10(n))
y2.append(isolates/n)
# print(kave)
# print(isolates/n)
fig, ax = plt.subplots()
ax.plot(x2, y2)
ax.set(xlabel='p*log(n)', ylabel='Fraction of isolated nodes',
title='Fraction of isolated nodes Vs. p*log(n)')
ax.grid()
# fig.savefig("test.png")
plt.show()
```
### Observation:
1. The result of the first experiment (i.e. `fraction of node in largest CC` varying `p*(n-1)`) gives somewhat similar behaviour we observed in the class slide.
2. In the second experiment (i.e. plotting `fraction of isolated nodes` on varying `p*log(n)`) gives somewhat different result comparing to the one we found in the class slide. When we plot the graph for p*(n-1) within the range of 0.3 to 1.5 we don't get the long tail; which we can get when we increase the range of p*(n-1) from 0.3 to 10. Just to inform, in this experiment we do run the loop on the different values of `p*(n-1)`, but plot it on `p*log(n)` scale. I am not sure the reason behind type of behaviour.
## Key Network Properties
Now we like to use the networkx [[2]](https://networkx.github.io/documentation/stable/) library support to ovserve the values of the key network properties in Erdös-Renyi random graph.
```
# plotting degree distribution
n1 = 180
p1 = 0.11
G = nx.fast_gnp_random_graph(n1, p1)
degree_sequence = sorted([d for n, d in G.degree()], reverse=True) # degree sequence
degreeCount = collec.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
fig, ax = plt.subplots()
plt.bar(deg, cnt, width=0.80, color="b")
plt.title("Degree Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")
ax.set_xticks([d + 0.4 for d in deg])
ax.set_xticklabels(deg)
# draw graph in inset
plt.axes([0.4, 0.4, 0.5, 0.5])
Gcc = G.subgraph(sorted(nx.connected_components(G), key=len, reverse=True)[0])
pos = nx.spring_layout(G)
plt.axis("off")
nx.draw_networkx_nodes(G, pos, node_size=20)
nx.draw_networkx_edges(G, pos, alpha=0.4)
plt.show()
# diameter and path length
dia = nx.diameter(G)
print(dia)
avg_path_len = nx.average_shortest_path_length(G)
print(avg_path_len)
```
# References
[1] Erdős, Paul, and Alfréd Rényi. 1960. “On the Evolution of Random Graphs.” Bull. Inst. Internat. Statis. 38 (4): 343–47.
[2] NetworkX, “Software for Complex Networks,” https://networkx.github.io/documentation/stable/, 2020, accessed: 2020-10.
|
github_jupyter
|
## Classify Radio Signals from Space using Keras
In this experiment, we attempt to classify radio signals from space.
Dataset has been provided by SETI. Details can be found here:
https://github.com/setiQuest/ML4SETI/blob/master/tutorials/Step_1_Get_Data.ipynb
## Import necessary libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
import seaborn as sns
import tensorflow as tf
%matplotlib inline
# Mount google drive to get data
from google.colab import drive
drive.mount('/content/drive')
!ls -l '/content/drive/My Drive/datasets/seti'
```
## Load data
```
# Load dataset from CSV
train_images = pd.read_csv('/content/drive/My Drive/datasets/seti/train/images.csv', header=None)
train_labels = pd.read_csv('/content/drive/My Drive/datasets/seti/train/labels.csv', header=None)
val_images = pd.read_csv('/content/drive/My Drive/datasets/seti/validation/images.csv', header=None)
val_labels = pd.read_csv('/content/drive/My Drive/datasets/seti/validation/labels.csv', header=None)
train_images.head()
train_labels.head()
# Check shape of train_images, train_labels, val_images nad val_labels
print("train_images shape:", train_images.shape)
print("train_labels shape:", train_labels.shape)
print("val_images shape:", val_images.shape)
print("val_labels shape:", val_labels.shape)
# Reshape the image sets
# Get the values as numpy array
x_train = train_images.values.reshape(3200, 64, 128, 1)
x_val = val_images.values.reshape(800, 64, 128, 1)
y_train = train_labels.values
y_val = val_labels.values
```
## Plot 2D spectrogram data
```
plt.figure(figsize=(15,15))
for i in range(1,4):
plt.subplot(1,3,i)
img = np.squeeze(x_train[np.random.randint(x_train.shape[0])])
plt.imshow(img, cmap='gray')
```
## Preprocess data
```
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)
```
## Build model
```
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.layers import BatchNormalization, Dropout, Activation
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
# Initialize model
model = Sequential()
# 1st CNN block
model.add(Conv2D(32, (5,5), padding='same', input_shape=(64,128,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# 2nd CNN block
model.add(Conv2D(64, (5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# Falatter CNN output to feed to FC layer
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))
# Softmax layer
model.add(Dense(4, activation='softmax'))
```
## Compile the model
```
# Schedule learnning rate decay
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.005,
decay_steps=5,
decay_rate=0.9,
staircase=True)
model.compile(optimizer=Adam(lr_schedule), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```
## Train the model
```
batch_size = 32
history = model.fit(
datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=len(x_train)//batch_size,
validation_data = datagen_val.flow(x_val, y_val, batch_size=batch_size, shuffle=True),
validation_steps = len(x_val)//batch_size,
epochs=10,
)
```
## Evaluation
```
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'])
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['training', 'validation'])
plt.show()
model.evaluate(x_val, y_val)
y_true = np.argmax(y_val, 1)
y_pred = np.argmax(model.predict(x_val), 1)
print(metrics.classification_report(y_true, y_pred))
print("Classification accuracy: %.2f" % metrics.accuracy_score(y_true, y_pred))
plt.figure(figsize=(8,8))
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]
ax = plt.subplot()
sns.heatmap(metrics.confusion_matrix(y_true, y_pred, normalize='true'), annot=True, ax=ax, cmap=plt.cm.Blues)
ax.set_title('Confusion Matrix')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels)
```
## Conclusions
Winning submission has used ResNet based architechure (WRN) on primary (full) dataset, and achieved a classification accuracy of 94.99%.
Reference: https://github.com/sgrvinod/Wide-Residual-Nets-for-SETI
Here we have used a simple CNN based model. The model did not learn much after the first 2 epochs (accuracy is around 74% after 10 epochs).
Reasons:
* The signals in the dataset have a noise factor added to it.
* Even though the dataset, we have used here, is simpler than the other datasets provided by SETI, it's a bit challenging to extract features using a simple model like ours. So it is essentially a underfitting problem.
Possible improvements:
* Add additional CNN blocks, change filter sizes (e.g. 7x7, 5x5 etc.) to learn more features.
* Add additional fully connected layers.
* Here we have used Adam optimizer. It has convergence issues. We can change it SGD, and see what happens.
* Use a different architechture altogether.
|
github_jupyter
|
### building a dask array without knowing sizes
#### from dask.dataframe
```
from dask import array as da, dataframe as ddf, delayed, compute
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
da.from_delayed
def get_chunk_df(array_size,n_cols):
col_names = [f"col_{i}" for i in range(n_cols)]
pd_df = pd.DataFrame(
{nm:pd.Series(np.arange(array_size[0])) for ic,nm in enumerate(col_names)}
)
return pd_df
def get_meta(n_cols):
col_names = [f"col_{i}" for i in range(n_cols)]
return {nm:pd.Series([], dtype=np.float64) for nm in col_names}
n_cols = 5
meta_dict = get_meta(n_cols)
delayed_chunks = [delayed(get_chunk_df)((10000+10*ch,),n_cols) for ch in range(0,5)]
df_delayed = ddf.from_delayed(delayed_chunks,meta=meta_dict)
df_delayed
df = df_delayed.compute()
df.head()
df.size
type(df)
col0 = df_delayed['col_0'].to_dask_array()
col0
col0.min()
col0.max().compute()
col0.max().compute()
col0np = col0.compute()
col0np.shape
col0np.max()
```
### direct from_array?
```
delayed_arrays=[]
for ichunk in range(0,5):
ra_size = 10000+10*ichunk
delayed_array = delayed(np.arange)(ra_size)
delayed_arrays.append(da.from_delayed(delayed_array, (ra_size,), dtype=float))
delayed_arrays
hda = da.hstack(delayed_arrays)
hda
def get_delayed_array(base_chunk_size,n_chunks):
delayed_arrays = []
for ichunk in range(0,n_chunks):
ra_size = base_chunk_size+10*ichunk
delayed_array = delayed(np.arange)(ra_size)
delayed_arrays.append(da.from_delayed(delayed_array, (ra_size,), dtype=float))
return da.hstack(delayed_arrays)
def get_delayed_array_from_df(base_chunk_size,n_chunks):
meta_dict = get_meta(1)
delayed_chunks = [delayed(get_chunk_df)((base_chunk_size+10*ch,),1) for ch in range(0,n_chunks)]
df_delayed = ddf.from_delayed(delayed_chunks,meta=meta_dict)
return df_delayed[list(meta_dict.keys())[0]].to_dask_array()
n_chunks = 5
base_chunk_size = 1000
array_from_hstack = get_delayed_array(base_chunk_size,n_chunks)
array_from_df = get_delayed_array_from_df(base_chunk_size,n_chunks)
array_from_hstack
array_from_df
h_array = array_from_hstack.compute()
df_array = array_from_df.compute()
h_array.shape
df_array.shape
np.all(h_array==df_array)
```
### comparison
```
def array_construct_compute(base_chunk_size,n_chunks,find_mean=False):
res1 = get_delayed_array(base_chunk_size,n_chunks)
if find_mean:
r = res1.mean().compute()
else:
r = res1.compute()
return
def df_construct_compute(base_chunk_size,n_chunks,find_mean=False):
res1 = get_delayed_array_from_df(base_chunk_size,n_chunks)
if find_mean:
r = res1.mean().compute()
else:
r = res1.compute()
return
base_chunk_size = 100000
test_chunks = np.arange(2,100,5)
results = pd.DataFrame()
for n_chunks in test_chunks:
time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks)
time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks)
new_row = {
'chunks':n_chunks,'base_size':base_chunk_size,"actual_size":n_chunks * (base_chunk_size + 10),
'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,
'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,
}
results = results.append([new_row],ignore_index=True)
results.head()
def plot_results(results,xvar='chunks',log_x=False,log_y=True,second_x=None):
fig = plt.figure()
clrs = [[0,0,0,1],[1,0,0,1]]
ax1 = plt.subplot(2,1,1)
xvals = results[xvar]
for fld,clr in zip(['direct','indirect'],clrs):
plt.plot(xvals,results[fld+'_mean'],color=clr,marker='.')
clr[3] = 0.5
for pm in [1,-1]:
std_pm = results[fld+'_mean'] + results[fld+'_std']* pm *2
plt.plot(xvals,std_pm,color=clr)
if log_y:
plt.yscale('log')
if log_x:
plt.xscale('log')
plt.ylabel('time [s]')
plt.subplot(2,1,2)
plt.plot(xvals,results.indirect_mean/results.direct_mean,color='k',marker='.')
plt.ylabel('indirect / direct ')
plt.xlabel(xvar)
if log_x:
plt.xscale('log')
return fig
fig = plot_results(results,xvar='chunks')
base_chunk_size = 100000
test_chunks = np.arange(2,100,5)
results_wmn = pd.DataFrame()
for n_chunks in test_chunks:
time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks,True)
time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks, True)
new_row = {
'chunks':n_chunks,'base_size':base_chunk_size,"actual_size":n_chunks * (base_chunk_size + 10),
'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,
'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,
}
results_wmn = results.append([new_row],ignore_index=True)
fig = plot_results(results_wmn)
test_sizes = np.logspace(3,6,9-3+1)
n_chunks = 10
results_by_size = pd.DataFrame()
for base_chunk_size in test_sizes:
time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks,True)
time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks,True)
new_row = {
'chunks':n_chunks,'base_size':base_chunk_size,"actual_size":n_chunks * (base_chunk_size + 10),
'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,
'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,
}
results_by_size = results_by_size.append([new_row],ignore_index=True)
results_by_size
fig = plot_results(results_by_size,xvar='actual_size',log_x=True)
test_sizes = np.logspace(3,6,9-3+1)
n_chunks = 10
results_by_size_nomn = pd.DataFrame()
for base_chunk_size in test_sizes:
time_result_ar = %timeit -n10 -r5 -o array_construct_compute(base_chunk_size,n_chunks)
time_result_df = %timeit -n10 -r5 -o df_construct_compute(base_chunk_size,n_chunks)
new_row = {
'chunks':n_chunks,'base_size':base_chunk_size,"actual_size":n_chunks * (base_chunk_size + 10),
'direct_mean':time_result_ar.average,'direct_std':time_result_ar.stdev,
'indirect_mean':time_result_df.average,'indirect_std':time_result_df.stdev,
}
results_by_size_nomn = results_by_size.append([new_row],ignore_index=True)
fig = plot_results(results_by_size_nomn,xvar='actual_size',log_x=True)
```
the question is:
pre-compute time for counting particles + direct array <= indirect array from dataframe ?
|
github_jupyter
|
## Linear Regression with PyTorch
#### Part 2 of "PyTorch: Zero to GANs"
*This post is the second in a series of tutorials on building deep learning models with PyTorch, an open source neural networks library developed and maintained by Facebook. Check out the full series:*
1. [PyTorch Basics: Tensors & Gradients](https://jovian.ml/aakashns/01-pytorch-basics)
2. [Linear Regression & Gradient Descent](https://jovian.ml/aakashns/02-linear-regression)
3. [Image Classfication using Logistic Regression](https://jovian.ml/aakashns/03-logistic-regression)
4. [Training Deep Neural Networks on a GPU](https://jovian.ml/aakashns/04-feedforward-nn)
5. [Image Classification using Convolutional Neural Networks](https://jovian.ml/aakashns/05-cifar10-cnn)
6. [Data Augmentation, Regularization and ResNets](https://jovian.ml/aakashns/05b-cifar10-resnet)
7. [Generating Images using Generative Adverserial Networks](https://jovian.ml/aakashns/06-mnist-gan)
Continuing where the [previous tutorial](https://jvn.io/aakashns/3143ceb92b4f4cbbb4f30e203580b77b) left off, we'll discuss one of the foundational algorithms of machine learning in this post: *Linear regression*. We'll create a model that predicts crop yields for apples and oranges (*target variables*) by looking at the average temperature, rainfall and humidity (*input variables or features*) in a region. Here's the training data:

In a linear regression model, each target variable is estimated to be a weighted sum of the input variables, offset by some constant, known as a bias :
```
yield_apple = w11 * temp + w12 * rainfall + w13 * humidity + b1
yield_orange = w21 * temp + w22 * rainfall + w23 * humidity + b2
```
Visually, it means that the yield of apples is a linear or planar function of temperature, rainfall and humidity:

The *learning* part of linear regression is to figure out a set of weights `w11, w12,... w23, b1 & b2` by looking at the training data, to make accurate predictions for new data (i.e. to predict the yields for apples and oranges in a new region using the average temperature, rainfall and humidity). This is done by adjusting the weights slightly many times to make better predictions, using an optimization technique called *gradient descent*.
## System setup
This tutorial takes a code-first approach towards learning PyTorch, and you should try to follow along by running and experimenting with the code yourself. The easiest way to start executing this notebook is to click the **"Run"** button at the top of this page, and select **"Run on Binder"**. This will run the notebook on [mybinder.org](https://mybinder.org), a free online service for running Jupyter notebooks.
**NOTE**: *If you're running this notebook on Binder, please skip ahead to the next section.*
### Running on your computer locally
You can clone this notebook hosted on [Jovian.ml](https://www.jovian.ml), install the required dependencies, and start Jupyter by running the following commands on the terminal:
```bash
pip install jovian --upgrade # Install the jovian library
jovian clone aakashns/02-linear-regression # Download notebook & dependencies
cd 02-linear-regression # Enter the created directory
jovian install # Install the dependencies
conda activate 02-linear-regression # Activate virtual environment
jupyter notebook # Start Jupyter
```
On older versions of conda, you might need to run `source activate 02-linear-regression` to activate the environment. For a more detailed explanation of the above steps, check out the *System setup* section in the [previous notebook](https://jovian.ml/aakashns/01-pytorch-basics).
We begin by importing Numpy and PyTorch:
```
# Uncomment the command below if Numpy or PyTorch is not installed
# !conda install numpy pytorch cpuonly -c pytorch -y
import numpy as np
import torch
```
## Training data
The training data can be represented using 2 matrices: `inputs` and `targets`, each with one row per observation, and one column per variable.
```
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
[91, 88, 64],
[87, 134, 58],
[102, 43, 37],
[69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70],
[81, 101],
[119, 133],
[22, 37],
[103, 119]], dtype='float32')
```
We've separated the input and target variables, because we'll operate on them separately. Also, we've created numpy arrays, because this is typically how you would work with training data: read some CSV files as numpy arrays, do some processing, and then convert them to PyTorch tensors as follows:
```
# Convert inputs and targets to tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
```
## Linear regression model from scratch
The weights and biases (`w11, w12,... w23, b1 & b2`) can also be represented as matrices, initialized as random values. The first row of `w` and the first element of `b` are used to predict the first target variable i.e. yield of apples, and similarly the second for oranges.
```
# Weights and biases
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
print(w)
print(b)
```
`torch.randn` creates a tensor with the given shape, with elements picked randomly from a [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) with mean 0 and standard deviation 1.
Our *model* is simply a function that performs a matrix multiplication of the `inputs` and the weights `w` (transposed) and adds the bias `b` (replicated for each observation).

We can define the model as follows:
```
def model(x):
return x @ w.t() + b
```
`@` represents matrix multiplication in PyTorch, and the `.t` method returns the transpose of a tensor.
The matrix obtained by passing the input data into the model is a set of predictions for the target variables.
```
# Generate predictions
preds = model(inputs)
print(preds)
```
Let's compare the predictions of our model with the actual targets.
```
# Compare with targets
print(targets)
```
You can see that there's a huge difference between the predictions of our model, and the actual values of the target variables. Obviously, this is because we've initialized our model with random weights and biases, and we can't expect it to *just work*.
## Loss function
Before we improve our model, we need a way to evaluate how well our model is performing. We can compare the model's predictions with the actual targets, using the following method:
* Calculate the difference between the two matrices (`preds` and `targets`).
* Square all elements of the difference matrix to remove negative values.
* Calculate the average of the elements in the resulting matrix.
The result is a single number, known as the **mean squared error** (MSE).
```
# MSE loss
def mse(t1, t2):
diff = t1 - t2
return torch.sum(diff * diff) / diff.numel()
```
`torch.sum` returns the sum of all the elements in a tensor, and the `.numel` method returns the number of elements in a tensor. Let's compute the mean squared error for the current predictions of our model.
```
# Compute loss
loss = mse(preds, targets)
print(loss)
```
Here’s how we can interpret the result: *On average, each element in the prediction differs from the actual target by about 145 (square root of the loss 20834)*. And that’s pretty bad, considering the numbers we are trying to predict are themselves in the range 50–200. Also, the result is called the *loss*, because it indicates how bad the model is at predicting the target variables. Lower the loss, better the model.
## Compute gradients
With PyTorch, we can automatically compute the gradient or derivative of the loss w.r.t. to the weights and biases, because they have `requires_grad` set to `True`.
```
# Compute gradients
loss.backward()
```
The gradients are stored in the `.grad` property of the respective tensors. Note that the derivative of the loss w.r.t. the weights matrix is itself a matrix, with the same dimensions.
```
# Gradients for weights
print(w)
print(w.grad)
```
The loss is a [quadratic function](https://en.wikipedia.org/wiki/Quadratic_function) of our weights and biases, and our objective is to find the set of weights where the loss is the lowest. If we plot a graph of the loss w.r.t any individual weight or bias element, it will look like the figure shown below. A key insight from calculus is that the gradient indicates the rate of change of the loss, or the [slope](https://en.wikipedia.org/wiki/Slope) of the loss function w.r.t. the weights and biases.
If a gradient element is **positive**:
* **increasing** the element's value slightly will **increase** the loss.
* **decreasing** the element's value slightly will **decrease** the loss

If a gradient element is **negative**:
* **increasing** the element's value slightly will **decrease** the loss.
* **decreasing** the element's value slightly will **increase** the loss.

The increase or decrease in loss by changing a weight element is proportional to the value of the gradient of the loss w.r.t. that element. This forms the basis for the optimization algorithm that we'll use to improve our model.
Before we proceed, we reset the gradients to zero by calling `.zero_()` method. We need to do this, because PyTorch accumulates, gradients i.e. the next time we call `.backward` on the loss, the new gradient values will get added to the existing gradient values, which may lead to unexpected results.
```
w.grad.zero_()
b.grad.zero_()
print(w.grad)
print(b.grad)
```
## Adjust weights and biases using gradient descent
We'll reduce the loss and improve our model using the gradient descent optimization algorithm, which has the following steps:
1. Generate predictions
2. Calculate the loss
3. Compute gradients w.r.t the weights and biases
4. Adjust the weights by subtracting a small quantity proportional to the gradient
5. Reset the gradients to zero
Let's implement the above step by step.
```
# Generate predictions
preds = model(inputs)
print(preds)
```
Note that the predictions are same as before, since we haven't made any changes to our model. The same holds true for the loss and gradients.
```
# Calculate the loss
loss = mse(preds, targets)
print(loss)
# Compute gradients
loss.backward()
print(w.grad)
print(b.grad)
```
Finally, we update the weights and biases using the gradients computed above.
```
# Adjust weights & reset gradients
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
```
A few things to note above:
* We use `torch.no_grad` to indicate to PyTorch that we shouldn't track, calculate or modify gradients while updating the weights and biases.
* We multiply the gradients with a really small number (`10^-5` in this case), to ensure that we don't modify the weights by a really large amount, since we only want to take a small step in the downhill direction of the gradient. This number is called the *learning rate* of the algorithm.
* After we have updated the weights, we reset the gradients back to zero, to avoid affecting any future computations.
Let's take a look at the new weights and biases.
```
print(w)
print(b)
```
With the new weights and biases, the model should have lower loss.
```
# Calculate loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
```
We have already achieved a significant reduction in the loss, simply by adjusting the weights and biases slightly using gradient descent.
## Train for multiple epochs
To reduce the loss further, we can repeat the process of adjusting the weights and biases using the gradients multiple times. Each iteration is called an epoch. Let's train the model for 100 epochs.
```
# Train for 100 epochs
for i in range(100):
preds = model(inputs)
loss = mse(preds, targets)
loss.backward()
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
```
Once again, let's verify that the loss is now lower:
```
# Calculate loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
```
As you can see, the loss is now much lower than what we started out with. Let's look at the model's predictions and compare them with the targets.
```
# Predictions
preds
# Targets
targets
```
The prediction are now quite close to the target variables, and we can get even better results by training for a few more epochs.
At this point, we can save our notebook and upload it to [Jovian.ml](https://www.jovian.ml) for future reference and sharing.
```
!pip install jovian --upgrade -q
import jovian
jovian.commit()
```
`jovian.commit` uploads the notebook to [Jovian.ml](https://www.jovian.ml), captures the Python environment and creates a sharable link for the notebook. You can use this link to share your work and let anyone reproduce it easily with the `jovian clone` command. Jovian also includes a powerful commenting interface, so you (and others) can discuss & comment on specific parts of your notebook:

## Linear regression using PyTorch built-ins
The model and training process above were implemented using basic matrix operations. But since this such a common pattern , PyTorch has several built-in functions and classes to make it easy to create and train models.
Let's begin by importing the `torch.nn` package from PyTorch, which contains utility classes for building neural networks.
```
import torch.nn as nn
```
As before, we represent the inputs and targets and matrices.
```
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58],
[102, 43, 37], [69, 96, 70], [73, 67, 43],
[91, 88, 64], [87, 134, 58], [102, 43, 37],
[69, 96, 70], [73, 67, 43], [91, 88, 64],
[87, 134, 58], [102, 43, 37], [69, 96, 70]],
dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133],
[22, 37], [103, 119], [56, 70],
[81, 101], [119, 133], [22, 37],
[103, 119], [56, 70], [81, 101],
[119, 133], [22, 37], [103, 119]],
dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
inputs
```
We are using 15 training examples this time, to illustrate how to work with large datasets in small batches.
## Dataset and DataLoader
We'll create a `TensorDataset`, which allows access to rows from `inputs` and `targets` as tuples, and provides standard APIs for working with many different types of datasets in PyTorch.
```
from torch.utils.data import TensorDataset
# Define dataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
```
The `TensorDataset` allows us to access a small section of the training data using the array indexing notation (`[0:3]` in the above code). It returns a tuple (or pair), in which the first element contains the input variables for the selected rows, and the second contains the targets.
We'll also create a `DataLoader`, which can split the data into batches of a predefined size while training. It also provides other utilities like shuffling and random sampling of the data.
```
from torch.utils.data import DataLoader
# Define data loader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
```
The data loader is typically used in a `for-in` loop. Let's look at an example.
```
for xb, yb in train_dl:
print(xb)
print(yb)
break
```
In each iteration, the data loader returns one batch of data, with the given batch size. If `shuffle` is set to `True`, it shuffles the training data before creating batches. Shuffling helps randomize the input to the optimization algorithm, which can lead to faster reduction in the loss.
## nn.Linear
Instead of initializing the weights & biases manually, we can define the model using the `nn.Linear` class from PyTorch, which does it automatically.
```
# Define model
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)
```
PyTorch models also have a helpful `.parameters` method, which returns a list containing all the weights and bias matrices present in the model. For our linear regression model, we have one weight matrix and one bias matrix.
```
# Parameters
list(model.parameters())
```
We can use the model to generate predictions in the exact same way as before:
```
# Generate predictions
preds = model(inputs)
preds
```
## Loss Function
Instead of defining a loss function manually, we can use the built-in loss function `mse_loss`.
```
# Import nn.functional
import torch.nn.functional as F
```
The `nn.functional` package contains many useful loss functions and several other utilities.
```
# Define loss function
loss_fn = F.mse_loss
```
Let's compute the loss for the current predictions of our model.
```
loss = loss_fn(model(inputs), targets)
print(loss)
```
## Optimizer
Instead of manually manipulating the model's weights & biases using gradients, we can use the optimizer `optim.SGD`. SGD stands for `stochastic gradient descent`. It is called `stochastic` because samples are selected in batches (often with random shuffling) instead of as a single group.
```
# Define optimizer
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
```
Note that `model.parameters()` is passed as an argument to `optim.SGD`, so that the optimizer knows which matrices should be modified during the update step. Also, we can specify a learning rate which controls the amount by which the parameters are modified.
## Train the model
We are now ready to train the model. We'll follow the exact same process to implement gradient descent:
1. Generate predictions
2. Calculate the loss
3. Compute gradients w.r.t the weights and biases
4. Adjust the weights by subtracting a small quantity proportional to the gradient
5. Reset the gradients to zero
The only change is that we'll work batches of data, instead of processing the entire training data in every iteration. Let's define a utility function `fit` which trains the model for a given number of epochs.
```
# Utility function to train the model
def fit(num_epochs, model, loss_fn, opt, train_dl):
# Repeat for given number of epochs
for epoch in range(num_epochs):
# Train with batches of data
for xb,yb in train_dl:
# 1. Generate predictions
pred = model(xb)
# 2. Calculate loss
loss = loss_fn(pred, yb)
# 3. Compute gradients
loss.backward()
# 4. Update parameters using gradients
opt.step()
# 5. Reset the gradients to zero
opt.zero_grad()
# Print the progress
if (epoch+1) % 10 == 0:
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
```
Some things to note above:
* We use the data loader defined earlier to get batches of data for every iteration.
* Instead of updating parameters (weights and biases) manually, we use `opt.step` to perform the update, and `opt.zero_grad` to reset the gradients to zero.
* We've also added a log statement which prints the loss from the last batch of data for every 10th epoch, to track the progress of training. `loss.item` returns the actual value stored in the loss tensor.
Let's train the model for 100 epochs.
```
fit(100, model, loss_fn, opt,train_dl)
```
Let's generate predictions using our model and verify that they're close to our targets.
```
# Generate predictions
preds = model(inputs)
preds
# Compare with targets
targets
```
Indeed, the predictions are quite close to our targets, and now we have a fairly good model to predict crop yields for apples and oranges by looking at the average temperature, rainfall and humidity in a region.
## Commit and update the notebook
As a final step, we can record a new version of the notebook using the `jovian` library.
```
import jovian
jovian.commit()
```
Note that running `jovian.commit` a second time records a new version of your existing notebook. With Jovian.ml, you can avoid creating copies of your Jupyter notebooks and keep versions organized. Jovian also provides a visual diff ([example](https://jovian.ml/aakashns/keras-mnist-jovian/diff?base=8&remote=2)) so you can inspect what has changed between different versions:

## Further Reading
We've covered a lot of ground this this tutorial, including *linear regression* and the *gradient descent* optimization algorithm. Here are a few resources if you'd like to dig deeper into these topics:
* For a more detailed explanation of derivates and gradient descent, see [these notes from a Udacity course](https://storage.googleapis.com/supplemental_media/udacityu/315142919/Gradient%20Descent.pdf).
* For an animated visualization of how linear regression works, [see this post](https://hackernoon.com/visualizing-linear-regression-with-pytorch-9261f49edb09).
* For a more mathematical treatment of matrix calculus, linear regression and gradient descent, you should check out [Andrew Ng's excellent course notes](https://github.com/Cleo-Stanford-CS/CS229_Notes/blob/master/lectures/cs229-notes1.pdf) from CS229 at Stanford University.
* To practice and test your skills, you can participate in the [Boston Housing Price Prediction](https://www.kaggle.com/c/boston-housing) competition on Kaggle, a website that hosts data science competitions.
With this, we complete our discussion of linear regression in PyTorch, and we’re ready to move on to the next topic: *Logistic regression*.
|
github_jupyter
|
# Spin-polarized calculations with BigDFT
The goal of this notebook is to explain how to do a spin-polarized calculation with BigDFT (`nspin=2`).
We start with the molecule O$_2$ and a non-spin polarized calculation, which is the code default.
To do that we only have to specify the atomic positions of the molecule.
```
from BigDFT import Calculators as C
calc = C.SystemCalculator()
posO1=3*[0.0]
posO2=[0.0, 0.0, 1.2075] # in angstroem
inpt={'posinp':
{ 'positions': [ {'O': posO1 }, {'O': posO2 }], 'units': 'angstroem' }}
logNSP = calc.run(input=inpt)
```
Such calculation produced a converged set of KS LDA orbitals, with the following density of states:
```
%matplotlib inline
DoS=logNSP.get_dos(label='NSP')
DoS.plot()
```
Now we do the same calculation but with spin-polarized specifying `nspin=2`, in the `dft` field.
```
inpt['dft']={'nspin': 2}
logSP = calc.run(input=inpt)
```
We may see that this run did not produce any difference with respect to the previous one. Even though we doubled the number of orbitals, the input guess wavefunctions and densities are identical in both the spin sectors. As a consequence the energy and the DoS are identical to the NSP case:
```
print logNSP.energy,logSP.energy
DoS.append_from_bandarray(logSP.evals,label='SP (m 0)')
DoS.plot()
```
This is due to the fact that:
1. We had the same input guess for up and down subspaces;
2. We had the same number of orbitals in both the sectors and no empty orbitals during the minimization.
Such problems can be solved at the same time by performing mixing scheme with *random* initialization of the wavefunctions:
```
inpt['import']='mixing'
inpt['mix']={'iscf': 12, 'itrpmax': 20} # mixing on the potential, just 20 Hamiltonian iterations for a quick look
inpt['dft']['inputpsiid']= 'RANDOM' #for random initialization
logSP_mix = calc.run(input=inpt)
```
We see that with these input parameters the DoS is different from the NSP case, the energy is lower and the net polarization is 2:
```
print logNSP.energy,logSP_mix.energy
DoS.append_from_bandarray(logSP_mix.evals,label='SP mix(m 0, RAND)')
DoS.plot()
print 'Magnetic Polarization', logSP_mix.magnetization
```
We see that to break the symmetry it is therefore necessary to have different IG subspaces between up and down orbitals, otherwise the results will be identical to the NSP case.
Now that we know the polarization of the molecule, we may perform a direct minimization calculation of the molecule by specifying from the beginning the `mpol: 2` condition. We can also add some empty orbitals using the keyword `norbsempty`.
```
inpt={'dft': { 'nspin': 2, 'mpol': 2},
'mix': { 'norbsempty': 2 },
'posinp':
{ 'positions': [ {'O': posO1 }, {'O': posO2 }], 'units': 'angstroem' } }
logSP_m2 = calc.run(input=inpt)
print logSP_mix.energy,logSP_m2.energy
DoS.append_from_bandarray(logSP_m2.evals,label='SP m 2')
DoS.plot()
```
We show that the total magnetization is 2 in the case of the oxygen dimer. The DoS is not exactly the same because the mixing scheme was not fully converged (check increasing the value of `itrpmax`).
```
DoS=logSP_mix.get_dos(label='SP mix')
DoS.append_from_bandarray(logSP_m2.evals,label='SP m 2')
DoS.plot()
```
## Odd electron system: the N atom
What does happen when the number of electrons is odd as in the case of N?
If we do a NSP calculation, the occupation of the last state is 1. Switching only the parameter `nspin` to the value 2, we do the same calculation with averaged-occupation (0.5 for the last up and down state).
To do a spin-polarisation calculation, we need to change mpol which is the difference between the number of occupied electrons of different spins.
In the same way, we can look for the total magnetization using the mixing scheme.
```
inpt = { 'dft': { 'nspin': 1},
'posinp': { 'units': 'angstroem',
'positions': [ {'N': 3*[0.0] } ] } }
logNSP = calc.run(input=inpt)
inpt['dft']['nspin'] = 2
logSP = calc.run(input=inpt)
print logNSP.energy,logSP.energy
print logNSP.fermi_level,logSP.fermi_level
DoS=logNSP.get_dos(label='NSP')
DoS.append_from_bandarray(logSP.evals,label='SP')
DoS.plot()
inpt['dft']['inputpsiid']='RANDOM' #Random input guess
inpt['mix']={'iscf': 12, 'itrpmax': 30} # mixing on the potential, just 30 Hamiltonian iterations for a quick look
inpt['import'] = 'mixing'
logSP_mix = calc.run(input=inpt)
print logSP_mix.magnetization
DoS.append_from_bandarray(logSP_mix.evals,label='SP mix')
DoS.plot()
```
We found a total magnetization of 3 following the Hund's rule.
## Defining the input guess (*ig_occupation* keyword)
We have shown that by default, the input guess is LCAO (localised atomic orbitals) defining by the pseudo-orbitals.
The occupation is sphere symmetry (same occupation per orbital moment).
We have used random input guess to break the spin symmetry.
We can also use an LCAO input guess and indicate the occupation number for the input guess using the keyword `ig_occupation` in order to break the spin symmetry
```
inpt['dft']['inputpsiid']='LCAO' #LCAO input guess
inpt['ig_occupation'] = { 'N': { '2s': { 'up': 1, 'down': 1}, '2p': {'up': [1,1,1], 'down': 0} } }
logLCAO_mix = calc.run(input=inpt)
print logSP_mix.energy,logLCAO_mix.energy
DoS=logSP_mix.get_dos(label='SP RAN')
DoS.append_from_bandarray(logLCAO_mix.evals,label='SP LCAO')
DoS.plot()
```
Instead of `ig_occupation`, it is also possible to specify the keyword `IGSpin` per atom in the `posinp` dictionary.
```
inpt = { 'dft': { 'nspin': 2, 'mpol': 3},
'posinp': { 'units': 'angstroem',
'positions': [ {'N': 3*[0.0], 'IGSpin': 3 } ] },
'ig_occupation': { 'N': { '2s': { 'up': 1, 'down': 1},
'2p': { 'up': [1,1,1], 'down': 0} } } }
logIG = calc.run(input=inpt)
print logSP_mix.energy,logLCAO_mix.energy,logIG.energy
DoS=logLCAO_mix.get_dos(label='LCAO ig_occ')
DoS.append_from_bandarray(logIG.evals,label='LCAO IGSpin')
DoS.plot()
```
## Occupation numbers
Finally, it is possible to set the occupation numbers for each state by the parameter `occup`.
In this case, the direct minimization is done with this occupation number.
In the case of N, there are 8 orbitals, the first 4 are up and the other ones down.
Here we do a calculation following the Hund's rule.
```
del inpt['ig_occupation']
inpt['occupation'] = { 'up': { 'Orbital 1': 1, 'Orbital 2': 1, 'Orbital 3': 1, 'Orbital 4': 1 }, # up
'down': { 'Orbital 1': 1, 'Orbital 2': 0, 'Orbital 3': 0, 'Orbital 4': 0 } }# down
logS = calc.run(input=inpt)
print logSP_mix.energy,logLCAO_mix.energy,logIG.energy,logS.energy
DoS.append_from_bandarray(logS.evals,label='SP occup')
DoS.plot()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/training-data-analyst/blob/master/courses/fast-and-lean-data-science/01_MNIST_TPU_Keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## MNIST on TPU (Tensor Processing Unit)<br>or GPU using tf.Keras and tf.data.Dataset
<table><tr><td><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/keras-tensorflow-tpu300px.png" width="300" alt="Keras+Tensorflow+Cloud TPU"></td></tr></table>
This sample trains an "MNIST" handwritten digit
recognition model on a GPU or TPU backend using a Keras
model. Data are handled using the tf.data.Datset API. This is
a very simple sample provided for educational purposes. Do
not expect outstanding TPU performance on a dataset as
small as MNIST.
<h3><a href="https://cloud.google.com/gpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/gpu-hexagon.png" width="50"></a> Train on GPU or TPU <a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a></h3>
1. Select a GPU or TPU backend (Runtime > Change runtime type)
1. Runtime > Run All (Watch out: the "Colab-only auth" cell requires user input)
<h3><a href="https://cloud.google.com/ml-engine/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/mlengine-hexagon.png" width="50"></a> Deploy to ML Engine</h3>
1. At the bottom of this notebook you can deploy your trained model to ML Engine for a serverless, autoscaled, REST API experience. You will need a GCP project and a GCS bucket for this last part.
TPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage (GCS)
### Parameters
```
BATCH_SIZE = 128 # On TPU, this will be the per-core batch size. A Cloud TPU has 8 cores so tha global TPU batch size is 1024
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
```
### Imports
```
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.python.platform import tf_logging
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
"""
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
"""
# Matplotlib config
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
# get one batch from each: 10000 validation digits, N training digits
unbatched_train_ds = training_dataset.apply(tf.data.experimental.unbatch())
v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
t_images, t_labels = unbatched_train_ds.batch(N).make_one_shot_iterator().get_next()
# Run once, get one batch. Session.run returns numpy results
with tf.Session() as ses:
(validation_digits, validation_labels,
training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
# these were one-hot encoded in the dataset
validation_labels = np.argmax(validation_labels, axis=1)
training_labels = np.argmax(training_labels, axis=1)
return (training_digits, training_labels,
validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
font_labels = []
img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
d = PIL.ImageDraw.Draw(img)
for i in range(n):
font_labels.append(i%10)
d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
plt.figure(figsize=(13,3))
digits = np.reshape(digits, [n, 28, 28])
digits = np.swapaxes(digits, 0, 1)
digits = np.reshape(digits, [28, 28*n])
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
plt.imshow(digits)
plt.grid(None)
plt.title(title)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
idx = np.argsort(predictions==labels) # sort order: unrecognized first
for i in range(lines):
display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
"{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)
# utility to display training and validation curves
def display_training_curves(training, validation, title, subplot):
if subplot%10==1: # set up the subplots on the first call
plt.subplots(figsize=(10,10), facecolor='#F0F0F0')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.grid(linewidth=1, color='white')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'])
```
### Colab-only auth for this notebook and the TPU
```
IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence
if IS_COLAB_BACKEND:
from google.colab import auth
auth.authenticate_user() # Authenticates the backend and also the TPU using your credentials so that they can access your private GCS buckets
```
### tf.data.Dataset: parse files and prepare training and validation datasets
Please read the [best practices for building](https://www.tensorflow.org/guide/performance/datasets) input pipelines with tf.data.Dataset
```
def read_label(tf_bytestring):
label = tf.decode_raw(tf_bytestring, tf.uint8)
label = tf.reshape(label, [])
label = tf.one_hot(label, 10)
return label
def read_image(tf_bytestring):
image = tf.decode_raw(tf_bytestring, tf.uint8)
image = tf.cast(image, tf.float32)/256.0
image = tf.reshape(image, [28*28])
return image
def load_dataset(image_file, label_file):
imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
return dataset
def get_training_dataset(image_file, label_file, batch_size):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
dataset = dataset.repeat() # Mandatory for Keras for now
dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
dataset = dataset.prefetch(-1) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
return dataset
def get_validation_dataset(image_file, label_file):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
dataset = dataset.repeat() # Mandatory for Keras for now
return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
```
### Let's have a look at the data
```
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
```
### Keras model: 3 convolutional layers, 2 dense layers
If you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: [Tensorflow and deep learning without a PhD](https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/#featured-code-sample)
```
# This model trains to 99.4% sometimes 99.5% accuracy in 10 epochs (with a batch size of 32)
l = tf.keras.layers
model = tf.keras.Sequential(
[
l.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),
l.Conv2D(filters=6, kernel_size=3, padding='same', use_bias=False), # no bias necessary before batch norm
l.BatchNormalization(scale=False, center=True), # no batch norm scaling necessary before "relu"
l.Activation('relu'), # activation after batch norm
l.Conv2D(filters=12, kernel_size=6, padding='same', use_bias=False, strides=2),
l.BatchNormalization(scale=False, center=True),
l.Activation('relu'),
l.Conv2D(filters=24, kernel_size=6, padding='same', use_bias=False, strides=2),
l.BatchNormalization(scale=False, center=True),
l.Activation('relu'),
l.Flatten(),
l.Dense(200, use_bias=False),
l.BatchNormalization(scale=False, center=True),
l.Activation('relu'),
l.Dropout(0.5), # Dropout on dense layer only
l.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', # learning rate will be set by LearningRateScheduler
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
# set up learning rate decay
lr_decay = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 0.0001 + 0.02 * math.pow(0.5, 1+epoch), verbose=True)
```
### Train and validate the model
```
EPOCHS = 10
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
tpu = None
trained_model = model
# Counting steps and batches on TPU: the tpu.keras_to_tpu_model API regards the batch size of the input dataset
# as the per-core batch size. The effective batch size is 8x more because Cloud TPUs have 8 cores. It increments
# the step by +8 everytime a global batch (8 per-core batches) is processed. Therefore batch size and steps_per_epoch
# settings can stay as they are for TPU training. The training will just go faster.
# Warning: this might change in the final version of the Keras/TPU API.
try: # TPU detection
tpu = tf.contrib.cluster_resolver.TPUClusterResolver() # Picks up a connected TPU on Google's Colab, ML Engine, Kubernetes and Deep Learning VMs accessed through the 'ctpu up' utility
#tpu = tf.contrib.cluster_resolver.TPUClusterResolver('MY_TPU_NAME') # If auto-detection does not work, you can pass the name of the TPU explicitly (tip: on a VM created with "ctpu up" the TPU has the same name as the VM)
except ValueError:
print('Training on GPU/CPU')
if tpu: # TPU training
strategy = tf.contrib.tpu.TPUDistributionStrategy(tpu)
trained_model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)
# Work in progress: reading directly from dataset object not yet implemented
# for Keras/TPU. Keras/TPU needs a function that returns a dataset.
history = trained_model.fit(training_input_fn, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_input_fn, validation_steps=1, callbacks=[lr_decay])
else: # GPU/CPU training
history = trained_model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[lr_decay])
```
### Visualize training and validation curves
```
print(history.history.keys())
display_training_curves(history.history['acc'], history.history['val_acc'], 'accuracy', 211)
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)
```
### Visualize predictions
```
# recognize digits from local fonts
probabilities = trained_model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = trained_model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
```
## Deploy the trained model to ML Engine
Push your trained model to production on ML Engine for a serverless, autoscaled, REST API experience.
You will need a GCS bucket and a GCP project for this.
Models deployed on ML Engine autoscale to zero if not used. There will be no ML Engine charges after you are done testing.
Google Cloud Storage incurs charges. Empty the bucket after deployment if you want to avoid these. Once the model is deployed, the bucket is not useful anymore.
### Configuration
```
PROJECT = "" #@param {type:"string"}
BUCKET = "gs://" #@param {type:"string", default:"jddj"}
NEW_MODEL = True #@param {type:"boolean"}
MODEL_NAME = "colabmnist" #@param {type:"string"}
MODEL_VERSION = "v0" #@param {type:"string"}
assert PROJECT, 'For this part, you need a GCP project. Head to http://console.cloud.google.com/ and create one.'
assert re.search(r'gs://.+', BUCKET), 'For this part, you need a GCS bucket. Head to http://console.cloud.google.com/storage and create one.'
```
### Export the model for serving from ML Engine
```
class ServingInput(tf.keras.layers.Layer):
# the important detail in this boilerplate code is "trainable=False"
def __init__(self, name, dtype, batch_input_shape=None):
super(ServingInput, self).__init__(trainable=False, name=name, dtype=dtype, batch_input_shape=batch_input_shape)
def get_config(self):
return {'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'name': self.name }
def call(self, inputs):
# When the deployed model is called through its REST API,
# the JSON payload is parsed automatically, transformed into
# a tensor and passed to this input layer. You can perform
# additional transformations, such as decoding JPEGs for example,
# before sending the data to your model. However, you can only
# use tf.xxxx operations.
return inputs
# little wrinkle: must copy the model from TPU to CPU manually. This is a temporary workaround.
tf_logging.set_verbosity(tf_logging.INFO)
restored_model = model
restored_model.set_weights(trained_model.get_weights()) # this copied the weights from TPU, does nothing on GPU
tf_logging.set_verbosity(tf_logging.WARN)
# add the serving input layer
serving_model = tf.keras.Sequential()
serving_model.add(ServingInput('serving', tf.float32, (None, 28*28)))
serving_model.add(restored_model)
export_path = tf.contrib.saved_model.save_keras_model(serving_model, os.path.join(BUCKET, 'keras_export')) # export he model to your bucket
export_path = export_path.decode('utf-8')
print("Model exported to: ", export_path)
```
### Deploy the model
This uses the command-line interface. You can do the same thing through the ML Engine UI at https://console.cloud.google.com/mlengine/models
```
# Create the model
if NEW_MODEL:
!gcloud ml-engine models create {MODEL_NAME} --project={PROJECT} --regions=us-central1
# Create a version of this model (you can add --async at the end of the line to make this call non blocking)
# Additional config flags are available: https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions
# You can also deploy a model that is stored locally by providing a --staging-bucket=... parameter
!echo "Deployment takes a couple of minutes. You can watch your deployment here: https://console.cloud.google.com/mlengine/models/{MODEL_NAME}"
!gcloud ml-engine versions create {MODEL_VERSION} --model={MODEL_NAME} --origin={export_path} --project={PROJECT} --runtime-version=1.10
```
### Test the deployed model
Your model is now available as a REST API. Let us try to call it. The cells below use the "gcloud ml-engine"
command line tool but any tool that can send a JSON payload to a REST endpoint will work.
```
# prepare digits to send to online prediction endpoint
digits = np.concatenate((font_digits, validation_digits[:100-N]))
labels = np.concatenate((font_labels, validation_labels[:100-N]))
with open("digits.json", "w") as f:
for digit in digits:
# the format for ML Engine online predictions is: one JSON object per line
data = json.dumps({"serving_input": digit.tolist()}) # "serving_input" because the ServingInput layer was named "serving". Keras appends "_input"
f.write(data+'\n')
# Request online predictions from deployed model (REST API) using the "gcloud ml-engine" command line.
predictions = !gcloud ml-engine predict --model={MODEL_NAME} --json-instances digits.json --project={PROJECT} --version {MODEL_VERSION}
print(predictions)
probabilities = np.stack([json.loads(p) for p in predictions[1:]]) # first line is the name of the input layer: drop it, parse the rest
predictions = np.argmax(probabilities, axis=1)
display_top_unrecognized(digits, predictions, labels, N, 100//N)
```
## License
---
author: Martin Gorner<br>
twitter: @martin_gorner
---
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---
This is not an official Google product but sample code provided for an educational purpose
|
github_jupyter
|
```
import numpy as np
import pandas as pd
df_can = pd.read_excel('https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx',
sheet_name='Canada by Citizenship',
skiprows=range(20),
skip_footer=2
)
print('Data downloaded and read into a dataframe!')
df_can.head()
print(df_can.shape)
# clean up the dataset to remove unnecessary columns (eg. REG)
df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)
# let's rename the columns so that they make sense
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)
# for sake of consistency, let's also make all column labels of type string
df_can.columns = list(map(str, df_can.columns))
# set the country name as index - useful for quickly looking up countries using .loc method
df_can.set_index('Country', inplace=True)
# add total column
df_can['Total'] = df_can.sum(axis=1)
# years that we will be using in this lesson - useful for plotting later on
years = list(map(str, range(1980, 2014)))
print('data dimensions:', df_can.shape)
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('ggplot') # optional: for ggplot-like style
# check for latest version of Matplotlib
print('Matplotlib version: ', mpl.__version__) # >= 2.0.0
# group countries by continents and apply sum() function
df_continents = df_can.groupby('Continent', axis=0).sum()
# note: the output of the groupby method is a `groupby' object.
# we can not use it further until we apply a function (eg .sum())
print(type(df_can.groupby('Continent', axis=0)))
df_continents.head()
# autopct create %, start angle represent starting point
df_continents['Total'].plot(kind='pie',
figsize=(5, 6),
autopct='%1.1f%%', # add in percentages
startangle=90, # start angle 90° (Africa)
shadow=True, # add shadow
)
plt.title('Immigration to Canada by Continent [1980 - 2013]')
plt.axis('equal') # Sets the pie chart to look like a circle.
plt.show()
colors_list = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'lightgreen', 'pink']
explode_list = [0.1, 0, 0, 0, 0.1, 0.1] # ratio for each continent with which to offset each wedge.
df_continents['Total'].plot(kind='pie',
figsize=(15, 6),
autopct='%1.1f%%',
startangle=90,
shadow=True,
labels=None, # turn off labels on pie chart
pctdistance=1.12, # the ratio between the center of each pie slice and the start of the text generated by autopct
colors=colors_list, # add custom colors
explode=explode_list # 'explode' lowest 3 continents
)
# scale the title up by 12% to match pctdistance
plt.title('Immigration to Canada by Continent [1980 - 2013]', y=1.12)
plt.axis('equal')
# add legend
plt.legend(labels=df_continents.index, loc='upper left')
plt.show()
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from tqdm import tqdm
from scipy.spatial.distance import cdist
from sklearn.metrics import roc_curve, roc_auc_score
timings = Path('timings/')
raw_data = Path('surface_data/raw/protein_surfaces/01-benchmark_surfaces_npy')
experiment_names = ['TangentConv_site_1layer_5A_epoch49', 'TangentConv_site_1layer_9A_epoch49', 'TangentConv_site_1layer_15A_epoch49','TangentConv_site_3layer_5A_epoch49','TangentConv_site_3layer_9A_epoch46', 'TangentConv_site_3layer_15A_epoch17','PointNet_site_1layer_5A_epoch30','PointNet_site_1layer_9A_epoch30','PointNet_site_3layer_5A_epoch46', 'PointNet_site_3layer_9A_epoch37', 'DGCNN_site_1layer_k40_epoch46','DGCNN_site_1layer_k100_epoch32','DGCNN_site_3layer_k40_epoch33']
experiment_names_short = ['Ours 1L 5A', 'Ours 1L 9A', 'Ours 1L 15A','Ours 3L 5A','Ours 3L 9A', 'Ours 3L 15A','PN++ 1L 5A','PN++ 1L 9A','PN++ 3L 5A', 'PN++ 3L 9A', 'DGCNN 1L K40','DGCNN 1L K100','DGCNN 3L K40']
performance = []
times = []
time_errors = []
memory = []
memory_errors = []
for experiment_name in experiment_names:
predpoints_preds = np.load(timings/f'{experiment_name}_predpoints_preds.npy')
predpoints_labels = np.load(timings/f'{experiment_name}_predpoints_labels.npy')
rocauc = roc_auc_score(predpoints_labels,predpoints_preds)
conv_times = np.load(timings/f'{experiment_name}_convtime.npy')
memoryusage = np.load(timings/f'{experiment_name}_memoryusage.npy')
memoryusage = memoryusage
conv_times = conv_times
performance.append(rocauc)
times.append(conv_times.mean())
time_errors.append(conv_times.std())
memory.append(memoryusage.mean())
memory_errors.append(memoryusage.std())
performance += [0.849]
times += [0.16402676922934395]
time_errors += [0.04377787154914341]
memory += [1491945956.9371428]
memory_errors += [125881554.73354617]
experiment_names_short += ['MaSIF 3L 9A']
colors += [40]
experiment_names_short = [f'{i+1}) {experiment_names_short[i]}' for i in range(len(experiment_names_short))]
times = np.array(times)*1e3
time_errors = np.array(time_errors)*1e3
memory = np.array(memory)*1e-6
memory_errors = np.array(memory_errors)*1e-6
colors = [f'hsl(240,100,{25+i*10.83})' for i in range(6)]+[f'hsl(116,100,{25+i*16.25})' for i in range(4)] + [f'hsl(300,100,{25+i*21.66})' for i in range(3)] + [f'hsl(0,100,50)']
fig = go.Figure()
for i in range(len(times)):
fig.add_trace(go.Scatter(
x=[times[i]],
y=[performance[i]],
mode='markers',
name=experiment_names_short[i],
marker = dict(color=colors[i]),
error_x=dict(
type='data',
symmetric=True,
array=[time_errors[i]])))
fig.update_layout(
xaxis_title='Forward pass time per protein [ms] (log)',
yaxis_title='Site identification ROC-AUC',
legend_title="Models",
)
fig.update_xaxes(type="log")
fig.update_layout(
xaxis = dict(
tickvals = [1e1,2e1,4e1,6e1,8e1,1e2,2e2,4e2,6e2],
#tickvals = [10, 20, 50, 100, 200, 500],
)
)
fig.show()
fig.write_image('figures/time_vs_perf.pdf')
fig = go.Figure()
for i in range(len(times)):
fig.add_trace(go.Scatter(
x=[memory[i]],
y=[performance[i]],
mode='markers',
marker = dict(color=colors[i]),
name=experiment_names_short[i],
error_x=dict(
type='data',
symmetric=True,
array=[memory_errors[i]])))
fig.update_layout(
xaxis_title='Memory usage per protein [MB] (log)',
yaxis_title='Site identification ROC-AUC',
legend_title="Models",
)
fig.update_xaxes(type="log")
fig.update_layout(
xaxis = dict(
tickvals = [100,200,400,600,800,1000,2000,4000],
)
)
fig.show()
fig.write_image('figures/mem_vs_perf.pdf')
```
|
github_jupyter
|
## Student Activity on Advanced Data Structure
In this activity we will have to do the following tasks
- Look up the definition of permutations, and dropwhile from [itertools documentation](https://docs.python.org/3/library/itertools.html) in Python
- Using permutations generate all possible three digit numbers that can be generated using 0, 1, and 2
- Loop over this iterator and print them and also use `type` and `assert` to make sure that the return types are tuples
- Use a single line code involving `dropwhile` and an lambda expression to convert all the tuples to lists while dropping any leading zeros (example - `(0, 1, 2)` becomes `[1, 2]`)
- Write a function which takes a list like above and returns the actual number contained in it. Example - if you pass `[1, 2]` to the function it will return you `12`. Make sure it is indeed a number and not just a concatenated string. (Hint - You will need to treat the incoming list as a stack in the function to achieve this)
### Task 1
Look up the definition of `permutations` and `dropwhile` from itertools.
There is a way to look up the definition of a function inside Jupyter itself. just type the function name followed by a `?` and press `Shift+Enter`. We encourage you to also try this way
```
### Write your code bellow this comment.
```
### Task 2
Write an expression to generate all the possible three digit numbers using 0, 1, and 2
```
### Write your code bellow this comment
```
### Task 3
Loop over the iterator expression you generated before. Use print to print each element returned by the iterator. Use `assert` and `type` to make sure that the elements are of type tuple
```
### Write your code bellow this comment
```
### Task 4
Write the loop again. But this time use `dropwhile` with a lambda expression to drop any leading zeros from the tuples. As an example `(0, 1, 2)` will become `[0, 2]`. Also cast the output of the dropwhile to a list.
_Extra task can be to check the actual type that dropwhile returns without the casting asked above_
```
### Write your code bellow this comment
```
### Task 5
Write all the logic you had written above, but this time write a separate function where you will be passing the list generated from dropwhile and the function will return the whole number contained in the list. As an example if you pass `[1, 2]` to the fucntion it will return 12 to you. Make sure that the return type is indeed a number and not a string. Although this task can be achieved using some other tricks, we require that you treat the incoming list as a stack in the function and generate the number there.
```
### Write your code bellow this comment
```
|
github_jupyter
|
---
**Universidad de Costa Rica** | Escuela de Ingeniería Eléctrica
*IE0405 - Modelos Probabilísticos de Señales y Sistemas*
### `PyX` - Serie de tutoriales de Python para el análisis de datos
# `Py5` - *Curvas de ajuste de datos*
> Los modelos para describir un fenómeno y sus parámetros pueden obtenerse a partir de una muestra de datos. Debido a la gran cantidad de modelos probabilísticos disponibles, a menudo es necesario hacer una comparación de ajuste entre muchas de ellas.
*Fabián Abarca Calderón* \
*Jonathan Rojas Sibaja*
---
## Ajuste de modelos
El ajuste de modelos es ampliamente utilizado para obtener un modelo matemático que caracterize el comportamiento de cierto sistema basandose en los datos experimentales obtenidos. Este modelo deberá predecir también otras medidas experimentales que se obtengan de su recreación.
### Estimación de máxima verosimilitud (MLE)
(Esto es de menor prioridad) La estimación de máxima verosimilitud (**MLE**, *maximum likelihood estimation*) es...
---
## 5.1 - Con el módulo `numpy`
Para iniciar, con la función `polyfit()` de la librería `numpy` se puede realizar el ajuste de datos experimentals a polinomios de cualquier orden. Esta función devuelve los parámetros de la recta para un modelo lineal de la forma:
$$
f(x) = mx + b
$$
Esto en el caso de un polinomio de grado 1. Un ejemplo utilizando este método es el siguiente:
```
from numpy import *
import matplotlib.pyplot as plt
# Datos experimentales
x = array([ 0., 1., 2., 3., 4.])
y = array([ 10.2 , 12.1, 15.5 , 18.3, 20.6 ])
# Ajuste a una recta (polinomio de grado 1)
p = polyfit(x, y, 1)
# Una vez conocidos los parámetros de la recta de ajuste,
#se pueden utilizar para graficar la recta de ajuste.
y_ajuste = p[0]*x + p[1]
# Dibujamos los datos experimentales
p_datos, = plt.plot(x, y, 'b.')
# Dibujamos la recta de ajuste
p_ajuste, = plt.plot(x, y_ajuste, 'r-')
plt.title('Ajuste lineal por minimos cuadrados')
plt.xlabel('Eje x')
plt.ylabel('Eje y')
plt.legend(('Datos experimentales', 'Ajuste lineal'), loc="upper left")
```
En el caso de otro tipo de regresiones, se debe aumentar el grado del polinomio. Por ejemplo, el caso de una regresió polinomial se muestra a continuación:
```
import numpy
import matplotlib.pyplot as plt
#Lo primero es crear los vectores que definen los puntos de datos
x = [1,2,3,5,6,7,8,9,10,12,13,14,15,16,18,19,21,22]
y = [100,90,80,60,60,55,60,65,70,70,75,76,78,79,90,99,99,100]
#Este método nos permite crear un modelo polinomial
mimodelo = numpy.poly1d(numpy.polyfit(x, y, 3))
#Esto determina cómo se mostrara la línea, la cual inicia en 1
#y termina en 22
milinea = numpy.linspace(1,22,100)
#Y por último graficamos los datos y la curva de
#la regresion polinomial
plt.scatter(x,y)
plt.plot(milinea, mimodelo(milinea))
plt.show()
```
Una vez trazada la recta de mejor ajuste, se puede obtener el valor de un punto dado, evaluando la curva en dicho punto. por ejemplo si quisieramos obtener el valor dado para un valor de 17 en el eje x, entonces sería:
```
valor = mimodelo(17)
print(valor)
```
---
## 5.2 - Con el módulo `stats`
En este caso existen diversos comandos que pueden ser utilizados para crear diferentes distribuciones basadas en datos dados. por ejemplo, partiendo de los datos de un histograma de una PDF, se puede crear el la curva de dicha distribución normal utiliando el comando `scipy.stats.rv_histogram`, además también se puede graficar el CDF de dichos datos:
```
import scipy.stats
import numpy as np
import matplotlib.pyplot as plt
data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
hist = np.histogram(data, bins=100)
hist_dist = scipy.stats.rv_histogram(hist)
X = np.linspace(-5.0, 5.0, 100)
plt.title("Datos aleatorios")
plt.hist(data, density=True, bins=100)
plt.show()
X = np.linspace(-5.0, 5.0, 100)
plt.title("PDF de los datos")
plt.plot(X, hist_dist.pdf(X), label='PDF')
plt.show()
X = np.linspace(-5.0, 5.0, 100)
plt.title("CDF de los datos")
plt.plot(X, hist_dist.cdf(X), label='CDF')
plt.show()
```
Otro paquete que brinda la librería ´Scipy´ es ´optimize´ el cuál tiene algorítmos de curvas de ajuste por medio de la función ´curve_fit´ con la cuál se pueden ajustar curvas de sistemas no lineales utilizando mínimos cuadrados. A continuación un ejemplo de su implementación para encontrar la recta de mejor ajuste ante una serie de datos experimentales obtenidos:
```
import numpy
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def _polynomial(x, *p):
"""Ajuste polinomial de grado arbitrario"""
poly = 0.
for i, n in enumerate(p):
poly += n * x**i
return poly
# Se definen los datos experimentales:
x = numpy.linspace(0., numpy.pi)
y = numpy.cos(x) + 0.05 * numpy.random.normal(size=len(x))
# p0 es la suposición inicial para los coeficientes de ajuste, este valor
# establece el orden del polinomio que desea ajustar. Aquí yo
# ya establecí todas las conjeturas iniciales en 1., es posible que tenga una mejor idea de
# qué valores esperar en función de sus datos.
p0 = numpy.ones(6,)
coeff, var_matrix = curve_fit(_polynomial, x, y, p0=p0)
yfit = [_polynomial(xx, *tuple(coeff)) for xx in x]
plt.plot(x, y, label='Test data')
plt.plot(x, yfit, label='fitted data')
plt.show()
```
---
## 5.3 - Con la librería `fitter`
Si es necesario, el paquete de `fitter` provee una simple clases la cual identifica la distribución de la cuál las muestras de datos son generados. Utiliza 80 distribuciones de Scipy y permite graficar los resultados para verificar que dicha distribución es la que mejor se ajusta a los datos. En el siguiente ejemplo se generarán los una muestra de 1000 puntos con una distribución gamma, para luego utilizar `fitter` el cuál revisará las 80 distribuciones de Scipy y desplegará un resumen con las distribuciones que calzan de mejor forma con nuestros datos, basandose en la suma del cuadrado de los errores. Los resultados del resumen se puede verificar de manera visual en las gráficas que dicho resumen traza por sí mismo:
```
from scipy import stats
from fitter import Fitter
# Crear los datos
data = stats.gamma.rvs(2, loc=1.5, scale=2, size=1000)
# Definir cuáles distribuciones queremos que evalúe
f = Fitter(data, distributions=['gamma', 'rayleigh', 'uniform'])
f.fit()
f.summary()
```
Por último, un ejemplo que que ilustra la combinación de el paquete ´scipy.stats´ y ´fitter´ es mediante el modulo ´histfit´, el cuál permite graficar tanto los datos y también las curvas de mejor ajuste al agregar ruido a la medición y calcular ese ajuste en 10 ocaciones, ´Nfit = 10´. En este caso la serie de datos utilizada corresponde a una distribuación normal (creada con el paquete ´scipy.stats´) y se obtuvieron 10 curvas de mejor ajuste ante diversos casos de ruido (con ´error_rate = 0.01´) y además se obtuvo un estimado de los valores correspondientes a la media, la varianza y la amplitud de la distribución de las curvas de mejor ajuste.
```
from fitter import HistFit
from pylab import hist
import scipy.stats
#Creamos la curva con distribución normal
data = [scipy.stats.norm.rvs(2,3.4) for x in range(10000)]
#Graficamos los valores asignándoles espaciamiento temporal
Y, X, _ = hist(data, bins=30)
#Creamos las curvas de mejor ajuste
hf = HistFit(X=X, Y=Y)
#Aplicamos un margen de error para simular ruido y calcular 10
#curvas de mejor ajuste
hf.fit(error_rate=0.01, Nfit=10)
#Obtenemos los valores correspondientes a la media, la varianza y
#la amplitud de las curvas de mejor ajuste
print(hf.mu, hf.sigma, hf.amplitude)
```
---
### Más información
* [Página web](https://www.google.com/)
* Libro o algo
* Tutorial [w3schools](https://www.w3schools.com/python/)
---
**Universidad de Costa Rica** | Facultad de Ingeniería | Escuela de Ingeniería Eléctrica
© 2021
---
|
github_jupyter
|
# This task is not quite ready as we don't have an open source route for simulating geometry that requires imprinting and merging. However this simulation can be carried out using Trelis.
# Heating Mesh Tally on CAD geometry made from Components
This constructs a reactor geometry from 3 Component objects each made from points.
The Component made include a breeder blanket, PF coil and a central column shield.
2D and 3D Meshes tally are then simulated to show nuclear heating, flux and tritium_production across the model.
This section makes the 3d geometry for the entire reactor from a input parameters.
```
import paramak
my_reactor = paramak.BallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=55,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=50,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=100,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=1,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=10,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
)
# TF and PF coils can be added with additional arguments.
# see the documentation for more details
# https://paramak.readthedocs.io/en/main/paramak.parametric_reactors.html
my_reactor.show()
```
The next section defines the materials. This can be done using openmc.Materials or in this case strings that look up materials from the neutronics material maker.
```
my_reactor.export_stp()
from IPython.display import FileLink
display(FileLink('blanket.stp'))
display(FileLink('pf_coil.stp'))
display(FileLink('center_column.stp'))
display(FileLink('Graveyard.stp'))
```
The next section defines the materials. This can be done using openmc.Materials or in this case strings that look up materials from the neutronics material maker.
```
from neutronics_material_maker import Material
mat1 = Material.from_library(name='Li4SiO4')
mat2 = Material.from_library(name='copper')
mat3 = Material.from_library(name='WC')
```
This next step makes a simple point source.
```
import openmc
# initialises a new source object
source = openmc.Source()
# sets the location of the source to x=0 y=0 z=0
source.space = openmc.stats.Point((100, 0, 0))
# sets the direction to isotropic
source.angle = openmc.stats.Isotropic()
# sets the energy distribution to 100% 14MeV neutrons
source.energy = openmc.stats.Discrete([14e6], [1])
```
This next section combines the geometry with the materials and specifies a few mesh tallies
```
import paramak_neutronics
neutronics_model = paramak_neutronics.NeutronicsModel(
geometry=my_reactor,
cell_tallies=['heating', 'flux', 'TBR', 'spectra'],
mesh_tally_2d=['heating', 'flux', '(n,Xt)'],
mesh_tally_3d=['heating', 'flux', '(n,Xt)'],
source=source,
simulation_batches=2,
simulation_particles_per_batch=10000,
materials={
'blanket_material': mat1,
'pf_coil_material': mat2,
'center_column_material': mat3,
}
)
# You will need to have Trelis installed to run this command
neutronics_model.simulate()
```
The next section produces download links for:
- vtk files that contain the 3D mesh results (open with Paraview)
- png images that show the resuls of the 2D mesh tally
```
from IPython.display import FileLink
display(FileLink('heating_on_3D_mesh.vtk'))
display(FileLink('flux_on_3D_mesh.vtk'))
display(FileLink('tritium_production_on_3D_mesh.vtk'))
display(FileLink('flux_on_2D_mesh_xy.png'))
display(FileLink('flux_on_2D_mesh_xz.png'))
display(FileLink('flux_on_2D_mesh_yz.png'))
display(FileLink('heating_on_2D_mesh_xy.png'))
display(FileLink('heating_on_2D_mesh_xz.png'))
display(FileLink('heating_on_2D_mesh_yz.png'))
display(FileLink('tritium_production_on_2D_mesh_yz.png'))
display(FileLink('tritium_production_on_2D_mesh_xz.png'))
display(FileLink('tritium_production_on_2D_mesh_yz.png'))
```
|
github_jupyter
|
# MDT Validation Notebook
Validated on Synthea +MDT population vs MEPS for Pediatric Asthma
```
import pandas as pd
import datetime as dt
import numpy as np
from scipy.stats import chi2_contingency
```
# Grab medication RXCUI of interest
Grabs the MEPS product RXCUI lists for filtering of Synthea to medications of interest.
Path to this will be MDT module - log - rxcui_ndc_df_output.csv
```
rxcui_df = pd.read_csv(r"") # MDT produced medication list
rxcui_df = rxcui_df[['medication_product_name','medication_product_rxcui']].drop_duplicates()
rxcui_df['medication_product_rxcui'] = rxcui_df['medication_product_rxcui'].astype(int)
```
# Read Synthea Population
Reads Synthea Medication file and filters on medications of interest
The path for this will be synthea -> output -> csv -> medications.csv
```
col_list = ['START','PATIENT','CODE']
syn_med_df = pd.DataFrame(columns = ['START','PATIENT','CODE','medication_product_rxcui','medication_product_name'])
for x in pd.read_csv(r"", usecols=col_list, chunksize=100000):
x['CODE'] = x['CODE'].astype(int)
temp_df = x.merge(rxcui_df, how="inner", left_on='CODE', right_on='medication_product_rxcui')
syn_med_df = syn_med_df.append(temp_df)
```
# Synthea Patient Population Filtering
Reads and merges Synthea patient data to allow for patient management.
The path for this will be synthea -> output -> csv -> patients.csv
This step can be skipped if not filtering by patient. For the pediatic use case we limited to patients who received medications when they were < 6 years of age
```
syn_pat_df = pd.read_csv(r"")
syn_pat_df = syn_pat_df.merge(syn_med_df, how='inner', left_on='Id', right_on='PATIENT')
syn_pat_df['START'] = pd.to_datetime(syn_pat_df['START']).dt.date
syn_pat_df['BIRTHDATE'] = pd.to_datetime(syn_pat_df['BIRTHDATE']).dt.date
syn_pat_df['age_in_days'] = (syn_pat_df['START'] - syn_pat_df['BIRTHDATE']).dt.days
syn_med_df = syn_pat_df[syn_pat_df['age_in_days'] < 2191]
```
# Synthea distributions
Gets total patient counts and medication distributions from Synthea population
```
syn_med_df = syn_med_df.groupby(['medication_product_name']).agg(patient_count=('CODE','count')).reset_index()
total_patients = syn_med_df['patient_count'].sum()
syn_med_df['percent'] = syn_med_df['patient_count']/total_patients
syn_med_df
```
# MEPS Expected
generates the expected MEPS patient counts for chi squared goodness of fit test
Path to file will be in you MDT module - log - validation_df.csv
```
meps_df = pd.read_csv(r"")
meps_df = meps_df[meps_df['age'] == '0-5'][['medication_product_name','validation_percent_product_patients']]
meps_df['patient_count'] = meps_df['validation_percent_product_patients'] * total_patients
meps_df['patient_count'] = meps_df['patient_count'].round(0)
meps_df
```
# Run Chi Squared
Runs chi squared test for two different populations
Take the values for patient count from syn_med_df and meps_df for this.
Numbers used are for the pediatric asthma use case of Synthea +MDT vs MEPS
```
obs = np.array([[203, 216],
[977, 979],
[513, 489],
[1819, 1836],
[1, 0],
[2378, 2332],
[1070, 1093]])
chi2, p, df, ob = chi2_contingency(obs)
print(f"""X2 = {chi2}
p-value = {p}
degrees of freedom = {df}
observatrions = {ob}""")
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook, tqdm
from scipy.spatial.distance import jaccard
from surprise import Dataset, Reader, KNNBasic, KNNWithMeans, SVD, SVDpp, accuracy
from surprise.model_selection import KFold, train_test_split, cross_validate, GridSearchCV
import warnings
warnings.simplefilter('ignore')
# !find * -iname 'movies.c*' -or -iname 'ratings.csv' -print -or -iname 'Library' -prune -or -iname 'Dropbox' -prune
# !find * -iname 'movies.c*' -or -iname 'ratings.csv' -print -or -iname 'Library' -prune
movies = pd.read_csv('movies.csv') # Подгружаем данные
ratings = pd.read_csv('ratings.csv')
movies_with_ratings = movies.join(ratings.set_index('movieId'), on='movieId').reset_index(drop=True) # Объеденяем 'фильмы' и 'Оценки'
movies_with_ratings.dropna(inplace=True) # Удаляем пропуски
movies_with_ratings.head()
num_movies = movies_with_ratings.movieId.unique().shape[0] # len() Получаем колличество уникальных ID фильмов
uniques = movies_with_ratings.movieId.unique() # Список уникальных ID фильмов <class 'numpy.ndarray'>
user_vector = {} # Формируем словарь (векторов), где {key=ID_юзера: values=array([Рейтинга])}
for user, group in movies_with_ratings.groupby('userId'):
user_vector[user] = np.zeros(num_movies)
for i in range(len(group.movieId.values)):
m = np.argwhere(uniques==group.movieId.values[i])[0][0]
r = group.rating.values[i]
user_vector[user][m] = r
dataset = pd.DataFrame({
'uid': movies_with_ratings.userId,
'iid': movies_with_ratings.title,
'rating': movies_with_ratings.rating
}) # Формируем новый 'dataset' который будет учавствовать в нашей модели из библиотеки 'surprise'
dataset.head()
reader = Reader(rating_scale=(0.5, 5.0)) # Указываем рейтинг где 0.5 минимальный, а 5.0 максимальный
data = Dataset.load_from_df(dataset, reader) # Преобразовываем 'dataset' в необходимый формат библиотеки 'surprise'
trainset, testset = train_test_split(data, test_size=.15, random_state=42) # Делим на train и test выборку
algo = SVDpp(n_factors=20, n_epochs=20) # Наша модель SVD++ (https://surprise.readthedocs.io/en/stable/matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVDpp)
algo.fit(trainset) # Обучаем модель на 'train'
test_pred = algo.test(testset) # Проверяем на 'test'
accuracy.rmse(test_pred, verbose=True) # Смотрим на 'Среднюю Квадратическую Ошибку' (Root Mean Square Error)
# Root Mean Square Error (RMSE) is the standard deviation of the residuals (prediction errors). \
# Residuals are a measure of how far from the regression line data points are; \
# RMSE is a measure of how spread out these residuals are. \
# In other words, it tells you how concentrated the data is around the line of best fit.
def recommendation(uid=2.0, neighbors=5, ratin=4.5, films=5, top=5):
'''
uid - идентификационный номер пользователя, который запросил рекомендации
neighbors - указываем необходимое количество похожих пользователей на 'uid' для поиска
ratin - рейтинг фильмов похожих пользователей на 'uid'
films - количество фильмов для предсказания оценки и сортировки
top - количество рекомендованных фильмов пользователю 'uid'
'''
titles = [key for key in user_vector.keys() if key != uid] # только те ключи где != ID чтобы не брать фильмы пользователя которые он посмотрел и оценил
distances = [jaccard(user_vector[uid], user_vector[key]) for key in user_vector.keys() if key != uid] # Джаккард (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jaccard.html#scipy.spatial.distance.jaccard)
best_indexes = np.argsort(distances)[:neighbors] # Сортировка
similar_users = np.array([(titles[i], distances[i]) for i in best_indexes])[:, 0]
movies_with_ratings.sort_values('timestamp', inplace=True) # Сортировка по времени
movies = np.array(list(set([]))) # Конструкция list(set()) для исключения дублей
for user in similar_users:
a = np.array(movies_with_ratings[movies_with_ratings.rating >= ratin][movies_with_ratings.userId == user][-films:].title)
movies = np.concatenate([a, movies])
user_movies = movies_with_ratings[movies_with_ratings.userId == uid].title.unique()
scores = list(set([algo.predict(uid=uid, iid=movie).est for movie in movies]))
titles_s = list(set([movie for movie in movies]))
best_indexes = np.argsort(scores)[-top:] # Сортировка
scores_r = [scores[i] for i in reversed(best_indexes)] #list(reversed([1, 2, 3, 4])) -> [4, 3, 2, 1]
titles_r = [titles_s[i] for i in reversed(best_indexes)]
# Объеденяем в один dataframe для вывода рекомендаций
df1, df2 = pd.DataFrame(data=titles_r).reset_index(), pd.DataFrame(data=scores_r).reset_index()
df1.columns, df2.columns = ['index','films'], ['index','scores']
df = pd.merge(df1, df2, on='index')
df['rank'] = df.scores.rank(ascending=False).astype('int')
data = df[['rank', 'films', 'scores']]
return data
''' Пользователь 2
Похожих пользователей 10
Из фильмов с минимальным рейтингом 4.5 у похожих пользователей
По топ 10 фильмам похожих пользователей
Топ рекомендаций 10 фильмов для пользователя '''
data = recommendation(uid=2.0, neighbors=10, ratin=4.5, films=10, top=10)
data.head(10)
pass
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/ArpitaChatterjee/Comedian-transcript-Analysis/blob/main/Exploratory_Data_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#To find the pattern of each comedian and find the reason of the likable
1. Most common words
2. size of vocab
3. Amt. of profanity used
##Most common words
```
#read dmt
import pandas as pd
data=pd.read_pickle('/content/drive/MyDrive/Colab Notebooks/NLP/dtm.pkl')
data= data.transpose()
data.head()
#find the top 30 words said by each comedian
top_dict={}
for c in data.columns:
top= data[c].sort_values(ascending=False).head(30)
top_dict[c]=list(zip(top.index, top.values))
top_dict
#print top 15 words by each comedian
for comedian, top_words in top_dict.items():
print(comedian)
print(', '.join([word for word, count in top_words[0:14]]))
print('---')
```
**NOTE:** At this point, we could go on and create word clouds. However, by looking at these top words, you can see that some of them have very little meaning and could be added to a stop words list.
```
#look at most common top words and add to dtop word list
from collections import Counter
#pull out top 30 words
words=[]
for comedian in data.columns:
top = [word for (word, count) in top_dict[comedian]]
for t in top:
words.append(t)
words
#aggregate the list and identify the most common words
Counter(words).most_common()
#if more tham half the comedians have same top words, remove 'em as stop word
add_stop_words= [word for word, count in Counter(words).most_common() if count>6]
add_stop_words
#update the DTM with the new list of stop words
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
#read the clean data
data_clean= pd.read_pickle('/content/drive/MyDrive/Colab Notebooks/NLP/data_clean.pkl')
#add new stop words
stop_words= text.ENGLISH_STOP_WORDS.union(add_stop_words)
#recreate the dtm
cv= CountVectorizer(stop_words=stop_words)
data_cv= cv.fit_transform(data_clean.transcript)
data_stop =pd.DataFrame(data_cv.toarray(), columns=cv.get_feature_names())
data_stop.index = data_clean.index
#pickle for later use
import pickle
pickle.dump(cv, open("/content/drive/MyDrive/Colab Notebooks/NLP/cv.pkl", "wb"))
data_stop.to_pickle("/content/drive/MyDrive/Colab Notebooks/NLP/dtm_stop.pkl")
!pip install wordcloud
from wordcloud import WordCloud
wc= WordCloud(stopwords=stop_words, background_color='white', colormap='Dark2', max_font_size=150, random_state=42 )
#reset output dimension
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize']=[16, 6]
full_names=['Ali Wong', 'Anthony Jeselnik', 'Bill Burr', 'Bo Burnham', 'Dave Chappelle', 'Hasan Minhaj',
'Jim Jefferies', 'Joe Rogan', 'John Mulaney', 'Louis C.K.', 'Mike Birbiglia', 'Ricky Gervais']
#create subplots for each comedian
for index, comedian in enumerate(data.columns):
wc.generate(data_clean.transcript[comedian])
plt.subplot(3, 4, index+1)
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.title(full_names[index])
plt.show()
```
###**Finding**
* Ali Wong says the s-word a lot and talks about her asian. I guess that's funny to me.
* A lot of people use the F-word. Let's dig into that later.
# **Number of words**
```
#find no of unique words each of em used
#identify the nonzero item in dtm, meaning that the word appaers atleast once
unique_list=[]
for comedian in data.columns:
uniques = data[comedian].to_numpy().nonzero()[0].size
unique_list.append(uniques)
#create a new dataframe that contains this unique word count
data_words = pd.DataFrame(list(zip(full_names, unique_list)),columns=['comedian', 'unique_words'])
data_unique_sort= data_words.sort_values(by='unique_words')
data_unique_sort
#calculate the words permin of each comedian
#total no of words comedian uses
total_list=[]
for comedian in data.columns:
totals= sum(data[comedian])
total_list.append(totals)
#comedy spl runtime from imdb, in mins
run_times= [60, 59, 80, 60, 67, 73, 77, 63, 62, 58, 76, 79]
#add some more col to dataframe
data_words['total_words'] = total_list
data_words['run_times']= run_times
data_words['words_per_min']= data_words['total_words']/ data_words['run_times']
#sort the df to check the slowest and fastest
data_wpm_sort= data_words.sort_values(by='words_per_min')
data_wpm_sort
#plot the findings
import numpy as np
y_pos= np.arange(len(data_words))
plt.subplot(1, 2, 1)
plt.barh(y_pos, data_unique_sort.unique_words, align='center')
plt.yticks(y_pos, data_unique_sort.comedian)
plt.title('Number of Unique Words', fontsize=20)
plt.subplot(1, 2, 2)
plt.barh(y_pos, data_wpm_sort.words_per_min, align='center')
plt.yticks(y_pos, data_wpm_sort.comedian)
plt.title('Number of Words Per Minute', fontsize=20)
plt.tight_layout()
plt.show()
```
##**Finding**
* **Vocabulary**
* Ricky Gervais (British comedy) and Bill Burr (podcast host) use a lot of words in their comedy
* Louis C.K. (self-depricating comedy) and Anthony Jeselnik (dark humor) have a smaller vocabulary
* **Talking Speed**
* Joe Rogan (blue comedy) and Bill Burr (podcast host) talk fast
* Bo Burnham (musical comedy) and Anthony Jeselnik (dark humor) talk slow
Ali Wong is somewhere in the middle in both cases. Nothing too interesting here.
## **Amt of Profanity**
```
Counter(words).most_common()
#isolate just thse bad words
data_bad_words = data.transpose()[['fucking', 'fuck', 'shit']]
data_profanity = pd.concat([data_bad_words.fucking+ data_bad_words.fuck, data_bad_words.shit], axis=1)
data_profanity.columns = ['f_words', 's_words']
data_profanity
#lets create a scatter plot of our findings
plt.rcParams['figure.figsize']=[10, 8]
for i, comedian in enumerate(data_profanity.index):
x= data_profanity.f_words.loc[comedian]
y= data_profanity.s_words.loc[comedian]
plt.scatter(x, y, color='blue')
plt.text(x+1.5, y+0.5, full_names[i], fontsize=10)
plt.xlim(-5, 155)
plt.title('No. of Bad-words used in Routine', fontsize=20)
plt.xlabel('No of F words', fontsize=15)
plt.ylabel('No of S words', fontsize=15)
plt.show()
```
## **Finding**
* **Averaging 2 F-Bombs Per Minute!** - I don't like too much swearing, especially the f-word, which is probably why I've never heard of Bill Bur, Joe Rogan and Jim Jefferies.
* **Clean Humor** - It looks like profanity might be a good predictor of the type of comedy I like. Besides Ali Wong, my two other favorite comedians in this group are John Mulaney and Mike Birbiglia.
My conclusion - yes, it does, for a first pass. There are definitely some things that could be better cleaned up, such as adding more stop words or including bi-grams. But we can save that for another day. The results, especially the profanity findings, are interesting and make general sense,
```
```
|
github_jupyter
|
# Going deeper with Tensorflow
В этом семинаре мы начнем изучать [Tensorflow](https://www.tensorflow.org/) для построения deep learning моделей.
Для установки tf на свою машину
* `pip install tensorflow` версия с поддержкой **cpu-only** для Linux & Mac OS
* для автомагической поддержки GPU смотрите документацию [TF install page](https://www.tensorflow.org/install/)
```
import tensorflow as tf
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.1)
s = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
```
# Приступим
Для начала, давайте имплементируем простую функцию на numpy просто для сравнения. Напишите подсчет суммы квадратов чисел от 0 до N-1.
**Подсказка:**
* Массив чисел от 0 до N-1 включительно - numpy.arange(N)
```
import numpy as np
def sum_squares(N):
return <student.Implement_me()>
%%time
sum_squares(10**8)
```
# Tensoflow teaser
Doing the very same thing
```
#I gonna be your function parameter
N = tf.placeholder('int64', name="input_to_your_function")
#i am a recipe on how to produce sum of squares of arange of N given N
result = tf.reduce_sum((tf.range(N)**2))
%%time
#example of computing the same as sum_squares
print(result.eval({N:10**8}))
```
# How does it work?
1. define placeholders where you'll send inputs;
2. make symbolic graph: a recipe for mathematical transformation of those placeholders;
3. compute outputs of your graph with particular values for each placeholder
* output.eval({placeholder:value})
* s.run(output, {placeholder:value})
* So far there are two main entities: "placeholder" and "transformation"
* Both can be numbers, vectors, matrices, tensors, etc.
* Both can be int32/64, floats of booleans (uint8) of various size.
* You can define new transformations as an arbitrary operation on placeholders and other transformations
* tf.reduce_sum(tf.arange(N)\**2) are 3 sequential transformations of placeholder N
* There's a tensorflow symbolic version for every numpy function
* `a+b, a/b, a**b, ...` behave just like in numpy
* np.mean -> tf.reduce_mean
* np.arange -> tf.range
* np.cumsum -> tf.cumsum
* If if you can't find the op you need, see the [docs](https://www.tensorflow.org/api_docs/python).
Still confused? We gonna fix that.
```
#Default placeholder that can be arbitrary float32 scalar, vertor, matrix, etc.
arbitrary_input = tf.placeholder('float32')
#Input vector of arbitrary length
input_vector = tf.placeholder('float32',shape=(None,))
#Input vector that _must_ have 10 elements and integer type
fixed_vector = tf.placeholder('int32',shape=(10,))
#Matrix of arbitrary n_rows and 15 columns (e.g. a minibatch your data table)
input_matrix = tf.placeholder('float32',shape=(None,15))
#You can generally use None whenever you don't need a specific shape
input1 = tf.placeholder('float64',shape=(None,100,None))
input2 = tf.placeholder('int32',shape=(None,None,3,224,224))
#elementwise multiplication
double_the_vector = input_vector*2
#elementwise cosine
elementwise_cosine = tf.cos(input_vector)
#difference between squared vector and vector itself
vector_squares = input_vector**2 - input_vector
#Practice time: create two vectors of type float32
my_vector = <student.init_float32_vector()>
my_vector2 = <student.init_one_more_such_vector()>
#Write a transformation(recipe):
#(vec1)*(vec2) / (sin(vec1) +1)
my_transformation = <student.implementwhatwaswrittenabove()>
print(my_transformation)
#it's okay, it's a symbolic graph
#
dummy = np.arange(5).astype('float32')
my_transformation.eval({my_vector:dummy,my_vector2:dummy[::-1]})
```
### Visualizing graphs
It's often useful to visualize the computation graph when debugging or optimizing.
Interactive visualization is where tensorflow really shines as compared to other frameworks.
There's a special instrument for that, called Tensorboard. You can launch it from console:
```tensorboard --logdir=/tmp/tboard --port=7007```
If you're pathologically afraid of consoles, try this:
```os.system("tensorboard --logdir=/tmp/tboard --port=7007 &"```
_(but don't tell anyone we taught you that)_
```
# launch tensorflow the ugly way, uncomment if you need that
import os
port = 6000 + os.getuid()
print("Port: %d" % port)
#!killall tensorboard
os.system("tensorboard --logdir=./tboard --port=%d &" % port)
# show graph to tensorboard
writer = tf.summary.FileWriter("./tboard", graph=tf.get_default_graph())
writer.close()
```
One basic functionality of tensorboard is drawing graphs. One you've run the cell above, go to `localhost:7007` in your browser and switch to _graphs_ tab in the topbar.
Here's what you should see:
<img src="https://s12.postimg.org/a374bmffx/tensorboard.png" width=480>
Tensorboard also allows you to draw graphs (e.g. learning curves), record images & audio ~~and play flash games~~. This is useful when monitoring learning progress and catching some training issues.
One researcher said:
```
If you spent last four hours of your worktime watching as your algorithm prints numbers and draws figures, you're probably doing deep learning wrong.
```
You can read more on tensorboard usage [here](https://www.tensorflow.org/get_started/graph_viz)
# Do It Yourself
__[2 points max]__
```
# Quest #1 - implement a function that computes a mean squared error of two input vectors
# Your function has to take 2 vectors and return a single number
<student.define_inputs_and_transformations()>
mse =<student.define_transformation()>
compute_mse = lambda vector1, vector2: <how to run you graph?>
# Tests
from sklearn.metrics import mean_squared_error
for n in [1,5,10,10**3]:
elems = [np.arange(n),np.arange(n,0,-1), np.zeros(n),
np.ones(n),np.random.random(n),np.random.randint(100,size=n)]
for el in elems:
for el_2 in elems:
true_mse = np.array(mean_squared_error(el,el_2))
my_mse = compute_mse(el,el_2)
if not np.allclose(true_mse,my_mse):
print('Wrong result:')
print('mse(%s,%s)' % (el,el_2))
print("should be: %f, but your function returned %f" % (true_mse,my_mse))
raise ValueError,"Что-то не так"
print("All tests passed")
```
# variables
The inputs and transformations have no value outside function call. This isn't too comfortable if you want your model to have parameters (e.g. network weights) that are always present, but can change their value over time.
Tensorflow solves this with `tf.Variable` objects.
* You can assign variable a value at any time in your graph
* Unlike placeholders, there's no need to explicitly pass values to variables when `s.run(...)`-ing
* You can use variables the same way you use transformations
```
#creating shared variable
shared_vector_1 = tf.Variable(initial_value=np.ones(5))
#initialize variable(s) with initial values
s.run(tf.global_variables_initializer())
#evaluating shared variable (outside symbolicd graph)
print("initial value", s.run(shared_vector_1))
# within symbolic graph you use them just as any other inout or transformation, not "get value" needed
#setting new value
s.run(shared_vector_1.assign(np.arange(5)))
#getting that new value
print("new value", s.run(shared_vector_1))
```
# tf.gradients - why graphs matter
* Tensorflow can compute derivatives and gradients automatically using the computation graph
* Gradients are computed as a product of elementary derivatives via chain rule:
$$ {\partial f(g(x)) \over \partial x} = {\partial f(g(x)) \over \partial g(x)}\cdot {\partial g(x) \over \partial x} $$
It can get you the derivative of any graph as long as it knows how to differentiate elementary operations
```
my_scalar = tf.placeholder('float32')
scalar_squared = my_scalar**2
#a derivative of scalar_squared by my_scalar
derivative = tf.gradients(scalar_squared, my_scalar)[0]
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(-3,3)
x_squared, x_squared_der = s.run([scalar_squared,derivative],
{my_scalar:x})
plt.plot(x, x_squared,label="x^2")
plt.plot(x, x_squared_der, label="derivative")
plt.legend();
```
# Why that rocks
```
my_vector = tf.placeholder('float32',[None])
#Compute the gradient of the next weird function over my_scalar and my_vector
#warning! Trying to understand the meaning of that function may result in permanent brain damage
weird_psychotic_function = tf.reduce_mean((my_vector+my_scalar)**(1+tf.nn.moments(my_vector,[0])[1]) + 1./ tf.atan(my_scalar))/(my_scalar**2 + 1) + 0.01*tf.sin(2*my_scalar**1.5)*(tf.reduce_sum(my_vector)* my_scalar**2)*tf.exp((my_scalar-4)**2)/(1+tf.exp((my_scalar-4)**2))*(1.-(tf.exp(-(my_scalar-4)**2))/(1+tf.exp(-(my_scalar-4)**2)))**2
der_by_scalar = <student.compute_grad_over_scalar()>
der_by_vector = <student.compute_grad_over_vector()>
#Plotting your derivative
scalar_space = np.linspace(1, 7, 100)
y = [s.run(weird_psychotic_function, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y, label='function')
y_der_by_scalar = [s.run(der_by_scalar, {my_scalar:x, my_vector:[1, 2, 3]})
for x in scalar_space]
plt.plot(scalar_space, y_der_by_scalar, label='derivative')
plt.grid()
plt.legend();
```
# Almost done - optimizers
While you can perform gradient descent by hand with automatic grads from above, tensorflow also has some optimization methods implemented for you. Recall momentum & rmsprop?
```
y_guess = tf.Variable(np.zeros(2,dtype='float32'))
y_true = tf.range(1,3,dtype='float32')
loss = tf.reduce_mean((y_guess - y_true + tf.random_normal([2]))**2)
optimizer = tf.train.MomentumOptimizer(0.01,0.9).minimize(loss,var_list=y_guess)
#same, but more detailed:
#updates = [[tf.gradients(loss,y_guess)[0], y_guess]]
#optimizer = tf.train.MomentumOptimizer(0.01,0.9).apply_gradients(updates)
from IPython.display import clear_output
s.run(tf.global_variables_initializer())
guesses = [s.run(y_guess)]
for _ in range(100):
s.run(optimizer)
guesses.append(s.run(y_guess))
clear_output(True)
plt.plot(*zip(*guesses),marker='.')
plt.scatter(*s.run(y_true),c='red')
plt.show()
```
# Logistic regression example
Implement the regular logistic regression training algorithm
Tips:
* Use a shared variable for weights
* X and y are potential inputs
* Compile 2 functions:
* `train_function(X, y)` - returns error and computes weights' new values __(through updates)__
* `predict_fun(X)` - just computes probabilities ("y") given data
We shall train on a two-class MNIST dataset
* please note that target `y` are `{0,1}` and not `{-1,1}` as in some formulae
```
from sklearn.datasets import load_digits
mnist = load_digits(2)
X,y = mnist.data, mnist.target
print("y [shape - %s]:" % (str(y.shape)), y[:10])
print("X [shape - %s]:" % (str(X.shape)))
print('X:\n',X[:3,:10])
print('y:\n',y[:10])
plt.imshow(X[0].reshape([8,8]))
# inputs and shareds
weights = <student.code_variable()>
input_X = <student.code_placeholder()>
input_y = <student.code_placeholder()>
predicted_y = <predicted probabilities for input_X>
loss = <logistic loss (scalar, mean over sample)>
optimizer = <optimizer that minimizes loss>
train_function = <compile function that takes X and y, returns log loss and updates weights>
predict_function = <compile function that takes X and computes probabilities of y>
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.metrics import roc_auc_score
for i in range(5):
<run optimizer operation>
loss_i = <compute loss at iteration i>
print("loss at iter %i:%.4f" % (i, loss_i))
print("train auc:",roc_auc_score(y_train, predict_function(X_train)))
print("test auc:",roc_auc_score(y_test, predict_function(X_test)))
print ("resulting weights:")
plt.imshow(shared_weights.get_value().reshape(8, -1))
plt.colorbar();
```
# Bonus: my1stNN
Your ultimate task for this week is to build your first neural network [almost] from scratch and pure tensorflow.
This time you will same digit recognition problem, but at a larger scale
* images are now 28x28
* 10 different digits
* 50k samples
Note that you are not required to build 152-layer monsters here. A 2-layer (one hidden, one output) NN should already have ive you an edge over logistic regression.
__[bonus score]__
If you've already beaten logistic regression with a two-layer net, but enthusiasm still ain't gone, you can try improving the test accuracy even further! The milestones would be 95%/97.5%/98.5% accuraсy on test set.
__SPOILER!__
At the end of the notebook you will find a few tips and frequently made mistakes. If you feel enough might to shoot yourself in the foot without external assistance, we encourage you to do so, but if you encounter any unsurpassable issues, please do look there before mailing us.
```
from mnist import load_dataset
#[down]loading the original MNIST dataset.
#Please note that you should only train your NN on _train sample,
# _val can be used to evaluate out-of-sample error, compare models or perform early-stopping
# _test should be hidden under a rock untill final evaluation...
# But we both know it is near impossible to catch you evaluating on it.
X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
print (X_train.shape,y_train.shape)
plt.imshow(X_train[0,0])
<here you could just as well create computation graph>
<this may or may not be a good place to evaluating loss and optimizer>
<this may be a perfect cell to write a training&evaluation loop in>
<predict & evaluate on test here, right? No cheating pls.>
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
```
# SPOILERS!
Recommended pipeline
* Adapt logistic regression from previous assignment to classify some number against others (e.g. zero vs nonzero)
* Generalize it to multiclass logistic regression.
- Either try to remember lecture 0 or google it.
- Instead of weight vector you'll have to use matrix (feature_id x class_id)
- softmax (exp over sum of exps) can implemented manually or as T.nnet.softmax (stable)
- probably better to use STOCHASTIC gradient descent (minibatch)
- in which case sample should probably be shuffled (or use random subsamples on each iteration)
* Add a hidden layer. Now your logistic regression uses hidden neurons instead of inputs.
- Hidden layer uses the same math as output layer (ex-logistic regression), but uses some nonlinearity (sigmoid) instead of softmax
- You need to train both layers, not just output layer :)
- Do not initialize layers with zeros (due to symmetry effects). A gaussian noize with small sigma will do.
- 50 hidden neurons and a sigmoid nonlinearity will do for a start. Many ways to improve.
- In ideal casae this totals to 2 .dot's, 1 softmax and 1 sigmoid
- __make sure this neural network works better than logistic regression__
* Now's the time to try improving the network. Consider layers (size, neuron count), nonlinearities, optimization methods, initialization - whatever you want, but please avoid convolutions for now.
|
github_jupyter
|
<link rel="stylesheet" href="../../styles/theme_style.css">
<!--link rel="stylesheet" href="../../styles/header_style.css"-->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<table width="100%">
<tr>
<td id="image_td" width="15%" class="header_image_color_7"><div id="image_img"
class="header_image_7"></div></td>
<td class="header_text"> Rock, Paper or Scissor Game - Train and Classify [Volume 2] </td>
</tr>
</table>
<div id="flex-container">
<div id="diff_level" class="flex-item">
<strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
<span class="fa fa-star checked"></span>
<span class="fa fa-star checked"></span>
<span class="fa fa-star checked"></span>
<span class="fa fa-star"></span>
</div>
<div id="tag" class="flex-item-tag">
<span id="tag_list">
<table id="tag_list_table">
<tr>
<td class="shield_left">Tags</td>
<td class="shield_right" id="tags">train_and_classify☁machine-learning☁features☁extraction</td>
</tr>
</table>
</span>
<!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
</div>
</div>
<span class="color4"><strong>Previous Notebooks that are part of "Rock, Paper or Scissor Game - Train and Classify" module</strong></span>
<ul>
<li><a href="classification_game_volume_1.ipynb"><strong>Rock, Paper or Scissor Game - Train and Classify [Volume 1] | Experimental Setup <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
</ul>
<span class="color7"><strong>Following Notebooks that are part of "Rock, Paper or Scissor Game - Train and Classify" module</strong></span>
<ul>
<li><a href="classification_game_volume_3.ipynb"><strong>Rock, Paper or Scissor Game - Train and Classify [Volume 3] | Training a Classifier <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
<li><a href="../Evaluate/classification_game_volume_4.ipynb"><strong>Rock, Paper or Scissor Game - Train and Classify [Volume 4] | Performance Evaluation <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
</ul>
<table width="100%">
<tr>
<td style="text-align:left;font-size:12pt;border-top:dotted 2px #62C3EE">
<span class="color1">☌</span> After the presentation of data acquisition conditions on the previous <a href="classification_game_volume_1.ipynb">Jupyter Notebook <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, we will follow our Machine Learning Journey by specifying which features will be extracted.
<br>
"Features" are numerical parameters extracted from the training data (in our case physiological signals acquired when executing gestures of "Rock, Paper or Scissor" game), characterizing objectively the training example.
A good feature is a parameter that has the ability to separate the different classes of our classification system, i.e, a parameter with a characteristic range of values for each available class.
</td>
</tr>
</table>
<hr>
<p style="font-size:20pt;color:#62C3EE;padding-bottom:5pt">Starting Point (Setup)</p>
<strong>List of Available Classes:</strong>
<br>
<ol start="0">
<li><span class="color1"><strong>"No Action"</strong></span> [When the hand is relaxed]</li>
<li><span class="color4"><strong>"Paper"</strong></span> [All fingers are extended]</li>
<li><span class="color7"><strong>"Rock"</strong></span> [All fingers are flexed]</li>
<li><span class="color13"><strong>"Scissor"</strong></span> [Forefinger and middle finger are extended and the remaining ones are flexed]</li>
</ol>
<table align="center">
<tr>
<td height="200px">
<img src="../../images/train_and_classify/classification_game_volume_2/classification_game_paper.png" style="display:block;height:100%">
</td>
<td height="200px">
<img src="../../images/train_and_classify/classification_game_volume_2/classification_game_stone.png" style="display:block;height:100%">
</td>
<td height="200px">
<img src="../../images/train_and_classify/classification_game_volume_2/classification_game_scissor.png" style="display:block;height:100%">
</td>
</tr>
<tr>
<td style="text-align:center">
<strong>Paper</strong>
</td>
<td style="text-align:center">
<strong>Rock</strong>
</td>
<td style="text-align:center">
<strong>Scissor</strong>
</td>
</tr>
</table>
<strong>Acquired Data:</strong>
<br>
<ul>
<li>Electromyography (EMG) | 2 muscles | Adductor pollicis and Flexor digitorum superficialis</li>
<li>Accelerometer (ACC) | 1 axis | Sensor parallel to the thumb nail (Axis perpendicular)</li>
</ul>
<p style="font-size:20pt;color:#62C3EE;padding-bottom:5pt">Protocol/Feature Extraction</p>
<strong>Extracted Features</strong>
<ul>
<li><span style="color:#E84D0E"><strong>[From] EMG signal</strong></span></li>
<ul>
<li>Standard Deviation ☆</li>
<li>Maximum sampled value ☝</li>
<li><a href="https://en.wikipedia.org/wiki/Zero-crossing_rate">Zero-Crossing Rate</a> ☌</li>
<li>Standard Deviation of the absolute signal ☇</li>
</ul>
<li><span style="color:#FDC400"><strong>[From] ACC signal</strong></span></li>
<ul>
<li>Average Value ☉</li>
<li>Standard Deviation ☆</li>
<li>Maximum sampled value ☝</li>
<li><a href="https://en.wikipedia.org/wiki/Zero-crossing_rate">Zero-Crossing Rate</a> ☌</li>
<li><a href="https://en.wikipedia.org/wiki/Slope">Slope of the regression curve</a> ☍</li>
</ul>
</ul>
<strong>Formal definition of parameters</strong>
<br>
☝ | Maximum Sample Value of a set of elements is equal to the last element of the sorted set
☉ | $\mu = \frac{1}{N}\sum_{i=1}^N (sample_i)$
☆ | $\sigma = \sqrt{\frac{1}{N}\sum_{i=1}^N(sample_i - \mu_{signal})^2}$
☌ | $zcr = \frac{1}{N - 1}\sum_{i=1}^{N-1}bin(i)$
☇ | $\sigma_{abs} = \sqrt{\frac{1}{N}\sum_{i=1}^N(|sample_i| - \mu_{signal_{abs}})^2}$
☍ | $m = \frac{\Delta signal}{\Delta t}$
... being $N$ the number of acquired samples (that are part of the signal), $sample_i$ the value of the sample number $i$, $signal_{abs}$ the absolute signal, $\Delta signal$ is the difference between the y coordinate of two points of the regression curve and $\Delta t$ the difference between the x (time) coordinate of the same two points of the regression curve.
... and
$bin(i)$ a binary function defined as:
$bin(i) = \begin{cases} 1, & \mbox{if } signal_i \times signal_{i-1} \leq 0 \\ 0, & \mbox{if } signal_i \times signal_{i-1}>0 \end{cases}$
<hr>
<p class="steps">0 - Import of the needed packages for a correct execution of the current <span class="color4">Jupyter Notebook</span></p>
```
# Package that ensures a programatically interaction with operating system folder hierarchy.
from os import listdir
# Package used for clone a dictionary.
from copy import deepcopy
# Functions intended to extract some statistical parameters.
from numpy import max, std, average, sum, absolute
# With the following import we will be able to extract the linear regression parameters after
# fitting experimental points to the model.
from scipy.stats import linregress
# biosignalsnotebooks own package that supports some functionalities used on the Jupyter Notebooks.
import biosignalsnotebooks as bsnb
```
<p class="steps">1 - Loading of all signals that integrates our training samples (storing them inside a dictionary)</p>
The acquired signals are stored inside a folder which can be accessed through a relative path <span class="color7">"../../signal_samples/classification_game/data"</span>
<p class="steps">1.1 - Identification of the list of files/examples</p>
```
# Transposition of data from signal files to a Python dictionary.
relative_path = "../../signal_samples/classification_game"
data_folder = "data"
# List of files (each file is a training example).
list_examples = listdir(relative_path + "/" + data_folder)
print(list_examples)
```
The first digit of filename identifies the class to which the training example belongs and the second digit is the trial number <span class="color1">(<i><class>_<trial>.txt</i>)</span>
<p class="steps">1.2 - Access the content of each file and store it on the respective dictionary entry</p>
```
# Initialization of dictionary.
signal_dict = {}
# Scrolling through each entry in the list.
for example in list_examples:
if ".txt" in example: # Read only .txt files.
# Get the class to which the training example under analysis belong.
example_class = example.split("_")[0]
# Get the trial number of the training example under analysis.
example_trial = example.split("_")[1].split(".")[0]
# Creation of a new "class" entry if it does not exist.
if example_class not in signal_dict.keys():
signal_dict[example_class] = {}
# Load data.
complete_data = bsnb.load(relative_path + "/" + data_folder + "/" + example)
# Store data in the dictionary.
signal_dict[example_class][example_trial] = complete_data
```
<p class="steps">1.3 - Definition of the content of each channel</p>
```
# Channels (CH1 Flexor digitorum superficialis | CH2 Aductor policis | CH3 Accelerometer axis Z).
emg_flexor = "CH1"
emg_adductor = "CH2"
acc_z = "CH3"
```
<p class="steps">2 - Extraction of features according to the signal under analysis</p>
The extracted values of each feature will be stored in a dictionary with the same hierarchical structure as "signal_dict"
```
# Clone "signal_dict".
features_dict = deepcopy(signal_dict)
# Navigate through "signal_dict" hierarchy.
list_classes = signal_dict.keys()
for class_i in list_classes:
list_trials = signal_dict[class_i].keys()
for trial in list_trials:
# Initialise "features_dict" entry content.
features_dict[class_i][trial] = []
for chn in [emg_flexor, emg_adductor, acc_z]:
# Temporary storage of signal inside a reusable variable.
signal = signal_dict[class_i][trial][chn]
# Start the feature extraction procedure accordingly to the channel under analysis.
if chn == emg_flexor or chn == emg_adductor: # EMG Features.
# Converted signal (taking into consideration that our device is a "biosignalsplux", the resolution is
# equal to 16 bits and the output unit should be in "mV").
signal = bsnb.raw_to_phy("EMG", device="biosignalsplux", raw_signal=signal, resolution=16, option="mV")
# Standard Deviation.
features_dict[class_i][trial] += [std(signal)]
# Maximum Value.
features_dict[class_i][trial] += [max(signal)]
# Zero-Crossing Rate.
features_dict[class_i][trial] += [sum([1 for i in range(1, len(signal))
if signal[i]*signal[i-1] <= 0]) / (len(signal) - 1)]
# Standard Deviation of the absolute signal.
features_dict[class_i][trial] += [std(absolute(signal))]
else: # ACC Features.
# Converted signal (taking into consideration that our device is a "biosignalsplux", the resolution is
# equal to 16 bits and the output unit should be in "g").
signal = bsnb.raw_to_phy("ACC", device="biosignalsplux", raw_signal=signal, resolution=16, option="g")
# Average value.
features_dict[class_i][trial] += [average(signal)]
# Standard Deviation.
features_dict[class_i][trial] += [std(signal)]
# Maximum Value.
features_dict[class_i][trial] += [max(signal)]
# Zero-Crossing Rate.
features_dict[class_i][trial] += [sum([1 for i in range(1, len(signal))
if signal[i]*signal[i-1] <= 0]) / (len(signal) - 1)]
# Slope of the regression curve.
x_axis = range(0, len(signal))
features_dict[class_i][trial] += [linregress(x_axis, signal)[0]]
```
Each training array has the following structure/content:
<br>
\[$\sigma_{emg\,flexor}$, $max_{emg\,flexor}$, $zcr_{emg\,flexor}$, $\sigma_{emg\,flexor}^{abs}$, $\sigma_{emg\,adductor}$, $max_{emg\,adductor}$, $zcr_{emg\,adductor}$, $\sigma_{emg\,adductor}^{abs}$, $\mu_{acc\,z}$, $\sigma_{acc\,z}$, $max_{acc\,z}$, $zcr_{acc\,z}$, $m_{acc\,z}$\]
<p class="steps">3 - Storage of the content inside the filled "features_dict" to an external file (<a href="https://fileinfo.com/extension/json">.json <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>)</p>
With this procedure it is possible to ensure a "permanent" memory of the results produced during feature extraction, reusable in the future by simple reading the file (without the need to reprocess again).
```
# Package dedicated to the manipulation of json files.
from json import dump
filename = "classification_game_features.json"
# Generation of .json file in our previously mentioned "relative_path".
# [Generation of new file]
with open(relative_path + "/features/" + filename, 'w') as file:
dump(features_dict, file)
```
We reach the end of the "Classification Game" second volume. Now all the features of training examples are in our possession.
If you are feeling your interest increasing, please jump to the next <a href="../Train_and_Classify/classification_game_volume_3.ipynb">volume <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>
<strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
<span class="color6">**Auxiliary Code Segment (should not be replicated by
the user)**</span>
```
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
```
|
github_jupyter
|
```
import pandas as pd
import os
import time
import re
import numpy as np
import json
from urllib.parse import urlparse, urljoin
run_root = "/home/icejm/Code/OpenWPM/stockdp/page_ana/"
# gather all potent/black links
count = 0
for root, dirs, files in os.walk(os.path.abspath('.')):
if len(dirs)==0:
for i in files:
if i.endswith(".json") and i.startswith("potent"):
count += 1
file = ((root+'/'+i).split(run_root))[1]
web_name = root.split('/')[-1]
with open(file,"r") as f:
text = f.read()
if i.startswith("potent"):
tmp_data = json.loads(text)
for each_page in tmp_data:
with open("potentlist.csv", "a+") as potent_f:
for j in tmp_data[each_page]:
j = j.replace(",", "/").replace("http://", "").replace("https://", "")
write_data = j+','+web_name+'\n'
potent_f.writelines(write_data)
print(count)
potent_dp_links = pd.read_csv("potentlist.csv", names=["url", "website"])
print(potent_dp_links.shape)
potent_dp_links.head()
def getlistnum(li):
li = list(li)
set1 = set(li)
dict1 = {}
for item in set1:
dict1.update({item:li.count(item)})
return dict1
getlistnum(potent_dp_links['website'])
x = "data.eastmoney.com/report/zw_stock.jshtml?encodeUrl=zXF5Zl6XRyYdSx1spWVTCqDhUpdvWCPeqRcR2Jjm0qE="
path = urlparse(x).path + urlparse(x).params + urlparse(x).query + urlparse(x).fragment
print(path)
print(len(re.findall("([a-z])",path)))
print(len(re.findall("([A-Z])",path)))
print(len(re.findall("([/_\.\%&#\-\?])",x)))
```
# Build Features
## 1.Basic Features
length, num of (signs, upper characters, lower character, number)
```
def build_features(df):
processed_features = df[["url"]].copy()
processed_features["path"] = processed_features["url"].map(
lambda x: urlparse(x).path + urlparse(x).params + urlparse(x).query + urlparse(x).fragment)
processed_features["path_len"] = processed_features["path"].map(
lambda x: len(x))
processed_features["num_sign"] = processed_features["url"].map(
lambda x: len(re.findall("([/_\.\%&#\-\?])",x)))
processed_features["num_upper_char"] = processed_features["path"].map(
lambda x: len(re.findall("([A-Z])",x)))
processed_features["num_lower_char"] = processed_features["path"].map(
lambda x: len(re.findall("([a-z])",x)))
processed_features["num_number"] = processed_features["path"].map(
lambda x: len(re.findall("(\d)",x)))
processed_features.drop(['url', 'path'], axis=1, inplace=True)
return processed_features
feature = build_features(potent_dp_links)
data = pd.concat([potent_dp_links, feature], axis = 1, ignore_index = False)
data.head()
```
# 2. Levenshtein
buuild a series distance between url and the website url
1. Edit Distance
2. Levenshtein Ratio
3. Jaro/Jaro-Winkler Dsitance(the answers are actually same)
```
import Levenshtein
def build_leven_features(df):
processed_features = []
for index, row in df.iterrows():
str1 = row['url']
str2 = row['website']
row['edit-dis'] = Levenshtein.distance(str1, str2)
row['leven-ratio'] = Levenshtein.ratio(str1, str2)
row['jw-dis'] = Levenshtein.jaro_winkler(str1, str2)
processed_features.append(row)
back_data = pd.DataFrame(processed_features).drop(['url', 'website'], axis=1)
return back_data
leven_features = build_leven_features(potent_dp_links)
data = pd.concat([data, leven_features], axis = 1, ignore_index = False)
data.to_csv("featured_data.csv", index=False)
data_features = data.drop(['url', 'website'], axis=1)
data = pd.read_csv("featured_data.csv", )
potent_dp_links = pd.read_csv("potentlist.csv", names=["url", "website"])
data_features = data.drop(['url', 'website'], axis=1)
import seaborn as sns
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
from hdbscan import HDBSCAN
%matplotlib inline
data_features.describe()
data_features = data_features[['path_len','num_sign','num_upper_char','num_lower_char','num_number','edit-dis','leven-ratio','jw-dis']]
dfData = abs(pd.DataFrame(data_features).corr())
plt.subplots(figsize=(12, 9)) # 设置画面大小
sns.heatmap(dfData, annot=True, vmax=1, square=True, cmap="Blues")
data_features = normalize(data_features, axis=1)
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
# pca = PCA(tol=10)
pca_data = pca.fit_transform(data_features)
print('Matrix of PCs: %s' % str(pca_data.shape))
print('Data matrix: %s' % str(data_features.shape))
print('%d singular values: %s' % (pca.singular_values_.shape[0], str(pca.singular_values_)))
```
# Clustering
## DBSCAN
```
# test dbscan
from sklearn.cluster import DBSCAN
from sklearn.utils import parallel_backend
with parallel_backend('threading'):
clusterer = DBSCAN(eps=0.005, min_samples=5, n_jobs=10, metric='euclidean')
cluster_labels = clusterer.fit(data_features)
potent_dp_links['cluster_dbscan'] = pd.Series(cluster_labels.labels_).values
print('Number of clusters: %d' % len(set(cluster_labels.labels_)))
with parallel_backend('threading'):
clusterer = DBSCAN(eps=0.005, min_samples=5, n_jobs=10, metric='euclidean')
cluster_labels = clusterer.fit(pca_data)
potent_dp_links['pca_cluster_dbscan'] = pd.Series(cluster_labels.labels_).values
print('Number of clusters: %d' % len(set(cluster_labels.labels_)))
```
## HDBSCAN
```
clusterer = HDBSCAN(min_cluster_size=5, metric='euclidean')
cluster_labels = clusterer.fit_predict(data_features)
pca_cluster_labels = clusterer.fit_predict(pca_data)
potent_dp_links['cluster_hdbscan'] = pd.Series(cluster_labels).values
potent_dp_links['pca_cluster_hdbscan'] = pd.Series(pca_cluster_labels).values
print('HDBSCAN without PCA: \n Number of clusters: %s' % len(potent_dp_links['cluster_hdbscan'].value_counts()))
# print('cluster_hdbscan.value_counts(): \n %s' % potent_dp_links['cluster_hdbscan'].value_counts().to_string())
print('HDBSCAN wit PCA: \n Number of clusters: %s' % len(potent_dp_links['pca_cluster_hdbscan'].value_counts()))
# print('cluster_hdbscan.value_counts(): \n %s' % potent_dp_links['cluster_hdbscan'].value_counts().to_string())
hdbscan = HDBSCAN(min_cluster_size=5, min_samples=4, cluster_selection_epsilon=0.001, metric='euclidean')
dbscan = DBSCAN(eps=0.001, min_samples=4, metric='euclidean')
hdbscan_labels = hdbscan.fit_predict(data_features)
pca_hdbscan_labels = hdbscan.fit_predict(pca_data)
dbscan_labels = dbscan.fit_predict(data_features)
pca_dbscan_labels = dbscan.fit_predict(pca_data)
potent_dp_links['cluster_hdbscan'] = pd.Series(hdbscan_labels).values
potent_dp_links['pca_cluster_hdbscan'] = pd.Series(pca_hdbscan_labels).values
potent_dp_links['cluster_dbscan'] = pd.Series(dbscan_labels).values
potent_dp_links['pca_cluster_dbscan'] = pd.Series(pca_dbscan_labels).values
print('HDBSCAN without PCA: \n Number of clusters: %s' % len(potent_dp_links['cluster_hdbscan'].value_counts()))
print('HDBSCAN wit PCA: \n Number of clusters: %s' % len(potent_dp_links['pca_cluster_hdbscan'].value_counts()))
print('DBSCAN without PCA: \n Number of clusters: %s' % len(potent_dp_links['cluster_dbscan'].value_counts()))
print('DBSCAN wit PCA: \n Number of clusters: %s' % len(potent_dp_links['pca_cluster_dbscan'].value_counts()))
potent_dp_links.head()
# Silhouette Coefficient
from sklearn import metrics
s1 = metrics.silhouette_score(data_features, potent_dp_links['cluster_hdbscan'], metric='euclidean')
s2 = metrics.silhouette_score(pca_data, potent_dp_links['pca_cluster_hdbscan'], metric='euclidean')
s3 = metrics.silhouette_score(data_features, potent_dp_links['cluster_dbscan'], metric='euclidean')
s4 = metrics.silhouette_score(pca_data, potent_dp_links['pca_cluster_dbscan'], metric='euclidean')
print('Silhouette score: %.5f' % s1)
print('Silhouette score: %.5f' % s2)
print('Silhouette score: %.5f' % s3)
print('Silhouette score: %.5f' % s4)
# Calinski-Harabaz Index
from sklearn import metrics
chi1 = metrics.calinski_harabasz_score(data_features, potent_dp_links['cluster_hdbscan'])
chi2 = metrics.calinski_harabasz_score(pca_data, potent_dp_links['pca_cluster_hdbscan'])
chi3 = metrics.calinski_harabasz_score(data_features, potent_dp_links['cluster_dbscan'])
chi4 = metrics.calinski_harabasz_score(pca_data, potent_dp_links['pca_cluster_dbscan'])
print('Calinski-Harabaz Index: %.3f' % chi1)
print('Calinski-Harabaz Index: %.3f' % chi2)
print('Calinski-Harabaz Index: %.3f' % chi3)
print('Calinski-Harabaz Index: %.3f' % chi4)
# Davies-Bouldin Index
from sklearn.metrics import davies_bouldin_score
dbi1 = davies_bouldin_score(data_features, potent_dp_links['cluster_hdbscan'])
print('Davies-Bouldin Index: %.5f' % dbi1)
dbi2 = davies_bouldin_score(pca_data, potent_dp_links['pca_cluster_hdbscan'])
print('Davies-Bouldin Index: %.5f' % dbi2)
dbi3 = davies_bouldin_score(data_features, potent_dp_links['cluster_dbscan'])
print('Davies-Bouldin Index: %.5f' % dbi3)
dbi4 = davies_bouldin_score(pca_data, potent_dp_links['pca_cluster_dbscan'])
print('Davies-Bouldin Index: %.5f' % dbi4)
para_min_cluster_size = [2,3,4,5,6,7,8,9,10]
para_min_samples = [3,4,5]
cluster_selection_epsilon = [0.1,0.01,0.001, 0.0001, 0]
for i in cluster_selection_epsilon:
clusterer = hdbscan.HDBSCAN(min_cluster_size=5, min_samples=4, cluster_selection_epsilon=0.001, metric='euclidean')
pca_cluster_labels = clusterer.fit_predict(pca_data)
s4 = metrics.silhouette_score(pca_data, pca_cluster_labels, metric='euclidean')
print('Number of clusters: %s, Silhouette score: %.5f' %
(len(pd.DataFrame(pca_cluster_labels).value_counts()), s4))
potent_dp_links.to_csv("potent_dp_links_cluster.csv")
```
|
github_jupyter
|
# CSAILVision semantic segmention models
This is a semantic segmentation notebook using an [ADE20K](http://groups.csail.mit.edu/vision/datasets/ADE20K/) pretrained model from the open source project [CSAILVision/semantic-segmentation-pytorch](https://github.com/CSAILVision/semantic-segmentation-pytorch).
For other deep-learning Colab notebooks, visit [tugstugi/dl-colab-notebooks](https://github.com/tugstugi/dl-colab-notebooks).
## Clone repo and install dependencies
```
import os
from os.path import exists, join, basename, splitext
git_repo_url = 'https://github.com/CSAILVision/semantic-segmentation-pytorch.git'
project_name = splitext(basename(git_repo_url))[0]
if not exists(project_name):
# clone and install dependencies
!git clone -q $git_repo_url
#!cd $project_name && pip install -q -r requirement.txt
import sys
sys.path.append(project_name)
import time
import matplotlib
import matplotlib.pylab as plt
plt.rcParams["axes.grid"] = False
```
## Download a pretrained model
According to [https://github.com/CSAILVision/semantic-segmentation-pytorch#performance](https://github.com/CSAILVision/semantic-segmentation-pytorch#performance), **UperNet101** was the best performing model. We will use it as the pretrained model:
```
ENCODER_NAME = 'resnet101'
DECODER_NAME = 'upernet'
PRETRAINED_ENCODER_MODEL_URL = 'http://sceneparsing.csail.mit.edu/model/pytorch/baseline-%s-%s/encoder_epoch_50.pth' % (ENCODER_NAME, DECODER_NAME)
PRETRAINED_DECODER_MODEL_URL = 'http://sceneparsing.csail.mit.edu/model/pytorch/baseline-%s-%s/decoder_epoch_50.pth' % (ENCODER_NAME, DECODER_NAME)
pretrained_encoder_file = basename(PRETRAINED_ENCODER_MODEL_URL)
if not exists(pretrained_encoder_file):
!wget -q $PRETRAINED_ENCODER_MODEL_URL
pretrained_decoder_file = basename(PRETRAINED_DECODER_MODEL_URL)
if not exists(pretrained_decoder_file):
!wget -q $PRETRAINED_DECODER_MODEL_URL
```
## Prepare model
Load the pretrained model:
```
from types import SimpleNamespace
import torch
from models import ModelBuilder, SegmentationModule
from dataset import TestDataset
from utils import colorEncode
from scipy.io import loadmat
# options
options = SimpleNamespace(fc_dim=2048,
num_class=150,
imgSize = [300, 400, 500, 600],
imgMaxSize=1000,
padding_constant=8,
segm_downsampling_rate=8)
# create model
builder = ModelBuilder()
net_encoder = builder.build_encoder(arch=ENCODER_NAME, weights=pretrained_encoder_file,
fc_dim=options.fc_dim)
net_decoder = builder.build_decoder(arch=DECODER_NAME, weights=pretrained_decoder_file,
fc_dim=options.fc_dim, num_class=options.num_class, use_softmax=True)
segmentation_module = SegmentationModule(net_encoder, net_decoder, torch.nn.NLLLoss(ignore_index=-1))
segmentation_module = segmentation_module.eval()
torch.set_grad_enabled(False)
if torch.cuda.is_available():
segmentation_module = segmentation_module.cuda()
# test on a given image
def test(test_image_name):
dataset_test = TestDataset([{'fpath_img': test_image_name}], options, max_sample=-1)
batch_data = dataset_test[0]
segSize = (batch_data['img_ori'].shape[0], batch_data['img_ori'].shape[1])
img_resized_list = batch_data['img_data']
scores = torch.zeros(1, options.num_class, segSize[0], segSize[1])
if torch.cuda.is_available():
scores = scores.cuda()
for img in img_resized_list:
feed_dict = batch_data.copy()
feed_dict['img_data'] = img
del feed_dict['img_ori']
del feed_dict['info']
if torch.cuda.is_available():
feed_dict = {k: o.cuda() for k, o in feed_dict.items()}
# forward pass
pred_tmp = segmentation_module(feed_dict, segSize=segSize)
scores = scores + pred_tmp / len(options.imgSize)
_, pred = torch.max(scores, dim=1)
return pred.squeeze(0).cpu().numpy()
```
## Evaluate on a test image
First, download a test image from the internet:
```
IMAGE_URL = 'https://raw.githubusercontent.com/tugstugi/dl-colab-notebooks/master/resources/lidl.jpg'
image_file = basename(IMAGE_URL)
!wget -q -O $image_file $IMAGE_URL
plt.figure(figsize=(10, 5))
plt.imshow(matplotlib.image.imread(image_file))
```
Now, test on the downloaded image:
```
t = time.time()
pred = test(image_file)
print("executed in %.3fs" % (time.time()-t))
pred_color = colorEncode(pred, loadmat(os.path.join(project_name, 'data/color150.mat'))['colors'])
plt.imshow(pred_color)
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark= False
m = 250
```
# Generate dataset
```
np.random.seed(12)
y = np.random.randint(0,3,500)
idx= []
for i in range(3):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((500,))
np.random.seed(12)
x[idx[0]] = np.random.uniform(low =-1,high =0,size= sum(idx[0]))
x[idx[1]] = np.random.uniform(low =0,high =1,size= sum(idx[1]))
x[idx[2]] = np.random.uniform(low =2,high =3,size= sum(idx[2]))
x[idx[0]][0], x[idx[2]][5]
print(x.shape,y.shape)
idx= []
for i in range(3):
idx.append(y==i)
for i in range(3):
y= np.zeros(x[idx[i]].shape[0])
plt.scatter(x[idx[i]],y,label="class_"+str(i))
plt.legend()
bg_idx = [ np.where(idx[2] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(3):
y= np.zeros(x[idx[i]].shape[0])
plt.scatter(x[idx[i]],y,label="class_"+str(i))
plt.legend()
foreground_classes = {'class_0','class_1' }
background_classes = {'class_2'}
fg_class = np.random.randint(0,2)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(2,3)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
a.shape
np.reshape(a,(m,1))
desired_num = 2000
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,2)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(2,3)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(np.reshape(a,(m,1)))
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][j])
mosaic_list_of_images[0:2], mosaic_list_of_images[1000:1002]
np.zeros(5)
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
"""
mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point
labels : mosaic_dataset labels
foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average
dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
"""
avg_image_dataset = []
cnt = 0
counter = np.zeros(m)
for i in range(len(mosaic_dataset)):
img = torch.zeros([1], dtype=torch.float64)
np.random.seed(int(dataset_number*10000 + i))
give_pref = foreground_index[i] #np.random.randint(0,9)
# print("outside", give_pref,foreground_index[i])
for j in range(m):
if j == give_pref:
img = img + mosaic_dataset[i][j]*dataset_number/m #2 is data dim
else :
img = img + mosaic_dataset[i][j]*(m-dataset_number)/((m-1)*m)
if give_pref == foreground_index[i] :
# print("equal are", give_pref,foreground_index[i])
cnt += 1
counter[give_pref] += 1
else :
counter[give_pref] += 1
avg_image_dataset.append(img)
print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
print("the averaging are done as ", counter)
return avg_image_dataset , labels , foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:1000], mosaic_label[0:1000], fore_idx[0:1000] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[1000:2000], mosaic_label[1000:2000], fore_idx[1000:2000] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# mean = torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)
# std = torch.std(avg_image_dataset_1, keepdims= True, axis = 0)
# avg_image_dataset_1 = (avg_image_dataset_1 - mean) / std
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
# print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# mean = torch.mean(test_dataset, keepdims= True, axis = 0)
# std = torch.std(test_dataset, keepdims= True, axis = 0)
# test_dataset = (test_dataset - mean) / std
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
# print("=="*40)
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
# idx1 = []
# for i in range(3):
# idx1.append(y1 == i)
# for i in range(3):
# z = np.zeros(x1[idx1[i]].shape[0])
# plt.scatter(x1[idx1[i]],z,label="class_"+str(i))
# plt.legend()
plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0')
plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1')
# plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2')
plt.legend()
plt.title("dataset1 CIN with alpha = 1/"+str(m))
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
idx_1 = y1==0
idx_2 = np.where(idx_1==True)[0]
idx_3 = np.where(idx_1==False)[0]
color = ['#1F77B4','orange', 'brown']
true_point = len(idx_2)
plt.scatter(x1[idx_2[:25]], y1[idx_2[:25]]*0, label='class 0', c= color[0], marker='o')
plt.scatter(x1[idx_3[:25]], y1[idx_3[:25]]*0, label='class 1', c= color[1], marker='o')
plt.scatter(x1[idx_3[50:75]], y1[idx_3[50:75]]*0, c= color[1], marker='o')
plt.scatter(x1[idx_2[50:75]], y1[idx_2[50:75]]*0, c= color[0], marker='o')
plt.legend()
plt.xticks( fontsize=14, fontweight = 'bold')
plt.yticks( fontsize=14, fontweight = 'bold')
plt.xlabel("X", fontsize=14, fontweight = 'bold')
# plt.savefig(fp_cin+"ds1_alpha_04.png", bbox_inches="tight")
# plt.savefig(fp_cin+"ds1_alpha_04.pdf", bbox_inches="tight")
avg_image_dataset_1[0:10]
x1 = (test_dataset).numpy()/m
y1 = np.array(labels)
# idx1 = []
# for i in range(3):
# idx1.append(y1 == i)
# for i in range(3):
# z = np.zeros(x1[idx1[i]].shape[0])
# plt.scatter(x1[idx1[i]],z,label="class_"+str(i))
# plt.legend()
plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0')
plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1')
# plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2')
plt.legend()
plt.title("test dataset1 ")
test_dataset.numpy()[0:10]/m
test_dataset = test_dataset/m
test_dataset.numpy()[0:10]
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
avg_image_dataset_1[0].shape, avg_image_dataset_1[0]
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(test_dataset, labels )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(1,50)
self.linear2 = nn.Linear(50,10)
self.linear3 = nn.Linear(10,2)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.zeros_(self.linear2.bias)
torch.nn.init.xavier_normal_(self.linear3.weight)
torch.nn.init.zeros_(self.linear3.bias)
def forward(self,x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = (self.linear3(x))
return x
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/i
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the 1000 test dataset %d: %.2f %%' % (number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
print("--"*40)
print("training on data set ", ds_number)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
optimizer_net = optim.Adam(net.parameters(), lr=0.0001 ) #, momentum=0.9)
acti = []
loss_curi = []
epochs = 1500
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 1000 train images: %.2f %%' % ( 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
return loss_curi, net
train_loss_all=[]
testloader_list= [ testloader_1 ]
loss, net = train_all(trainloader_1, 1, testloader_list)
train_loss_all.append(loss)
net.linear1.weight, net.linear1.bias
%matplotlib inline
for i,j in enumerate(train_loss_all):
plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
|
github_jupyter
|
# Bayesian Camera Calibration
> Let's apply Bayesian analysis to calibrate a camera
- toc: true
- badges: true
- comments: true
- categories: [Bayesian, Computer Vision]
- image: images/2020-03-28-Bayesian-Camera-Calibration/header.jpg
```
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
plt.rcParams['figure.figsize'] = [10,10]
def x_rot(theta,x,y,z):
theta *= np.pi/180
x_rot = x
y_rot = np.cos(theta)*y - np.sin(theta)*z
z_rot = np.sin(theta)*y + np.cos(theta)*z
return(x_rot,y_rot,z_rot)
def y_rot(theta,x,y,z):
theta *= np.pi/180
x_rot = np.cos(theta)*x + np.sin(theta)*z
y_rot = y
z_rot = -np.sin(theta)*x + np.cos(theta)*z
return(x_rot,y_rot,z_rot)
def z_rot(theta,x,y,z):
theta *= np.pi/180
x_rot = np.cos(theta)*x - np.sin(theta)*y
y_rot = np.sin(theta)*x + np.cos(theta)*y
z_rot = z
return(x_rot,y_rot,z_rot)
points = np.loadtxt("data/2020-02-23-An-Adventure-In-Camera-Calibration/points.csv")
points_2d = points[:,0:2]
points_3d = points[:,2:5]
number_points = points.shape[0]
px = points_2d[:,0]
py = points_2d[:,1]
X_input = points_3d[:,0]
Y_input = points_3d[:,1]
Z_input = points_3d[:,2]
def rotate(theta_Z_est,theta_Y_est,theta_X_est, X_est, Y_est, Z_est):
X_est, Y_est, Z_est = z_rot(theta_Z_est, X_est, Y_est, Z_est)
X_est, Y_est, Z_est = y_rot(theta_Y_est, X_est, Y_est, Z_est)
X_est, Y_est, Z_est = x_rot(theta_X_est, X_est, Y_est, Z_est)
return(X_est, Y_est, Z_est)
# Define priors
X_translate_est = pm.Normal('X_translate', mu = -7, sigma = 1)
Y_translate_est = pm.Normal('Y_translate', mu = -13, sigma = 1)
Z_translate_est = pm.Normal('Z_translate', mu = 3, sigma = 1)
focal_length_est = pm.Normal('focal_length',mu = 1000, sigma = 100)
theta_Z_est = pm.Normal('theta_Z',mu = -45, sigma = 30)
theta_Y_est = pm.Normal('theta_Y',mu = 0, sigma = 15)
theta_X_est = pm.Normal('theta_X',mu = 90, sigma = 30)
c_x_est = pm.Normal('c_x',mu = 1038.42, sigma = 100)
c_y_est = pm.Normal('c_y',mu = 2666.56, sigma = 100)
k1 = -0.351113
k2 = 0.185768
k3 = -0.032289
error_scale = 2
X_est = X_input + X_translate_est
Y_est = Y_input + Y_translate_est
Z_est = Z_input + Z_translate_est
X_est, Y_est, Z_est = rotate(theta_Z_est, theta_Y_est, theta_X_est, X_est, Y_est, Z_est)
px_est = X_est / Z_est
py_est = Y_est / Z_est
r = np.sqrt(px_est**2 + py_est**2)
px_est *= (1 + k1 * r + k2 * r**2 + k3 * r**3)
py_est *= (1 + k1 * r + k2 * r**2 + k3 * r**3)
px_est *= focal_length_est
py_est *= focal_length_est
px_est += c_x_est
py_est += c_y_est
delta = np.sqrt((px - px_est)**2 + (py - py_est)**2)
# Define likelihood
likelihood = pm.Normal('error', mu = delta, sigma = error_scale, observed=np.zeros(number_points))
# Inference!
trace = pm.sample(2_000, cores=4, tune=5000)
plt.figure(figsize=(7, 7))
pm.traceplot(trace[1000:])
plt.tight_layout();
pm.plot_posterior(trace);
pm.summary(trace)
```
|
github_jupyter
|
# How to read data from varius file formats
some of the most basic things noone ever treaches you is how to actually access your data in various formats. This notebook shows a couple of examples on how to read data from a number of sources. Feel free to edit this notebook with more methods that you have worked with.
```
#import relevant packages
#from urllib.request import urlretrieve
from urllib2 import urlopen
import matplotlib.pyplot as plt
import pandas as pd
from sqlalchemy import create_engine
import numpy as np
from astropy.io import fits
import urllib
import h5py
import pickle
%matplotlib inline
```
# importing files from the internet
```
# Assign url of file: url
url ='https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
# Save file locally
testfile = urllib.URLopener()
testfile.retrieve(url, "winequality-red.csv")
# or use wget
#file_name = wget.download(url)
# Read file into a DataFrame and print its head
df = pd.read_csv('winequality-red.csv', sep=';')
print(df.head())
pd.DataFrame.hist(df.ix[:, 0:1])
plt.xlabel('fixed acidity (g(tartaric acid)/dm$^3$)')
plt.ylabel('count')
plt.show()
```
# same thing with csv or txt file
```
example_sheet='cereal.csv'
example_file='cereal.txt'
xl = pd.read_csv(example_sheet)
x2 = pd.read_csv(example_sheet)
# akternatively you can use read_csv
print (xl.keys())
# pandas lets you specify seperators as well as number of colums and filling nans
#pd.read_csv(file, sep='\t', comment='#', na_values='Nothing')
# textfiles
data = np.loadtxt(example_file, delimiter='\t', skiprows=1, usecols=[4,5])
```
# Chunks
```
chunksize = 10 ** 6
for chunk in pd.read_csv(example_file,sep='\t', chunksize=chunksize):
print len(chunk) # print len can be replaced with any process that you would want to use
#similarly using read_table
for chunk in pd.read_table(example_file,sep='\t', chunksize=chunksize):
len(chunk)
```
# reading fits files
```
filename= 'example.fits'
hdulist = fits.open(filename)
final_data = hdulist[1].data
final_data.columns()
final_data[1]
```
# writing and reading HDF5 files
```
data_matrix = np.random.uniform(-1, 1, size=(10, 3))
# Write data to HDF5
data_file = h5py.File('file.hdf5', 'w')
data_file.create_dataset('group_name', data=data_matrix)
data_file.close()
filename_hdf = 'file.hdf5'
f = h5py.File(filename_hdf, 'r')
# List all groups
print("Keys: %s" % f.keys())
a_group_key = list(f.keys())[0]
# Get the data
data = list(f[a_group_key])
```
# SQL databases
assuming you want to read them into python
also have a look at the databases talk sarah gave (27/04/18)
```
# make sql database with pandas
engine = create_engine('PATH')
pd.to_sql('new_database', engine)
pd.read_sql("SELCT * FROM new_database", engine)
```
# Reading pickled files
I didn't have a pickled file ready so we will make a mock file to start with
```
your_data = {'foo': 'bar'} #makes dictionary
#alternatively use pandas to make and read pickled files
# Store data (serialize)
with open('filename.pickle', 'wb') as handle:
pickle.dump(your_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Load data (deserialize)
with open('filename.pickle', 'rb') as handle:
unserialized_data = pickle.load(handle)
print(unserialized_data)
```
# Reading JSON files
|
github_jupyter
|
```
from itertools import combinations
import qiskit
import numpy as np
import tqix
import sys
def generate_u_pauli(num_qubits):
lis = [0, 1, 2]
coms = []
if num_qubits == 2:
for i in lis:
for j in lis:
coms.append([i, j])
if num_qubits == 3:
for i in lis:
for j in lis:
for k in lis:
coms.append([i, j, k])
if num_qubits == 4:
for i in lis:
for j in lis:
for k in lis:
for l in lis:
coms.append([i, j, k, l])
if num_qubits == 5:
for i in lis:
for j in lis:
for k in lis:
for l in lis:
for m in lis:
coms.append([i, j, k, l, m])
sigma = [tqix.sigmax(), tqix.sigmay(), tqix.sigmaz()]
Us = []
for com in coms:
U = sigma[com[0]]
for i in range(1, num_qubits):
U = np.kron(U, sigma[com[i]])
Us.append(U)
return Us[: 3**num_qubits]
def create_basic_vector(num_qubits: int):
"""Generate list of basic vectors
Args:
num_qubits (int): number of qubits
Returns:
np.ndarray: |00...0>, |00...1>, ..., |11...1>
"""
bs = []
for i in range(0, 2**num_qubits):
b = np.zeros((2**num_qubits, 1))
b[i] = 1
bs.append(b)
return bs
def calculate_sigma(U: np.ndarray, b: np.ndarray):
"""Calculate measurement values
Args:
U (np.ndarray): operator
b (np.ndarray): basic vector
Returns:
np.ndarray: sigma operator
"""
return (np.conjugate(np.transpose(U)) @ b @ np.conjugate(np.transpose(b)) @ U)
# def calculate_mu(density_matrix):
# M = np.zeros((2**num_qubits, 2**num_qubits), dtype=np.complex128)
# for i in range(0, num_observers):
# for j in range(0, 2**num_qubits):
# k = sigmass[i][j]
# M += np.trace(k @ density_matrix) * k
# M /= num_observers
# return M
def calculate_mu_inverse(density_matrix, num_qubits):
k = 3*density_matrix - \
np.trace(density_matrix) * np.identity(2 **
num_qubits, dtype=np.complex128)
# M = k.copy()
# for i in range(1, num_qubits):
# M = np.kron(M, k)
return k
def self_tensor(matrix, n):
product = matrix
for i in range(1, n):
product = np.kron(product, matrix)
return product
num_qubits = 4
psi = 2*np.random.rand(2**num_qubits)
psi = psi / np.linalg.norm(psi)
rho = qiskit.quantum_info.DensityMatrix(psi).data
def shadow(num_experiments):
num_observers = 3**num_qubits
Us, bs = [], []
bs = create_basic_vector(num_qubits)
Us = generate_u_pauli(num_qubits)
count_i = [0] * (num_observers)
sum_b_s = [np.zeros((2**num_qubits, 2**num_qubits),
dtype=np.complex128)] * (num_observers)
for i in range(0, num_experiments):
r = np.random.randint(0, num_observers)
count_i[r] += 1
U = Us[r]
sum_b = np.zeros((2**num_qubits, 2**num_qubits), dtype=np.complex128)
for j in range(0, 2**num_qubits):
k = calculate_sigma(U, bs[j])
sum_b_s[r] += np.trace(k @ rho)*calculate_mu_inverse(k, num_qubits)
temp = sum_b_s[r].copy()
sum_b_s[r] = (np.conjugate(np.transpose(
temp)) @ temp) / (np.trace(np.conjugate(np.transpose(temp)) @ temp))
ps = np.zeros(num_observers)
rho_hat = np.zeros((2**num_qubits, 2**num_qubits), dtype=np.complex128)
rho_hat_variant = 0
for i in range(0, num_observers):
ps[i] = count_i[i] / num_experiments
traceA = np.trace(self_tensor(tqix.sigmaz(), num_qubits) @ sum_b_s[i])
traceB = np.trace(self_tensor(tqix.sigmaz(), num_qubits) @ rho)
rho_hat_variant += ps[i] * (traceA - traceB)**2
rho_hat += ps[i] * sum_b_s[i]
return rho_hat_variant, rho_hat
# new_rho_hat = (np.conjugate(np.transpose(
# rho_hat)) @ rho_hat) / (np.trace(np.conjugate(np.transpose(rho_hat)) @ rho_hat))
# fidelity = qtm.base.trace_fidelity(rho, new_rho_hat)
# trace = qtm.base.trace_distance(rho, new_rho_hat)
# return trace, fidelity, rho, new_rho_hat
# traces = []
# fidelities = []
# rho_hats = []
# for i in range(0, 1):
# trace, fidelity, rho, new_rho_hat = shadow_tomo()
# traces.append(trace)
# fidelities.append(fidelity)
# rho_hats.append(new_rho_hat.copy())
# print(np.mean(traces))
# print(np.mean(fidelities))
# print(np.std(traces))
# print(np.std(fidelities))
# min_rho_hat = (rho_hats[np.argmin(traces)])
rho_hat_variantss = []
noe_large = [10**2, 10**3, 10**4, 10**5]
for noe in noe_large:
rho_hat_variants = []
for i in range(0, 10):
rho_hat_variant, rho_hat = shadow(noe)
rho_hat_variants.append(rho_hat_variant)
rho_hat_variantss.append(rho_hat_variants)
np.savetxt("./rho_hat_variantss" + str(num_qubits) + ".csv",
rho_hat_variantss,
delimiter=",")
averages_var = [0]*4
averages_std = [0]*4
for i in range(len(noe_large)):
averages_var[i] = np.mean(rho_hat_variantss[i])
averages_std[i] = np.std(rho_hat_variantss[i])
print(averages_var)
print(averages_std)
import matplotlib.pyplot as plt
plt.plot(noe_large, averages_var)
plt.subplot(2, 1, 1)
plt.plot(noe_large, averages_var)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('NoE')
plt.ylabel('Var')
plt.show()
```
L = 2, W_chain, Adam
Calculate $var(Z\otimes Z) = (\langle\tilde{\psi}|ZZ|\tilde{\psi}\rangle^2 - \langle\psi|ZZ|\psi\rangle^2)$
```
import sys
sys.path.insert(1, '../')
import qtm.fubini_study
import qtm.nqubit
import qtm.base
num_layers = 2
thetas = np.ones(num_layers*num_qubits*4)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_chain: (' + str(num_layers) +
',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.nqubit.create_Wchain_layerd_state,
thetas, r=1/2, s=np.pi/2, num_layers=num_layers)
if i == 0:
m, v = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.base.adam(thetas, m, v, i, grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.nqubit.create_Wchain_layerd_state(
qc.copy(), thetas, num_layers)
loss = qtm.base.loss_basis(qtm.base.measure(
qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
variances = []
for thetas in thetass:
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.nqubit.create_Wchain_layerd_state(
qc, thetas, num_layers=num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc).data
variances.append((np.conjugate(np.transpose(psi_hat)) @ self_tensor(tqix.sigmaz(), num_qubits) @ psi_hat)
** 2 - (np.conjugate(np.transpose(psi)) @ self_tensor(tqix.sigmaz(), num_qubits) @ psi)**2)
plt.plot(variances)
np.savetxt("./thetass"+ str(num_qubits) + ".csv",
thetass,
delimiter=",")
np.savetxt("./variances" + str(num_qubits) + ".csv",
variances,
delimiter=",")
min((abs(x), x) for x in variances)[1]
variances[-1]
```
|
github_jupyter
|
# Simulate Artificial Physiological Signals
Neurokit's core signal processing functions surround electrocardiogram (ECG), respiratory (RSP), electrodermal activity (EDA), and electromyography (EMG) data. Hence, this example shows how to use Neurokit to simulate these physiological signals with customized parametric control.
```
# Load NeuroKit and other useful packages
import neurokit2 as nk
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = [8, 5] # Bigger images
```
## Cardiac Activity (ECG)
With `ecg_simulate()`, you can generate an artificial ECG signal of a desired length (in this case here, `duration=10`), noise, and heart rate. As you can see in the plot below, *ecg50* has about half the number of heart beats than *ecg100*, and *ecg50* also has more noise in the signal than the latter.
```
# Alternate heart rate and noise levels
ecg50 = nk.ecg_simulate(duration=10, noise=0.05, heart_rate=50)
ecg100 = nk.ecg_simulate(duration=10, noise=0.01, heart_rate=100)
# Visualize
pd.DataFrame({"ECG_100": ecg100,
"ECG_50": ecg50}).plot()
```
You can also choose to generate the default, simple simulation based on Daubechies wavelets, which roughly approximates one cardiac cycle, or a more complex one by specifiying `method="ecgsyn"`.
```
# Alternate methods
ecg_sim = nk.ecg_simulate(duration=10, method="simple")
ecg_com = nk.ecg_simulate(duration=10, method="ecgsyn")
# Visualize
pd.DataFrame({"ECG_Simple": ecg_sim,
"ECG_Complex": ecg_com}).plot(subplots=True)
```
## Respiration (RSP)
To simulate a synthetic respiratory signal, you can use `rsp_simulate()` and choose a specific duration and breathing rate. In this example below, you can see that *rsp7* has a lower breathing rate than *rsp15*. You can also decide which model you want to generate the signal. The *simple rsp15* signal incorporates `method = "sinusoidal"` which approximates a respiratory cycle based on the trigonometric sine wave. On the other hand, the *complex rsp15* signal specifies `method = "breathmetrics"` which uses a more advanced model by interpolating inhalation and exhalation pauses between each respiratory cycle.
```
# Simulate
rsp15_sim = nk.rsp_simulate(duration=20, respiratory_rate=15, method="sinusoidal")
rsp15_com = nk.rsp_simulate(duration=20, respiratory_rate=15, method="breathmetrics")
rsp7 = nk.rsp_simulate(duration=20, respiratory_rate=7, method="breathmetrics")
# Visualize respiration rate
pd.DataFrame({"RSP7": rsp7,
"RSP15_simple": rsp15_sim,
"RSP15_complex": rsp15_com}).plot(subplots=True)
```
## Electromyography (EMG)
Now, we come to generating an artificial EMG signal using `emg_simulate()`. Here, you can specify the number of bursts of muscular activity (`n_bursts`) in the signal as well as the duration of the bursts (`duration_bursts`). As you can see the active muscle periods in *EMG2_Longer* are greater in duration than that of *EMG2*, and *EMG5* contains more bursts than the former two.
```
# Simulate
emg2 = nk.emg_simulate(duration=10, burst_number=2, burst_duration=1.0)
emg2_long = nk.emg_simulate(duration=10, burst_number=2, burst_duration=1.5)
emg5 = nk.emg_simulate(duration=10, burst_number=5, burst_duration=1.0)
# Visualize
pd.DataFrame({"EMG2": emg2,
"EMG2_Longer": emg2_long,
"EMG5": emg5}).plot(subplots=True)
```
## Electrodermal Activity (EDA)
Finally, `eda_simulate()` can be used to generate a synthetic EDA signal of a given duration, specifying the number of skin conductance responses or activity 'peaks' (`n_scr`) and the `drift` of the signal. You can also modify the noise level of the signal.
```
# Simulate
eda1 = nk.eda_simulate(duration=10, scr_number=1, drift=-0.01, noise=0.05)
eda3 = nk.eda_simulate(duration=10, scr_number=3, drift=-0.01, noise=0.01)
eda3_long = nk.eda_simulate(duration=10, scr_number=3, drift=-0.1, noise=0.01)
# Visualize
pd.DataFrame({"EDA1": eda1,
"EDA3": eda3,
"EDA3_Longer": eda3_long}).plot(subplots=True)
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/cstorm125/abtestoo/blob/master/notebooks/frequentist_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# A/B Testing from Scratch: Frequentist Approach
Frequentist A/B testing is one of the most used and abused statistical methods in the world. This article starts with a simple problem of comparing two online ads campaigns (or teatments, user interfaces or slot machines). It outlines several useful statistical concepts and how we exploit them to solve our problem. At the end, it acknowledges some common pitfalls we face when doing a frequentist A/B test and proposes some possible solutions to a more robust A/B testing. Readers are encouraged to tinker with the widgets provided in order to explore the impacts of each parameter.
Thanks to [korakot](https://github.com/korakot) for notebook conversion to Colab.
```
# #depedencies for colab
# %%capture
# !pip install plotnine
import numpy as np
import pandas as pd
from typing import Collection, Tuple
#widgets เอาออก เปลี่ยนไปใช้ colab form แทน
#from ipywidgets import interact, interactive, fixed, interact_manual
#import ipywidgets as widgets
# from IPython.display import display
#plots
import matplotlib.pyplot as plt
from plotnine import *
#stats
import scipy as sp
#suppress annoying warning prints
import warnings
warnings.filterwarnings('ignore')
```
## Start with A Problem
A typical situation marketers (research physicians, UX researchers, or gamblers) find themselves in is that they have two variations of ads (treatments, user interfaces, or slot machines) and want to find out which one has the better performance in the long run.
Practitioners know this as A/B testing and statisticians as **hypothesis testing**. Consider the following problem. We are running an online ads campaign `A` for a period of time, but now we think a new ads variation might work better so we run an experiemnt by dividing our audience in half: one sees the existing campaign `A` whereas the other sees a new campaign `B`. Our performance metric is conversion (sales) per click (ignore [ads attribution problem](https://support.google.com/analytics/answer/1662518) for now). After the experiment ran for two months, we obtain daily clicks and conversions of each campaign and determine which campaign has the better performance.
We simulate the aforementioned problem with both campaigns getting randomly about a thousand clicks per day. The secrete we will pretend to not know is that hypothetical campaign `B` has slightly better conversion rate than `A` in the long run. With this synthetic data, we will explore some useful statistical concepts and exploit them for our frequentist A/B testing.
```
def gen_bernoulli_campaign(p1: float, p2: float,
lmh: Collection = [500, 1000, 1500],
timesteps: int = 60,
scaler: float = 300, seed: int = 1412) -> pd.DataFrame:
'''
:meth: generate fake impression-conversion campaign based on specified parameters
:param float p1: true conversion rate of group 1
:param float p2: true conversion rate of group 2
:param Collection lmh: low-, mid-, and high-points for the triangular distribution of clicks
:param int nb_days: number of timesteps the campaigns run for
:param float scaler: scaler for Gaussian noise
:param int seed: seed for Gaussian noise
:return: dataframe containing campaign results
'''
np.random.seed(seed)
ns = np.random.triangular(*lmh, size=timesteps * 2).astype(int)
np.random.seed(seed)
es = np.random.randn(timesteps * 2) / scaler
n1 = ns[:timesteps]
c1 = ((p1 + es[:timesteps]) * n1).astype(int)
n2 = ns[timesteps:]
c2 = ((p2 + es[timesteps:]) * n2).astype(int)
result = pd.DataFrame({'timesteps': range(timesteps), 'impression_a': n1, 'conv_a': c1, 'impression_b': n2, 'conv_b': c2})
result = result[['timesteps', 'impression_a', 'impression_b', 'conv_a', 'conv_b']]
result['cumu_impression_a'] = result.impression_a.cumsum()
result['cumu_impression_b'] = result.impression_b.cumsum()
result['cumu_conv_a'] = result.conv_a.cumsum()
result['cumu_conv_b'] = result.conv_b.cumsum()
result['cumu_rate_a'] = result.cumu_conv_a / result.cumu_impression_a
result['cumu_rate_b'] = result.cumu_conv_b / result.cumu_impression_b
return result
conv_days = gen_bernoulli_campaign(p1 = 0.10,
p2 = 0.105,
timesteps = 60,
scaler=300,
seed = 1412) #god-mode
conv_days.columns = [i.replace('impression','click') for i in conv_days.columns] #function uses impressions but we use clicks
conv_days.head()
rates_df = conv_days[['timesteps','cumu_rate_a','cumu_rate_b']].melt(id_vars='timesteps')
g = (ggplot(rates_df, aes(x='timesteps', y='value', color='variable')) + geom_line() + theme_minimal() +
xlab('Days of Experiment Run') + ylab('Cumulative Conversions / Cumulative Clicks'))
g
#sum after 2 months
conv_df = pd.DataFrame({'campaign_id':['A','B'], 'clicks':[conv_days.click_a.sum(),conv_days.click_b.sum()],
'conv_cnt':[conv_days.conv_a.sum(),conv_days.conv_b.sum()]})
conv_df['conv_per'] = conv_df['conv_cnt'] / conv_df['clicks']
conv_df
```
## Random Variables and Probability Distributions
Take a step back and think about the numbers we consider in our daily routines, whether it is conversion rate of an ads campaign, the relative risk of a patient group, or sales and revenues of a shop during a given period of time. From our perspective, they have one thing in common: **we do not know exactly how they come to be**. In fact, we would not need an A/B test if we do. For instance, if we know for certain that conversion rate of an ads campaign will be `0.05 + 0.001 * number of letters in the ads`, we can tell exactly which ads to run: the one with the highest number of letters in it.
With our lack of knowledge, we do the next best thing and assume that our numbers are generated by some mathematical formula, calling them **random variables**. For instance, we might think of the probability of a click converting the same way as a coin-flip event, with the probability of converting as $p$ (say 0.1) and not converting as $1-p$ (thus 0.9). With this, we can simulate the event aka click conversion for as many times as we want:
```
def bernoulli(n,p):
flips = np.random.choice([0,1], size=n, p=[1-p,p])
flips_df = pd.DataFrame(flips)
flips_df.columns = ['conv_flag']
g = (ggplot(flips_df,aes(x='factor(conv_flag)')) + geom_bar(aes(y = '(..count..)/sum(..count..)')) +
theme_minimal() + xlab('Conversion Flag') + ylab('Percentage of Occurence') +
geom_hline(yintercept=p, colour='red') + ggtitle(f'Distribution after {n} Trials'))
g.draw()
print(f'Expectation: {p}\nVariance: {p*(1-p)}')
print(f'Sample Mean: {np.mean(flips)}\nSample Variance: {np.var(flips)}')
# ใช้ colab form แทน interact
#interact(bernoulli, n=widgets.IntSlider(min=1,max=500,step=1,value=20),
# p=widgets.FloatSlider(min=0.1,max=0.9))
#@title {run: "auto"}
n = 20 #@param {type:"slider", min:1, max:500, step:1}
p = 0.1 #@param {type:"slider", min:0.1, max:0.9, step:0.1}
bernoulli(n, p)
```
**Probability distribution** is represented with the values of a random variable we are interested in the X-axis, and the chance of them appearing after a number of trials in the Y-axis. The distribution above is called [Bernoulli Distribution](http://mathworld.wolfram.com/BernoulliDistribution.html), usually used to model hypothetical coin flips and online advertisements. [Other distributions](https://en.wikipedia.org/wiki/List_of_probability_distributions) are used in the same manner for other types of random variables. [Cloudera](https://www.cloudera.com/) provided a [quick review](https://blog.cloudera.com/blog/2015/12/common-probability-distributions-the-data-scientists-crib-sheet/) on a few of them you might find useful.
<img src='https://github.com/cstorm125/abtestoo/blob/master/images/distribution.png?raw=1' alt='Common Probability Distributions; Cloudera'/>
## Law of Large Numbers
There are two sets of indicators of a distribution that are especially relevant to our problem: one derived theoretically and another derived from data we observed. **Law of Large Numbers (LLN)** describes the relationship of between them.
Theoretically, we can derive these values about any distribution:
* **Expectation** of a random variable $X_i$ is its long-run average dervied from repetitively sampling $X_i$ from the same distribution. Each distribution requires its own way to obtain the expectation. For our example, it is the weighted average of outcomes $X_i$ ($X_i=1$ converted; $X_i=0$ not converted) and their respective probabilities ($p$ converted; $1-p$ not converted):
\begin{align}
E[X_i] &= \mu = \sum_{i=1}^{k} p_i * X_i \\
&= (1-p)*0 + p*1 \\
&= p
\end{align}
where $k$ is number of patterns of outcomes
* **Variance** of a random variable $X_i$ represents the expectation of how much $X_i$ deviates from its expectation, for our example formulated as:
\begin{align}
Var(X_i) &= \sigma^2 = E[(X_i-E(X_i))^2] \\
&= E[X_i^2] - E[X_i]^2 \\
&= \{(1-p)*0^2 + p*1^2\} - p^2 \\
&= p(1-p)
\end{align}
Empirically, we can also calculate their counterparts with the any amount of data we have on hand:
* **Sample Mean** is simply an average of all $X_i$ we currently have in our sample of size $n$:
\begin{align}
\bar{X} &= \frac{1}{n} \sum_{i=1}^{n} X_i
\end{align}
* **Sample Variance** is the variance based on deviation from sample mean; the $n-1$ is due to [Bessel's correction](https://en.wikipedia.org/wiki/Bessel%27s_correction#Source_of_bias) (See Appendix):
\begin{align}
s^2 &= \frac{1}{n-1} \sum_{i=1}^{n} (X_i - \bar{X})^2
\end{align}
LLN posits that when we have a large enough number of sample $n$, the sample mean will converge to expectation. This can be shown with a simple simulation:
```
def lln(n_max,p):
mean_flips = []
var_flips = []
ns = []
for n in range(1,n_max):
flips = np.random.choice([0,1], size=n, p=[1-p,p])
ns.append(n)
mean_flips.append(flips.mean())
var_flips.append(flips.var())
flips_df = pd.DataFrame({'n':ns,'mean_flips':mean_flips,'var_flips':var_flips}).melt(id_vars='n')
g = (ggplot(flips_df,aes(x='n',y='value',colour='variable')) + geom_line() +
facet_wrap('~variable', ncol=1, scales='free') + theme_minimal() +
ggtitle(f'Expectation={p:2f}; Variance={p*(1-p):2f}') + xlab('Number of Samples') +
ylab('Value'))
g.draw()
# interact(lln, n_max=widgets.IntSlider(min=2,max=10000,step=1,value=1000),
# p=widgets.FloatSlider(min=0.1,max=0.9))
#@title {run: "auto"}
n = 1000 #@param {type:"slider", min:2, max:10000, step:1}
p = 0.1 #@param {type:"slider", min:0.1, max:0.9, step:0.1}
lln(n, p)
```
Notice that even though LLN does not says that sample variance will also converge to variance as $n$ grows large enough, it is also the case. Mathematically, it can be derived as follows:
\begin{align}
s^2 &= \frac{1}{n}\sum_{i=1}^{n}(X_i - \bar{X}^2) \\
&= \frac{1}{n}\sum_{i=1}^{n}(X_i - \mu)^2 \text{; as }n\rightarrow\infty\text{ }\bar{X}\rightarrow\mu\\
&=\frac{1}{n}(\sum_{i=1}^{n}{X_i}^2 - 2\mu\sum_{i=1}^{n}X_i + n\mu^2) \\
&=\frac{\sum_{i=1}^{n}{X_i}^2}{n} - \frac{2\mu\sum_{i=1}^{n}X_i}{n} + \mu^2 \\
&= \frac{\sum_{i=1}^{n}{X_i}^2}{n} - 2\mu\bar{X} + \mu^2\text{; as }\frac{\sum_{i=1}^{n}X_i}{n} = \bar{X}\\
&= \frac{\sum_{i=1}^{n}{X_i}^2}{n} - 2\mu^2 + \mu^2 = \frac{\sum_{i=1}^{n}{X_i}^2}{n} - \mu^2 \text{; as }n\rightarrow\infty\text{ }\bar{X}\rightarrow\mu\\
&= E[{X_i}^2] - E[X_i]^2 = Var(X_i) = \sigma^2
\end{align}
## Central Limit Theorem
Assuming some probability distribution for our random variable also lets us exploit another extremely powerful statistical concept: **Central Limit Theorem (CLT)**. To see CLT in action, let us simplify our problem a bit and say we are only trying to find out if a hypothetical ads campaign `C` has a conversion rate of more than 10% or not, assuming data collected so far say that `C` has 1,000 clicks and 107 conversions.
```
c_df = pd.DataFrame({'campaign_id':'C','clicks':1000,'conv_cnt':107,'conv_per':0.107},index=[0])
c_df
```
CLT goes as follows:
> If $X_i$ is an independent and identically distributed (i.i.d.) random variable with expectation $\mu$ and variance $\sigma^2$ and $\bar{X_j}$ is the sample mean of $n$ samples of $X_i$ we drew as part of sample group $j$, then when $n$ is large enough, $\bar{X_j}$ will follow a [normal distribution](http://mathworld.wolfram.com/NormalDistribution.html) with with expectation $\mu$ and variance $\frac{\sigma^2}{n}$
It is a mouthful to say and full of weird symbols, so let us break it down line by line.
**If $X_i$ is an independent and identically distributed (i.i.d.) random variable with expectation $\mu$ and variance $\sigma^2$** <br/>In our case, $X_i$ is if click $i$ is coverted ($X_i=1$) or not converted ($X_i=0$) with $\mu$ as some probability that represents how likely a click will convert on average. *Independent* means that the probability of each click converting depends only on itself and not other clicks. *Identically distributed* means that the true probability of each click converting is more or less the same. We need to rely on domain knowledge to verify these assumptions; for example, in online advertisement, we would expect, at least for when working with a reputable ads network such as Criteo, that each click comes from indepdent users, as opposed to, say, a click farm where we would see a lot of clicks behaving the same way by design. Identical distribution is a little difficult to assume since we would think different demographics the ads are shown to will react differently so they might not have the same expectation.
```
ind_df = pd.DataFrame({'iid':[False]*100+[True]*100,
'order': list(range(100)) + list(range(100)),
'conv_flag':[1]*50+ [0]*50+ list(np.random.choice([0,1], size=100))})
g = (ggplot(ind_df,aes(x='order',y='conv_flag',color='iid')) + geom_point() +
facet_wrap('~iid') + theme_minimal() + xlab('i-th Click') + ylab('Conversion') +
ggtitle('Both plots has conversion rate of 50% but only one is i.i.d.'))
g
```
**and $\bar{X_j}$ is the sample mean of $n$ samples of $X_i$ we drew as part of sample group $j$, then**<br/>
For campaign `C`, we can think of all the clicks we observed as one sample group, which exists in parallel with an infinite number of sample groups that we have not seen yet but can be drawn from the distribution by additional data collection. This way, we calculate the sample mean as total conversions divided by total number of clicks observed during the campaign.
<img src='https://github.com/cstorm125/abtestoo/blob/master/images/sample_group.png?raw=1' alt='Sample Group in Universe'>
**when $n$ is large enough, $\bar{X_j}$ will follow a [normal distribution](http://mathworld.wolfram.com/NormalDistribution.html) with with expectation $\mu$ and variance $\frac{\sigma^2}{n}$**</br>
Here's the kicker: regardless of what distribution each $X_i$ of sample group $j$ is drawn from, as long as you have enough number of sample $n$, the sample mean of that sample group $\bar{X_j}$ will converge to a normal distribution. Try increase $n$ in the plot below and see what happens.
```
def clt(n, dist):
n_total = n * 10000
if dist == 'discrete uniform':
r = np.random.uniform(size=n_total)
elif dist =='bernoulli':
r = np.random.choice([0,1],size=n_total,p=[0.9,0.1])
elif dist =='poisson':
r = np.random.poisson(size=n_total)
else:
raise ValueError('Choose distributions that are available')
#generate base distribution plot
r_df = pd.DataFrame({'r':r})
g1 = (ggplot(r_df, aes(x='r')) + geom_histogram(bins=30) + theme_minimal() +
xlab('Values') + ylab('Number of Samples') +
ggtitle(f'{dist} distribution where sample groups are drawn from'))
g1.draw()
#generate sample mean distribution plot
normal_distribution = np.random.normal(loc=np.mean(r), scale=np.std(r) / np.sqrt(n), size=10000)
sm_df = pd.DataFrame({'sample_means':r.reshape(-1,n).mean(1),
'normal_distribution': normal_distribution}).melt()
g2 = (ggplot(sm_df, aes(x='value',fill='variable')) +
geom_histogram(bins=30,position='nudge',alpha=0.5) +
theme_minimal() + xlab('Sample Means') + ylab('Number of Sample Means') +
ggtitle(f'Distribution of 10,000 sample means with size {n}'))
g2.draw()
dists = ['bernoulli','discrete uniform','poisson']
# interact(clt, n=widgets.IntSlider(min=1,max=100,value=1),
# dist = widgets.Dropdown(
# options=dists,
# value='bernoulli')
# )
#@title {run: "auto"}
n = 30 #@param {type:"slider", min:1, max:100, step:1}
dist = 'bernoulli' #@param ["discrete uniform", "bernoulli", "poisson"] {type:"string"}
clt(n, dist)
```
The expectation and variance of the sample mean distribution can be derived as follows:
\begin{align}
E[\bar{X_j}] &= E[\frac{\sum_{i=1}^{n} X_i}{n}] \\
&= \frac{1}{n} \sum_{i=1}^{n} E[X_i] = \frac{1}{n} \sum_{i=1}^{n} \mu\\
&= \frac{n\mu}{n} = \mu \\
Var(\bar{X_j}) &= Var(\frac{\sum_{i=1}^{n} X_i}{n}) \\
&= \frac{1}{n^2} \sum_{i=1}^{n} Var(X_i) = \frac{1}{n^2} \sum_{i=1}^{n} \sigma^2\\
&= \frac{n\sigma^2}{n^2} = \frac{\sigma^2}{n} \\
\end{align}
The fact that we know this specific normal distribution of sample means has expectation $\mu$ and variance $\frac{\sigma^2}{n}$ is especially useful. Remember we want to find out whether campaign `C` **in general, not just in any sample group,** has better conversion rate than 10%. Below is that exact normal distribution based on information from our sample group (1,000 clicks) and the assumption that conversion rate is 10%:
\begin{align}
E[\bar{X_j}] &= \mu = p\\
&= 0.1 \text{; by our assumption}\\
Var(\bar{X_j}) &= \frac{\sigma^2}{n} = \frac{p*(1-p)}{n}\\
&= \frac{0.1 * (1-0.1)}{1000}\\
&= 0.0009\\
\end{align}
```
n = c_df.clicks[0]
x_bar = c_df.conv_per[0]
p = 0.1
mu = p; variance = p*(1-p)/n; sigma = (variance)**(0.5)
# mu = 0; variance = 1; sigma = (variance)**(0.5)
x = np.arange(0.05, 0.15, 1e-3)
y = np.array([sp.stats.norm.pdf(i, loc=mu, scale=sigma) for i in x])
sm_df = pd.DataFrame({'x': x, 'y': y, 'crit':[False if i>x_bar else True for i in x]})
g = (ggplot(sm_df, aes(x='x', y='y')) + geom_area() +
theme_minimal() + xlab('Sample Means') + ylab('Probability Density Function') +
ggtitle('Sample mean distribution under our assumption'))
g
```
As long as we know the expectation (which we usually do as part of the assumption) and variance (which is more tricky) of the base distribution, we can use this normal distribution to model random variable from *any* distribution. That is, we can model *any* data as long as we can assume their expectation and variance.
## Think Like A ~~Detective~~ Frequentist
In a frequentist perspective, we treat a problem like a criminal persecution. First, we assume innocence of the defendant often called **null hypothesis** (in our case that conversion rate is *less than or equal to* 10%). Then, we collect the evidence (all clicks and conversions from campaign `C`). After that, we review how *unlikely* it is that we have this evidence assuming the defendant is innocent (by looking at where our sample mean lands on the sample mean distribution). Most frequentist tests are simply saying:
>If we assume that [conversion rate]() of [ads campaign C]() has the long-run [conversion rate]() of less than or equal to [10%](), our results with sample mean [0.107]() or more extreme ones are so unlikely that they happen only [23%]() of the time, calculated by the area of the distribution with higher value than our sample mean.
Note that you can substitute the highlighted parts with any other numbers and statistics you are comparing; for instance, medical trials instead of ads campaigns and relative risks instead of converion rates.
```
g = (ggplot(sm_df, aes(x='x', y='y', group='crit')) + geom_area(aes(fill='crit')) +
theme_minimal() + xlab('Sample Means') + ylab('Probability Density Function') +
ggtitle('Sample mean distribution under our assumption') +
guides(fill=guide_legend(title="Conversion Rate < 0.1")))
g
```
Whether 23% is unlikely *beyond reasonable doubt* depends on how much we are willing to tolerate the false positive rate (the percentage of innocent people you are willing to execute). By convention, a lot of practioners set this to 1-5% depending on their problems; for instance, an experiment in physics may use 1% or less because physical phenomena is highly reproducible whereas social science may use 5% because the human behaviors are more variable. This is not to be confused with **false discovery rate** which is the probability of our positive predictions turning out to be wrong. The excellent book [Statistics Done Wrong](https://www.statisticsdonewrong.com/p-value.html) has given this topic an extensive coverage that you definitely should check out (Reinhart, 2015).
This degree of acceptable unlikeliness is called **alpha** and the probability we observe is called **p-value**. We must set alpha as part of the assumption before looking at the data (the law must first state how bad an action is for a person to be executed).
## Transforming A Distribution
In the previous example of `C`, we are only interested when the conversion rate is *more than* 10% so we look only beyond the right-hand side of our sample mean (thus called **one-tailed tests**). If we were testing whether the conversion rate is *equal to* 10% or not we would be interested in both sides (thus called **two-tailed tests**). However, it is not straightforward since we have to know the equivalent position of our sample mean on the left-hand side of the distribution.
One way to remedy this is to convert the sample mean distribution to a distribution that is symmetrical around zero and has a fixed variance so the value on one side is equivalent to minus that value of the other side. **Standard normal distribution** is the normal distribution with expectation $\mu=0$ and variance $\sigma^2=1$. We convert any normal distribution to a standard normal distribution by:
1. Shift its expectation to zero. This can be done by substracting all values of a distribution by its expectation:
\begin{align}
E[\bar{X_j}-\mu] &= E[\bar{X_j}]-\mu \\
&= \mu-\mu \\
&= 0 \\
\end{align}
2. Scale its variance to 1. This can be done by dividing all values by square root of its variance called **standard deviation**:
\begin{align}
Var(\frac{\bar{X_j}}{\sqrt{\sigma^2/n}}) &= \frac{1}{\sigma^2/n}Var(\bar{X_j})\\
&= \frac{\sigma^2/n}{\sigma^2/n}\\
&=1
\end{align}
Try shifting and scaling the distribution below with different $m$ and $v$.
```
def shift_normal(m,v):
n = c_df.clicks[0]
x_bar = c_df.conv_per[0]
p = 0.1
mu = p; variance = p*(1-p)/n; sigma = (variance)**(0.5)
x = np.arange(0.05, 0.15, 1e-3)
y = np.array([sp.stats.norm.pdf(i, loc=mu, scale=sigma) for i in x])
sm_df = pd.DataFrame({'x': x, 'y': y})
#normalize process
sm_df['x'] = (sm_df.x - m) / np.sqrt(v)
sm_df['y'] = np.array([sp.stats.norm.pdf(i, loc=mu-m, scale=sigma/np.sqrt(v)) for i in sm_df.x])
print(f'Expectation of sample mean: {mu-m}; Variance of sample mean: {variance/v}')
g = (ggplot(sm_df, aes(x='x', y='y')) + geom_area() +
theme_minimal() + xlab('Sample Means') + ylab('Probability Density Function') +
ggtitle('Shifted Normal Distribution of Sample Mean'))
g.draw()
# interact(shift_normal,
# m=widgets.FloatSlider(min=-1e-1,max=1e-1,value=1e-1,step=1e-2),
# v=widgets.FloatSlider(min=9e-5,max=9e-3,value=9e-5,step=1e-4, readout_format='.5f'))
#@title {run: "auto"}
m = 0.1 #@param {type:"slider", min:-1e-1, max:1e-1, step:1e-2}
v = 9e-5 #@param {type:"slider", min:9e-5, max:9e-3, step:1e-4}
shift_normal(m,v)
```
By shifting and scaling, we can find out where `C`'s sample mean of 0.107 lands on the X-axis of a standard normal distribution:
\begin{align}
\bar{Z_j} &= \frac{\bar{X_j} - \mu}{\sigma / \sqrt{n}} \\
&= \frac{0.107 - 0.1}{0.3 / \sqrt{1000}} \approx 0.7378648\\
\end{align}
With $\bar{Z_j}$ and $-\bar{Z_j}$, we can calculate the probability of falsely rejecting the null hypotheysis, or p-value, as the area in red, summing up to approximately 46%. This is most likely too high a false positive rate anyone is comfortable with (no one believes a pregnancy test that turns out positive for 46% of the people who are not pregnant), so we fail to reject the null hypothesis that conversion rate of `C` is equal to 10%.
If someone asks a frequentist for an opinion, they would probably say that they cannot disprove `C` has conversion rate of 10% in the long run. If they were asked to choose an action, they would probably go with the course of action that assumes `C` has a conversion rate of 10%.
```
n = c_df.clicks[0]
x_bar = c_df.conv_per[0]
p = 0.1; mu = p; variance = p*(1-p)/n; sigma = (variance)**(0.5)
x_bar_norm = (x_bar - mu) / sigma
def standard_normal(x_bar_norm, legend_title):
x_bar_norm = abs(x_bar_norm)
x = np.arange(-3, 3, 1e-2)
y = np.array([sp.stats.norm.pdf(i, loc=0, scale=1) for i in x])
sm_df = pd.DataFrame({'x': x, 'y': y})
#normalize process
sm_df['crit'] = sm_df.x.map(lambda x: False if ((x<-x_bar_norm)|(x>x_bar_norm)) else True)
g = (ggplot(sm_df, aes(x='x', y='y',group='crit')) + geom_area(aes(fill='crit')) +
theme_minimal() + xlab('Sample Means') + ylab('Probability Density Function') +
ggtitle('Standard Normal Distribution of Sample Mean') +
guides(fill=guide_legend(title=legend_title)))
g.draw()
standard_normal(x_bar_norm, "Conversion Rate = 0.1")
```
## Z-test and More
With CLT and standard normal distribution (sometimes called **Z-distribution**), we now have all the tools for one of the most popular and useful statistical hypothesis test, the **Z-test**. In fact we have already done it with the hypothetical campaign `C`. But let us go back to our original problem of comparing the long-run conversion rates of `A` and `B`. Let our null hypothesis be that they are equal to each other and alpha be 0.05 (we are comfortable with false positive rate of 5%).
```
conv_df
```
We already know how to compare a random variable to a fixed value, but now we have two random variables from two ads campaign. We get around this by comparing **the difference of their sample mean** $\bar{X_\Delta} = \bar{X_{A}} - \bar{X_{B}}$ to 0. This way, our null hypothesis states that there is no difference between the long-run conversion rates of these campaigns. Through another useful statistical concept, we also know that the variance of $\bar{X_\Delta}$ is the sum of sample mean variances of $\bar{X_\text{A}}$ and $\bar{X_\text{B}}$ (Normal Sum Theorem; [Lemon, 2002](https://www.goodreads.com/book/show/3415974-an-introduction-to-stochastic-processes-in-physics)).
Thus, we can calculate the **test statistic** or, specifically for Z-test, **Z-value** as follows:
\begin{align}
\bar{Z_\Delta} &= \frac{\bar{X_\Delta}-\mu}{\sqrt{\frac{\sigma^2_\text{A}}{n_\text{A}} + \frac{\sigma^2_\text{B}}{n_\text{B}}}} \\
&= \frac{\bar{X_\Delta}-\mu}{\sqrt{\sigma^2_\text{pooled} * (\frac{1}{n_\text{A}} + \frac{1}{n_\text{B}})}}
\end{align}
Since we are assuming that `A` and `B` has the same conversion rate, their variance is also assumed to be the same:
$$\sigma^2_{A} = \sigma^2_{B} = \sigma_\text{pooled} = p * (1-p)$$
where $p$ is the total conversions of both campaigns divided by their clicks (**pooled probability**).
In light of the Z-value calculated from our data, we found that p-value of rejecting the null hypothesis that conversion rates of `A` and `B` are equal to each other is less than 3%, lower than our acceptable false positive rate of 5%, so we reject the null hypothesis that they perform equally well. The result of the test is **statistically significant**; that is, it is unlikely enough for us given the null hypothesis.
```
def proportion_test(c1: int, c2: int,
n1: int, n2: int,
mode: str = 'one_sided') -> Tuple[float, float]:
'''
:meth: Z-test for difference in proportion
:param int c1: conversions for group 1
:param int c2: conversions for group 2
:param int n1: impressions for group 1
:param int n2: impressions for group 2
:param str mode: mode of test; `one_sided` or `two_sided`
:return: Z-score, p-value
'''
p = (c1 + c2) / (n1 + n2)
p1 = c1 / n1
p2 = c2 / n2
z = (p1 - p2) / np.sqrt(p * (1 - p) * (1 / n1 + 1 / n2))
if mode == 'two_sided':
p = 2 * (1 - sp.stats.norm.cdf(abs(z)))
elif mode == 'one_sided':
p = 1 - sp.stats.norm.cdf(abs(z))
else:
raise ValueError('Available modes are `one_sided` and `two_sided`')
return z, p
z_value, p_value = proportion_test(c1=conv_df.conv_cnt[0], c2=conv_df.conv_cnt[1],
n1=conv_df.clicks[0], n2=conv_df.clicks[1], mode='two_sided')
print(f'Z-value: {z_value}; p-value: {p_value}')
standard_normal(z_value, "No Difference in Conversion Rates")
```
This rationale extends beyond comparing proportions such as conversion rates. For instance, we can also compare revenues of two different stores, assuming they are i.i.d. However in this case, we do not know the variance of the base distribution $\sigma^2$, as it cannot be derived from our assumption (variance of Bernoulli distribution is $p*(1-p)$ but store revenues are not modelled after a coin flip). The test statistic then is created with sample variance $s^2$ based on our sample group and follows a slightly modified version of standard normal distribution (see [Student's t-test](https://en.wikipedia.org/wiki/Student%27s_t-test)). Your test statistics and sample mean distributions may change, but bottom line of frequentist A/B test is exploiting CLT and frequentist reasoning.
## Confidence Intervals
Notice that we can calculate p-value from Z-value and vice versa. This gives us another canny way to look at the problem; that is, we can calculate the intervals where there is an arbitrary probability, say 95%, that sample mean of `A` or `B` will fall into. We call it **confidence interval**. You can see that despite us rejecting the null hypothesis that their difference is zero, the confidence intervals of both campaigns can still overlap.
Try changing the number of conversion rate and clicks of each group as well as the alpha to see what changes in terms of p-value of Z-test and confidence intervals. You will see that the sample mean distribution gets "wider" as we have fewer samples in a group. Intuitively, this makes sense because the fewer clicks you have collected, the less information you have about true performance of an ads campaign and less confident you are about where it should be. So when designing an A/B test, you should plan to have similar number of sample between both sample groups in order to have similarly distributed sample means.
```
def proportion_plot(c1: int, c2: int,
n1: int, n2: int, alpha: float = 0.05,
mode: str = 'one_sided') -> None:
'''
:meth: plot Z-test for difference in proportion and confidence intervals for each campaign
:param int c1: conversions for group 1
:param int c2: conversions for group 2
:param int n1: impressions for group 1
:param int n2: impressions for group 2
:param float alpha: alpha
:param str mode: mode of test; `one_sided` or `two_sided`
:return: None
'''
p = (c1 + c2) / (n1 + n2)
p1 = c1 / n1
p2 = c2 / n2
se1 = np.sqrt(p1 * (1 - p1) / n1)
se2 = np.sqrt(p2 * (1 - p2) / n2)
z = sp.stats.norm.ppf(1 - alpha / 2)
x1 = np.arange(p1 - 3 * se1, p1 + 3 * se1, 1e-4)
x2 = np.arange(p2 - 3 * se2, p2 + 3 * se2, 1e-4)
y1 = np.array([sp.stats.norm.pdf(i, loc=p1, scale=np.sqrt(p1 * (1 - p1) / n1)) for i in x1])
y2 = np.array([sp.stats.norm.pdf(i, loc=p2, scale=np.sqrt(p2 * (1 - p2) / n2)) for i in x2])
sm_df = pd.DataFrame({'campaign_id': ['Campaign A'] * len(x1) + ['Campaign B'] * len(x2),
'x': np.concatenate([x1, x2]), 'y': np.concatenate([y1, y2])})
z_value, p_value = proportion_test(c1, c2, n1, n2, mode)
print(f'Z-value: {z_value}; p-value: {p_value}')
g = (ggplot(sm_df, aes(x='x', y='y', fill='campaign_id')) +
geom_area(alpha=0.5)
+ theme_minimal() + xlab('Sample Mean Distribution of Each Campaign')
+ ylab('Probability Density Function')
+ geom_vline(xintercept=[p1 + se1 * z, p1 - se1 * z], colour='red')
+ geom_vline(xintercept=[p2+se2*z, p2-se2*z], colour='blue')
+ ggtitle(f'Confident Intervals at alpha={alpha}'))
g.draw()
# interact(ci_plot,
# p1 = widgets.FloatSlider(min=0,max=1,value=conv_df.conv_cnt[0] / conv_df.clicks[0],
# step=1e-3,readout_format='.5f'),
# p2 = widgets.FloatSlider(min=0,max=1,value=conv_df.conv_cnt[1] / conv_df.clicks[1],
# step=1e-3,readout_format='.5f'),
# n1 = widgets.IntSlider(min=10,max=70000,value=conv_df.clicks[0]),
# n2 = widgets.IntSlider(min=10,max=70000,value=conv_df.clicks[1]),
# alpha = widgets.FloatSlider(min=0,max=1,value=0.05))
conv_df.clicks[0], conv_df.clicks[1]
#@title {run: "auto"}
c1 = 5950 #@param {type:"slider", min:0, max:70000}
c2 = 6189 #@param {type:"slider", min:0, max:70000}
n1 = 59504 #@param {type:"slider", min:10, max:70000, step:10}
n2 = 58944 #@param {type:"slider", min:10, max:70000, step:10}
alpha = 0.05 #@param {type:"slider", min:0, max:1, step:1e-3}
proportion_plot(c1,c2,n1,n2,alpha)
```
## Any Hypothesis Test Is Statistically Significant with Enough Samples
Because we generated the data, we know that conversion rate of campaign `A` (10%) is about 95% that of campaign `B` (10.5%). If we go with our gut feeling, most of us would say that they are practically the same; yet, our Z-test told us that they are different. The reason for this becomes apparent graphically when we decrease the number of clicks for both campaigns in the plot above. The Z-test stops becoming significant when both campaigns have about 50,000 clicks each, even though they still have exactly the same conversion rate. The culprit is our Z-value calculated as:
\begin{align}
\bar{Z_\Delta} &= \frac{\bar{X_\Delta}-\mu}{\sqrt{\sigma^2_\text{pooled} * (\frac{1}{n_\text{A}} + \frac{1}{n_\text{B}})}}
\end{align}
Notice number of clicks $n_\text{A}$ and $n_\text{B}$ hiding in the denominator. Our test statistics $\bar{Z_\Delta}$ will go infinitely higher as long as we collect more clicks. If both campaigns `A` and `B` have one million clicks each, the difference of as small as 0.1% will be detected as statistically significant. Try adjusting the probabilities $p1$ and $p2$ in the plot below and see if the area of statistical significance expands or contracts as the difference between the two numbers changes.
```
def significance_plot(p1,p2):
n1s = pd.DataFrame({'n1':[10**i for i in range(1,7)],'k':0})
n2s = pd.DataFrame({'n2':[10**i for i in range(1,7)],'k':0})
ns = pd.merge(n1s,n2s,how='outer').drop('k',1)
ns['p_value'] = ns.apply(lambda row: proportion_test(p1*row['n1'], p2*row['n2'],row['n1'],row['n2'])[1], 1)
g = (ggplot(ns,aes(x='factor(n1)',y='factor(n2)',fill='p_value')) + geom_tile(aes(width=.95, height=.95)) +
geom_text(aes(label='round(p_value,3)'), size=10)+ theme_minimal() +
xlab('Number of Samples in A') + ylab('Number of Samples in B') +
guides(fill=guide_legend(title="p-value")))
g.draw()
# interact(significance_plot,
# p1 = widgets.FloatSlider(min=0,max=1,value=conv_df.conv_cnt[0] / conv_df.clicks[0],
# step=1e-3,readout_format='.5f'),
# p2 = widgets.FloatSlider(min=0,max=1,value=conv_df.conv_cnt[1] / conv_df.clicks[1],
# step=1e-3,readout_format='.5f'))
#@title {run: "auto"}
p1 = 0.09898494218876042 #@param {type:"slider", min:0, max:1, step:1e-3}
p2 = 0.10367467426710097 #@param {type:"slider", min:0, max:1, step:1e-3}
significance_plot(p1,p2)
```
More practically, look at cumulative conversion rates and z-values of `A` and `B` on a daily basis. Every day that we check the results based on cumulative clicks and conversions, we will come up with a different test statistic and p-value. Difference in conversion rates seem to stabilize after 20 days; however, notice that if you stop the test at day 25 or so, you would say it is NOT statistically significant, whereas if you wait a little longer, you will get the opposite result. The only thing that changes as time goes on is that we have more samples.
```
g = (ggplot(rates_df, aes(x='timesteps', y='value', color='variable')) + geom_line() + theme_minimal() +
xlab('Days of Experiment Run') + ylab('Cumulative Conversions / Cumulative Clicks'))
g
#test
conv_days['cumu_z_value'] = conv_days.apply(lambda row: proportion_test(row['cumu_conv_a'],
row['cumu_conv_b'],row['cumu_click_a'],
row['cumu_click_b'], mode='two_sided')[0],1)
conv_days['cumu_p_value'] = conv_days.apply(lambda row: proportion_test(row['cumu_conv_a'],
row['cumu_conv_b'],row['cumu_click_a'],
row['cumu_click_b'], mode='two_sided')[1],1)
#plot
g = (ggplot(conv_days, aes(x='timesteps',y='cumu_z_value',color='cumu_p_value')) + geom_line() + theme_minimal() +
xlab('Days of Campaign') + ylab('Z-value Calculated By Cumulative Data') +
geom_hline(yintercept=[sp.stats.norm.ppf(0.95),sp.stats.norm.ppf(0.05)], color=['red','green']) +
annotate("text", label = "Above this line A is better than B", x = 20, y = 2, color = 'red') +
annotate("text", label = "Below this line B is better than A", x = 20, y = -2, color = 'green'))
g
```
## Minimum Detectable Effect, Power and Required Sample Size
We argue that this too-big-to-fail phenomena among sample groups is especially dangerous in the context of today's "big data" society. Gone are the days where statistical tests are done among two control groups of 100 people each using paper survey forms. Now companies are performning A/B testing between ad variations that could have tens of thousands or more samples (impressions or clicks), and potentially all of them will be "statistically significant".
One way to remedy this is to do what frequentists do best: make more assumptions, more specifically **two** more.
First, if we want to find out whether `B` has *better* conversion than `A`, we do not only make assumptions about the mean of the null hypothesis but **minimally by how much**, aka the mean of the alternative hypothesis. We can set **mininum detectable effect** as the smallest possible difference that would be worth investing the time and money in one campaign over the other; let say that from experience we think it is 1%. We then ask:
> What is the mininum number of samples in a sample group (clicks in a campaign) should we have in order to reject the null hypothesis at a **significance level ($\alpha$)** and **power ($1-\beta$)** when the difference in sample means is [1%]()?
The **significance level ($\alpha$)** takes care of the false positive rate promise, for example to be lower than 5% (95% specificity), where as **power ($1-\beta$)** indicates the desired recall, for example to be 80% (20% false negative rate).
```
def power_plot(mean_h0: float,
mean_h1: float,
critical: float) -> None:
'''
:meth: plot Z-test for difference in proportion with power and alpha highlighted
:param float mean1: mean for null hypothesis
:param float mean2: mean for alternative hypothesis
:param float critical: critical value selected
:return: None
'''
x = np.arange(-4,6,0.1)
dat = pd.DataFrame({'x':x,
'y1':sp.stats.norm.pdf(x,mean_h0,1),
'y2':sp.stats.norm.pdf(x,mean_h1,1)})
dat['x1'] = dat.x.map(lambda x: np.where(x>critical,x,None))
dat['x2'] = dat.x.map(lambda x: np.where(x>critical,x,None))
g = (
ggplot(dat, aes(x = 'x')) +
geom_line(aes(y = 'y1'), color='red', size = 1.2) +
geom_line(aes(y = 'y2'), color='blue',size = 1.2) +
geom_vline(xintercept=mean_h0,linetype='dashed',color='red')+
geom_vline(xintercept=mean_h1,linetype='dashed',color='blue')+
geom_area(aes(y = 'y1', x = 'x1'), fill='red') +
geom_area(aes(y = 'y2', x = 'x2'), fill = 'blue', alpha = 0.3) +
ylab('Probability Density Function') + xlab('Z value')+
ggtitle(f'significance level = {sp.stats.norm.pdf(critical,mean_h0,1):.2f}; power ={1-sp.stats.norm.pdf(critical,mean_h1,1):.2f}')+
theme_minimal()
)
g.draw()
#@title {run: "auto"}
mean_h0 = 0 #@param {type:"slider", min:0, max:6, step:1e-3}
mean_h1 = 3.18 #@param {type:"slider", min:0, max:6, step:1e-3}
critical = 2 #@param {type:"slider", min:0, max:3, step:1e-1}
power_plot(mean_h0, mean_h1, critical)
```
Given a minimum detectable effect $\text{MDE}$, significance level $\alpha$ and power $1-\beta$, we can calculate the critical Z value $Z_{critical}$ that satisfies these conditions, where the required number of samples in each group is $n$ and $mn$ (where m is multiplier):
\begin{align}
Z_{critical} &= \mu_{H0} + Z_{\alpha} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}\\
Z_{critical} &= 0 + Z_{\alpha} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}\\
Z_{critical} &= \mu_{H1}-\mu_{H0} - Z_{\beta} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}\\
Z_{critical} &= \text{MDE} - Z_{\beta} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}\\
0 + Z_{\alpha} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})} &= \text{MDE} - Z_{\beta} * \sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}\\
Z_{\alpha} + Z_{\beta} &= \frac{\text{MDE}}{\sqrt{\sigma^2 * (\frac{1}{n} + \frac{1}{mn})}} \\
\frac{(m+1)\sigma^2}{mn} &= (\frac{\text{MDE}}{Z_{\alpha} + Z_{\beta}})^2 \\
n &= \frac{m+1}{m}(\frac{(Z_{\alpha} + Z_{\beta}) \sigma}{\text{MDE}})^2 \\
n &= 2(\frac{(Z_{\alpha} + Z_{\beta}) \sigma}{\text{MDE}})^2; m=1
\end{align}
Second, we make yet another crucial assumption about **the variance $\sigma^2$ we expect**. Remember we used to estimate the variance by using the pooled probability of our sample groups, but here we have not even started the experiments. In a conventional A/B testing scenario, we are testing whether an experimental variation is better than the existing one, so one choice is **using sample variance of a campaign you are currently running**; for instance, if `A` is our current ads and we want to know if we should change to `B`, then we will use conversion rate of `A` from past time period to calculate the variance, say 10%.
Let us go back in time before we even started our 2-month-long test between campaign `A` and `B`. Now we assume not only acceptable false positive rate alpha of 0.05 but also minimum detectable effect of 1% and expected variance of $\sigma^2 = 0.1 * (1-0.1) = 0.09$, then we calculate that the minimum number of samples we should collect for each campaign. You can see that should we have done that we would have not been able to reject the null hypothesis, and stuck with campaign `A` going forward.
The upside is that now we only have to run the test for about 5 days instead of 60 days assuming every day is the same for the campaigns (no peak traffic on weekends, for instance). The downside is that our null hypothesis gets much more specific with not only one but three assumptions:
* Long-run conversion rate of `B` is no better than `A`'s
* The difference that will matter to us is at least 1%
* The expected variance conversion rates is $\sigma^2 = 0.1 * (1-0.1) = 0.09$
This fits many A/B testing scenarios since we might not want to change to a new variation even though it is better but not so much that we are willing to invest our time and money to change our current setup. Try adjusting $\text{MDE}$ and $\sigma$ in the plot below and see how the number of required samples change.
```
def proportion_samples(mde: float, p: float, m: float = 1,
alpha: float = 0.05,
beta: float = 0.8,
mode: str = 'one_sided') -> float:
'''
:meth: get number of required sample based on minimum detectable difference (in absolute terms)
:param float mde: minimum detectable difference
:param float p: pooled probability of both groups
:param float m: multiplier of number of samples; groups are n and nm
:param float alpha: alpha
:param float beta: beta
:param str mode: mode of test; `one_sided` or `two_sided`
:return: estimated number of samples to get significance
'''
variance = p * (1 - p)
z_b = sp.stats.norm.ppf(beta)
if mode == 'two_sided':
z_a = sp.stats.norm.ppf(1 - alpha / 2)
elif mode == 'one_sided':
z_a = sp.stats.norm.ppf(1 - alpha)
else:
raise ValueError('Available modes are `one_sided` and `two_sided`')
return ((m + 1) / m) * variance * ((z_a+z_b) / mde)**2
def plot_proportion_samples(mde, p, m=1, alpha=0.05,beta=0.8, mode='one_sided'):
minimum_samples = proportion_samples(mde, p,m, alpha,beta, mode)
g = (ggplot(conv_days, aes(x='cumu_click_a',y='cumu_z_value',color='cumu_p_value')) + geom_line() +
theme_minimal() +
xlab('Number of Samples per Campaign') + ylab('Z-value Calculated By Cumulative Data') +
geom_hline(yintercept=[sp.stats.norm.ppf(0.95),sp.stats.norm.ppf(0.05)], color=['red','green']) +
annotate("text", label = "Above this line A is better than B", x = 30000, y = 2, color = 'red') +
annotate("text", label = "Below this line B is better than A", x = 30000, y = -2, color = 'green') +
annotate("text", label = f'Minimum required samples at MDE {mde}={int(minimum_samples)}', x = 30000, y = 0,) +
geom_vline(xintercept=minimum_samples))
g.draw()
#@title {run: "auto"}
mde = 0.01 #@param {type:"slider", min:0.001, max:0.01, step:1e-3}
p = 0.1 #@param {type:"slider", min:0, max:1, step:1e-3}
m = 1 #@param {type:"slider", min:0, max:1, step:1e-1}
p_value = 0.05 #@param {type:"slider", min:0.01, max:0.1, step:1e-3}
mode = 'one_sided' #@param ['one_sided','two_sided'] {type:"string"}
plot_proportion_samples(mde, p, m, alpha, mode)
```
## You Will Get A Statistically Significant Result If You Try Enough Times
The concept p-value represents is false positive rate of our test, that is, how unlikely it is to observe our sample groups given that they do not have different conversion rates in the long run. Let us re-simulate our campaigns `A` and `B` to have equal expectation of 10%. If we apply our current method, we can be comfortably sure we will not get statistical significance (unless we have an extremely large number of samples).
```
conv_days = gen_bernoulli_campaign(p1 = 0.10,
p2 = 0.10,
timesteps = 60,
scaler=100,
seed = 1412) #god-mode
conv_days.columns = [i.replace('impression','click') for i in conv_days.columns] #function uses impressions but we use clicks
conv_days['cumu_z_value'] = conv_days.apply(lambda row: proportion_test(row['cumu_conv_a'],
row['cumu_conv_b'],row['cumu_click_a'],
row['cumu_click_b'], mode='two_sided')[0],1)
conv_days['cumu_p_value'] = conv_days.apply(lambda row: proportion_test(row['cumu_conv_a'],
row['cumu_conv_b'],row['cumu_click_a'],
row['cumu_click_b'], mode='two_sided')[1],1)
conv_days['z_value'] = conv_days.apply(lambda row: proportion_test(row['conv_a'],
row['conv_b'],row['click_a'],
row['click_b'], mode='two_sided')[0],1)
conv_days['p_value'] = conv_days.apply(lambda row: proportion_test(row['conv_a'],
row['conv_b'],row['click_a'],
row['click_b'], mode='two_sided')[1],1)
g = (ggplot(conv_days, aes(x='timesteps',y='cumu_z_value',color='cumu_p_value')) + geom_line() + theme_minimal() +
xlab('Days in Campaign') + ylab('Z-value Calculated By Cumulative Data') +
geom_hline(yintercept=[sp.stats.norm.ppf(0.975),sp.stats.norm.ppf(0.025)], color=['red','red']))
g
```
Another approach is instead of doing the test only once, we **do it every day using clicks and conversions of that day alone**. We will have 60 tests where 3 of them give statistically significant results that `A` and `B` have different conversion rates in the long run. The fact that we have exactly 5% of the tests turning positive despite knowing that none of them should is not a coincidence. The Z-value is calculated based on alpha of 5%, which means even if there is no difference at 5% of the time we perform this test with this specific set of assumptions we will still have a positive result ([Obligatory relevant xkcd strip](https://xkcd.com/882/); Munroe, n.d.).
```
g = (ggplot(conv_days, aes(x='timesteps',y='z_value',color='p_value')) + geom_line() + theme_minimal() +
xlab('Each Day in Campaign') + ylab('Z-value Calculated By Daily Data') +
geom_hline(yintercept=[sp.stats.norm.ppf(0.975),sp.stats.norm.ppf(0.025)], color=['red','red']) +
ggtitle(f'We Have {(conv_days.p_value<0.05).sum()} False Positives Out of {conv_days.shape[0]} Days ({100*(conv_days.p_value<0.05).sum()/conv_days.shape[0]}%)'))
g
```
Not many people will test online ads campaigns based on daily data, but many researchers perform repeated experiments and by necessity repeated A/B tests as shown above. If you have a reason to believe that sample groups from different experiments have the same distribution, you might consider grouping them together and perform one large test as usual. Otherwise, you can tinker the assumption of how much false positive you can tolerate. One such approach, among [others](https://en.wikipedia.org/wiki/Multiple_comparisons_problem), is the [Bonferroni correction](http://mathworld.wolfram.com/BonferroniCorrection.html). It scales your alpha down by the number of tests you perform to make sure that your false positive rate stays at most your original alpha. In our case, if we cale our alpha as$\alpha_{\text{new}}=\frac{0.05}{60} \approx 0.0008$, we will have the following statistically non-significant results.
```
g = (ggplot(conv_days, aes(x='timesteps',y='z_value',color='p_value')) + geom_line() + theme_minimal() +
xlab('Each Day in Campaign') + ylab('Z-value Calculated By Daily Data') +
geom_hline(yintercept=[sp.stats.norm.ppf(1-0.0008/2),sp.stats.norm.ppf(0.0008/2)], color=['red','red']) +
ggtitle(f'We Have {(conv_days.p_value<0.05).sum()} False Positives Out of {conv_days.shape[0]} Days ({100*(conv_days.p_value<0.05).sum()/conv_days.shape[0]}%)'))
g
```
## Best Practices
To the best of our knowledge, the most reasonable and practical way to perform a frequentist A/B test is to know your assumptions, including but not limited to:
* What distribution should your data be assumed to be drawn from? In many cases, we use Bernoulli distribution for proportions, Poisson distribution for counts and normal distribution for real numbers.
* Are you comparing your sample group to a fixed value or another sample group?
* Do you want to know if the expectation of the sample group is equal to, more than or less than its counterpart?
* What is the minimum detectable effect and how many samples should you collect? What is a reasonable variance to assume in order to calculated required sample size?
* What is the highest false positive rate $\alpha$ that you can accept?
With these assumptions cleared, you can most likely create a test statistics, then with frequentist reasoning, you can determine if the sample group you collected are unlikely enough that you would reject your null hypothesis because of it.
## References
* Lemons, D. S. (2002). An introduction to stochastic processes in physics. Baltimore: Johns Hopkins University Press.
Normal Sum Theorem; p34
* Munroe, Randall (n.d.). HOW TO Absurd Scientific Answers toCommon Real-world Problems. Retrieved from https://xkcd.com/882/
* Reinhart, A. (2015, March 1). The p value and the base rate fallacy. Retrieved from https://www.statisticsdonewrong.com/p-value.html
* [whuber](https://stats.stackexchange.com/users/919/whuber) (2017). Can a probability distribution value exceeding 1 be OK?. Retrieved from https://stats.stackexchange.com/q/4223
## Appendix
### Bessel's Correction for Sample Variance
Random variables can be thought of as estimation of the real values such as sample variance is an estimation of variance from the "true" distribution. An estimator is said to be **biased** when its expectation is not equal to the true value (not to be confused with LLN where the estimator itself approaches the true value as number of samples grows).
We can repeat the experiment we did for LLN with sample mean and true mean, but this time we compare how biased version ($\frac{1}{n} \sum_{i=1}^{n} (X_i - \bar{X})^2$) and unbiased version ($\frac{1}{n-1} \sum_{i=1}^{n} (X_i - \bar{X})^2$) of sample variance approach true variance as number of sample groups grow. Clearly, we can see that biased sample variance normally underestimates the true variance.
```
def var(x, dof=0):
n = x.shape[0]
mu = np.sum(x)/n
return np.sum((x - mu)**2) / (n-dof)
n_total = 10000 #total number of stuff
n_sample = 100 #number of samples per sample group
sg_range = range(1,100) #number of sample groups to take average of sample variances from
r = np.random.normal(loc=0,scale=1,size=n_total) #generate random variables based on Z distribution
pop_var = var(r) #true variance of the population
mean_s_bs = []
mean_s_us = []
for n_sg in sg_range:
s_bs = []
s_us =[]
for i in range(n_sg):
sg = np.random.choice(r,size=n_sample,replace=False)
s_bs.append(var(sg)) #biased sample variance
s_us.append(var(sg,1)) #unbiased sample variance
mean_s_bs.append(np.mean(s_bs))
mean_s_us.append(np.mean(s_us))
s_df = pd.DataFrame({'nb_var':sg_range,'biased_var':mean_s_bs,
'unbiased_var':mean_s_us}).melt(id_vars='nb_var')
g = (ggplot(s_df,aes(x='nb_var',y='value',color='variable',group='variable')) + geom_line() +
geom_hline(yintercept=pop_var) + theme_minimal() +
xlab('Number of Sample Groups') + ylab('Sample Mean of Sample Variance in Each Group'))
g
```
We derive exactly how much the bias is as follows:
$$B[s_{biased}^2] = E[s_{biased}^2] - \sigma^2 = E[s_{biased}^2 - \sigma^2]$$
where $B[s^2]$ is the bias of estimator (biased sample variance) $s_{biased}^2$ of variance $\sigma^2$. Then we can calculate the bias as:
\begin{align}
E[s_{biased}^2 - \sigma^2] &= E[\frac{1}{n} \sum_{i=1}^n(X_i - \bar{X})^2 - \frac{1}{n} \sum_{i=1}^n(X_i - \mu)^2] \\
&= \frac{1}{n}E[(\sum_{i=1}^n X_i^2 -2\bar{X}\sum_{i=1}^n X_i + n\bar{X^2}) - (\sum_{i=1}^n X_i^2 -2\mu\sum_{i=1}^n X_i + n\mu^2)] \\
&= E[\bar{X^2} - \mu^2 - 2\bar{X^2} + 2\mu\bar{X}] \\
&= -E[\bar{X^2} -2\mu\bar{X} +\mu^2] \\
&= -E[(\bar{X} - \mu)^2] \\
&= -\frac{\sigma^2}{n} \text{; variance of sample mean}\\
E[s_{biased}^2] &= \sigma^2 - \frac{\sigma^2}{n} \\
&= (1-\frac{1}{n})\sigma^2
\end{align}
Therefore if we divide biased estimator $s_{biased}^2$ by $1-\frac{1}{n}$, we will get an unbiased estimator of variance $s_{unbiased}^2$,
\begin{align}
s_{unbiased}^2 &= \frac{s_{biased}^2}{1-\frac{1}{n}} \\
&= \frac{\frac{1}{n} \sum_{i=1}^n(X_i - \bar{X})^2}{1-\frac{1}{n}}\\
&= \frac{1}{n-1} \sum_{i=1}^n(X_i - \bar{X})^2
\end{align}
This is why the sample variance we usually use $s^2$ has $n-1$ instead of $n$. Also, this is not to be confused with the variance of sample means which is $\frac{\sigma^2}{n}$ when variance of the base distribution is known or assumed and $\frac{s^2}{n}$ when it is not.
### Mass vs Density
You might wonder why the sample mean distribution has Y-axis that exceeds 1 even though it seemingly should represents probability of each value of sample mean. The short answer is that it does not represents probability but rather **probability density function**. The long answer is that there are two ways of representing probability distributions depending on whether they describe **discrete** or **continuous** data. See also this excellent [answer on Stack Exchange](https://stats.stackexchange.com/questions/4220/can-a-probability-distribution-value-exceeding-1-be-ok) (whuber, 2017).
**Discrete probability distributions** contain values that are finite (for instance, $1, 2, 3, ...$) or countably infinite (for instance, $\frac{1}{2^i}$ where $i=1, 2, 3, ...$). They include but not limited to distributions we have used to demonstrate CLT namely uniform, Bernoulli and Poisson distribution. In all these distributions, the Y-axis, now called **probability mass function**, represents the exact probability each value in the X-axis will take, such as the Bernouilli distribution we have shown before:
```
flips = np.random.choice([0,1], size=n, p=[1-p,p])
flips_df = pd.DataFrame(flips)
flips_df.columns = ['conv_flag']
g = (ggplot(flips_df,aes(x='factor(conv_flag)')) + geom_bar(aes(y = '(..count..)/sum(..count..)')) +
theme_minimal() + xlab('Value') + ylab('Probability Mass Function') +
ggtitle(f'Bernoulli Distribution'))
g
```
**Continuous probability distribution** contains values that can take infinitely many, uncountable values (for instance, all real numbers between 0 and 1). Since there are infinitely many values, the probability of each individual value is essentially zero (what are the chance of winning the lottery that has infinite number of digits). Therefore, instead of the exact probability of each value (probability mass function), the Y-axis only represents the **probability density function**. This can be thought of as the total probability within an immeasurably small interval around the value. Take an example of a normal distribution with expectation $\mu=0$ and variance $\sigma^2=0.01$. The probability density function of the value 0 is described as:
\begin{align}
f(x) &= \frac{1}{\sqrt{2\pi\sigma^2}} e^{\frac{-(x-\mu)^2}{2\sigma^2}}\\
&= \frac{1}{\sqrt{2\pi(0.01)}} e^{\frac{-(x-0)^2}{2(0.01)}} \text{; }\mu=0;\sigma^2=0.01 \\
&\approx 3.989 \text{; when } x=0
\end{align}
This of course does not mean that there is 398.9% chance that we will draw the value 0 but the density of the probability around the value. The actual probability of that interval around 0 is 3.989 times an immeasurably small number which will be between 0 and 1.
Intuitively, we can think of these intervals as start from relatively large numbers such as 0.1 and gradually decreases to smaller numbers such as 0.005. As you can see from the plot below, the plot becomes more fine-grained and looks more "normal" as the intervals get smaller.
```
def prob_density(step,mu=0,sigma=0.1):
x = np.arange(-0.5, 0.5, step)
y = np.array([sp.stats.norm.pdf(i, loc=mu, scale=sigma) for i in x])
sm_df = pd.DataFrame({'x': x, 'y': y})
g = (ggplot(sm_df, aes(x='x', y='y')) + geom_bar(stat='identity') +
theme_minimal() + xlab('Value') + ylab('Probability Density Function') +
ggtitle(f'Normal Distribution with Expectation={mu} and Variance={sigma**2:2f}'))
g.draw()
# interact(prob_density, step=widgets.FloatSlider(min=5e-3,max=1e-1,value=1e-1,step=1e-3,readout_format='.3f'))
#@title {run: "auto"}
step = 0.1 #@param {type:"slider", min:5e-3, max:0.1, step:1e-3}
prob_density(step)
```
|
github_jupyter
|
## CS536: Perceptrons
#### Done by - Vedant Choudhary, vc389
In the usual way, we need data that we can fit and analyze using perceptrons. Consider generating data points (X, Y) in the following way:
- For $i = 1,....,k-1$, let $X_i ~ N(0, 1)$ (i.e. each $X_i$ is an i.i.d. standard normal)
- For $i = k$, generate $X_k$ in the following way: let $D ~ Exp(1)$, and for a parameter $\epsilon > 0$ take
$X_k = (\epsilon + D)$ with probability 1/2
$X_k = -(\epsilon + D)$ with probability 1/2
The effect of this is that while $X_1,...X_{k-1}$ are i.i.d. standard normals, $X_k$ is distributed randomly with some gap (of size $2\epsilon$ around $X_k = 0$. We can then classify each point according to the following:
$Y = 1$ if $X_k$ > 0
$Y = -1$ if $X_k$ < 0
We see that the class of each data point is determined entirely by the value of the $X_k$ feature
#### 1. Show that there is a perceptron that correctly classifies this data. Is this perceptron unique? What is the ‘best’ perceptron for this data set, theoretically?
**Solution:** The perceptron generated when the data is linearly separable is unique. Best perceptron for a data would be the perceptron that relies heaviliy on the last feature of the dataset, as target value is governed by that.
```
# Importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pprint
from tqdm import tqdm
%matplotlib inline
# Creating X (feature) vectors for the data
def create_data(k, m, D, epsilon):
X_k_minus_1 = np.random.normal(0, 1, (m,k-1))
X_k = []
for i in range(m):
temp = np.random.choice(2, 1, p=[0.5,0.5])
# print(temp)
if temp == 1:
X_k.append(epsilon + D)
else:
X_k.append(-(epsilon + D))
X_k = np.asarray(X_k).reshape((1,m))
# print(X_k_minus_1)
# print(X_k)
return np.concatenate((X_k_minus_1, X_k.T), axis=1)
# Creating target column for the data
def create_y(X, m):
y = []
for i in range(m):
if X[i][-1] > 0:
y.append(1)
else:
y.append(-1)
return y
# Combining all the sub data points into a dataframe
def create_dataset(k, m, epsilon, D):
X = np.asarray(create_data(k, m, epsilon, D))
y = np.asarray(create_y(X, m)).reshape((m,1))
# print(X.shape,y.shape)
# Training data is an appended version of X and y arrays
data = pd.DataFrame(np.append(X, y, axis=1), columns=["X" + str(i) for i in range(1,k+1)]+['Y'])
return data
# Global Variables - k = 20, m = 100, epsilon = 1
k, m, epsilon = 20, 100, 1
D = float(np.random.exponential(1, 1))
train_data = create_dataset(k, m, epsilon, D)
train_data.head()
```
#### 2. We want to consider the problem of learning perceptrons from data sets. Generate a set of data of size m = 100 with k = 20, $\epsilon$ = 1
##### - Implement the perceptron learning algorithm. This data is separable, so the algorithm will terminate. How does the output perceptron compare to your theoretical answer in the previous problem?
```
# Class for Perceptron
class Perceptron():
def __init__(self):
pass
'''
Calculates the sign of the predicted value
Input: dot product (X.w + b)
Return: Predicted sign of f_x
'''
def sign_function(self, data_vec):
return np.array([1 if val >= 1 else -1 for val in data_vec])[:, np.newaxis]
'''
Perceptron learning algorithm according to the notes posted
Input: dataset
Return: final weights and biases, along with number of steps for convergence
and upper bound of theoretical convergence
'''
def pla(self, data):
X = np.asarray(data.iloc[:,:-1])
y = np.asarray(data.iloc[:,-1:])
num_samples, num_features = X.shape
# Initialize weight and bias parameters
self.w = np.zeros(shape=(num_features, 1))
self.bias = 0
count_till_solution = 0
f_x = [0]*num_samples
i = 0
theoretical_termination = []
while True:
mismatch = 0
for i in range(num_samples):
# Calculate the mapping function f(x)
f_x[i] = float(self.sign_function(np.dot(X[i].reshape((num_features, 1)).T, self.w) + self.bias))
# Compute weights if f_x != y
if float(f_x[i]) != float(y[i]):
mismatch += 1
self.w += np.dot(X[i].reshape((num_features, 1)), y[i].reshape((1,1)))
self.bias += y[i]
count_till_solution += 1
min_margin = 99999
for i in range(num_samples):
margin = abs(np.dot(self.w.T, X[i].reshape(-1,1))/(np.linalg.norm(self.w)))
if margin < min_margin:
min_margin = margin
theoretical_termination.append(int(1/(min_margin**2)))
f_x = np.asarray(f_x).reshape((num_samples, 1))
i += 1
if (np.array_equal(y, f_x)) or (mismatch >= 0.3*num_samples and count_till_solution >= 5000):
break
return self.w, self.bias, count_till_solution, max(theoretical_termination)
'''
Predicts the target value based on a data vector
Input - a single row of dataset or a single X vector
Return - predicted value
'''
def predict(self, instance_data):
instance_data = np.asarray(instance_data)
prediction = self.sign_function(np.dot(self.w.T, instance_data.reshape((len(instance_data),1))) + self.bias)
return prediction
'''
Predicts the target value and then calculates error based on the predictions
Input - dataset, decision tree built
Return - error
'''
def fit(self, data):
error = 0
for i in range(len(data)):
prediction = self.predict(data.iloc[i][:-1])
if prediction != data.iloc[i][-1]:
print("Not equal")
error += 1
return error/len(data)
perceptron = Perceptron()
final_w, final_b, num_steps, theoretical_steps = perceptron.pla(train_data)
print("Final weights:\n",final_w)
print("Final bias:\n", final_b)
print("Number of steps till convergence: \n", num_steps)
print("Theoretical number of steps till convergence can be found for linear separation: ", theoretical_steps)
error = perceptron.fit(train_data)
error
plt.plot(np.linspace(0, 20, 20), list(final_w))
plt.title("Weight vector by feature")
plt.xlabel("Feature number")
plt.ylabel("Weights")
plt.show()
```
**Solution:** On implementing the perceptron learning algorithm on the dataset provided, we see that it is similar to our theoretical answer. The last feature has highest weight associated to it (as can be seen from the graph generated above). This is so because the data is created such that the target value depends solely on the last feature value.
#### 3. For any given data set, there may be multiple separators with multiple margins - but for our data set, we can effectively control the size of the margin with the parameter $\epsilon$ - the bigger this value, the bigger the margin of our separator.
#### – For m = 100, k = 20, generate a data set for a given value of $\epsilon$ and run the learning algorithm to completion. Plot, as a function of $\epsilon$ ∈ [0, 1], the average or typical number of steps the algorithm needs to terminate. Characterize the dependence.
```
def varied_margin():
k, m = 20, 100
epsilon = list(np.arange(0, 1.05, 0.02))
avg_steps = []
for i in tqdm(range(len(epsilon))):
steps = []
for j in range(100):
train_data = create_dataset(k, m, epsilon[i], D)
perceptron = Perceptron()
final_w, final_b, num_steps, theoretical_steps = perceptron.pla(train_data)
steps.append(num_steps)
avg_steps.append(sum(steps)/len(steps))
plt.plot(epsilon, avg_steps)
plt.title("Number of steps w.r.t. margin")
plt.xlabel("Margin value")
plt.ylabel("#Steps")
plt.show()
varied_margin()
```
**Solution:** On plotting average number of steps needed for termination of a linearly separable data w.r.t. $\epsilon$, we observe that bigger the margin, lesser the number of steps are needed for the perceptron to terminate. This dependence can be proved by the Perceptron Convergence Theorem - If data is linearly separable, perceptron algorithm will find a linear classifier that classifies all data correctly, whose convergence is inversely proportional to the square of margin.
This means as the margin increases, the convergence steps decrease.
#### 4. One of the nice properties of the perceptron learning algorithm (and perceptrons generally) is that learning the weight vector w and bias value b is typically independent of the ambient dimension. To see this, consider the following experiment:
#### – Fixing m = 100, $\epsilon$ = 1, consider generating a data set on k features and running the learning algorithm on it. Plot, as a function k (for k = 2, . . . , 40), the typical number of steps to learn a perceptron on a data set of this size. How does the number of steps vary with k? Repeat for m = 1000.
```
def varied_features(m):
epsilon = 1
D = float(np.random.exponential(1, 1))
k = list(np.arange(2, 40, 1))
steps = []
for i in range(len(k)):
train_data = create_dataset(k[i], m, epsilon, D)
perceptron = Perceptron()
final_w, final_b, num_steps, theoretical_steps = perceptron.pla(train_data)
steps.append(num_steps)
plt.plot(k, steps)
plt.title("Number of steps w.r.t. features")
plt.xlabel("#Features")
plt.ylabel("#Steps")
plt.show()
varied_features(100)
varied_features(1000)
```
**Solution:** The number of steps needed for convergence of a linearly separable data through perceptrons is usually independent of number of features the data has. This is shown through the above experiment too. For this case, I see no change in number of steps, but some different runs have shown very random change in number of steps, that also by just 1 step more or less. We cannot establish a trend of convergence w.r.t. the number of features.
#### 5. As shown in class, the perceptron learning algorithm always terminates in finite time - if there is a separator. Consider generating non-separable data in the following way: generate each $X_1, . . . , X_k$ as i.i.d. standard normals N(0, 1). Define Y by
$$Y = 1 if \sum_{i=1}^k{X_i^2} \ge k $$
$$Y = -1 else$$
```
def create_non_separable_data(k, m):
X = np.random.normal(0, 1, (m,k))
y = []
for i in range(m):
total = 0
for j in range(k):
total += X[i][j]**2
if total >= k:
y.append(1)
else:
y.append(-1)
return X, y
def create_non_separable_dataset(k, m):
X, y = create_non_separable_data(k, m)
X = np.asarray(X)
y = np.asarray(y).reshape((m,1))
# Training data is an appended version of X and y arrays
data = pd.DataFrame(np.append(X, y, axis=1), columns=["X" + str(i) for i in range(1,k+1)]+['Y'])
return data
k, m = 2, 100
train_ns_data = create_non_separable_dataset(k, m)
train_ns_data.head()
perceptron2 = Perceptron()
final_w2, final_b2, num_steps2, theoretical_steps = perceptron2.pla(train_ns_data)
plt.scatter(X1, X2, c=y2)
plt.title("Dataset")
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.show()
```
The data represented above is the data generated from the new rules of creating a non separable data. As can be seen, this data cannot be linearly separated through Perceptrons. A kernel method has to be applied to this data to find a separable hyper-plane.
```
def plot_hyperplane(x1, x2, y, w, b):
slope = -w[0]/w[1]
intercept = -b/w[1]
x_hyperplane = np.linspace(-3,3,20)
y_hyperplane = slope*x_hyperplane + intercept
plt.scatter(x1, x2, c=y)
plt.plot(x_hyperplane, y_hyperplane, 'b-')
plt.title("Dataset with fitted hyperplane")
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.show()
X2_1 = train_ns_data.iloc[:,:-2]
X2_2 = train_ns_data.iloc[:,1:-1]
y2 = train_ns_data.iloc[:,-1:]
plot_hyperplane(X2_1, X2_2, y2, final_w2, final_b2)
```
**Solution:** For a linearly non-separable data, perceptron is not a good algorithm to use, because it will never converge. Theoretically, it is possible to find an upper bound on number of steps required to converge (if the data is linearly separable). But, it cannot be put into practice easily, as to compute that, we first need to find the weight vector.
Another thing to note is that, even if there is a convergence, the number of steps needed might be too large, which might bring the problem of computation power.
For this assignment, I have established a heurisitc that if the mismatch % is approximately 30% of the total number of samples and the iterations have been more than 10000, then that means that possibly the data is not separable linearly. My reasoning for this is very straight forward, if 30% of data is still mismatched, it is likely that the mismatch will continue to happen for long, which is not computationally feasible.
|
github_jupyter
|
# 5章 線形回帰
```
# 必要ライブラリの導入
!pip install japanize_matplotlib | tail -n 1
!pip install torchviz | tail -n 1
!pip install torchinfo | tail -n 1
# 必要ライブラリのインポート
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
from IPython.display import display
import torch
import torch.nn as nn
import torch.optim as optim
from torchviz import make_dot
# デフォルトフォントサイズ変更
plt.rcParams['font.size'] = 14
# デフォルトグラフサイズ変更
plt.rcParams['figure.figsize'] = (6,6)
# デフォルトで方眼表示ON
plt.rcParams['axes.grid'] = True
# numpyの浮動小数点の表示精度
np.set_printoptions(suppress=True, precision=4)
```
## 5.3 線形関数(nn.Linear)
### 入力:1 出力:1 の線形関数
```
# 乱数の種固定
torch.manual_seed(123)
# 入力:1 出力:1 の線形関数の定義
l1 = nn.Linear(1, 1)
# 線形関数の表示
print(l1)
# パラメータ名、パラメータ値、shapeの表示
for param in l1.named_parameters():
print('name: ', param[0])
print('tensor: ', param[1])
print('shape: ', param[1].shape)
# 初期値設定
nn.init.constant_(l1.weight, 2.0)
nn.init.constant_(l1.bias, 1.0)
# 結果確認
print(l1.weight)
print(l1.bias)
# テスト用データ生成
# x_npをnumpy配列で定義
x_np = np.arange(-2, 2.1, 1)
# Tensor化
x = torch.tensor(x_np).float()
# サイズを(N,1)に変更
x = x.view(-1,1)
# 結果確認
print(x.shape)
print(x)
# 1次関数のテスト
y = l1(x)
print(y.shape)
print(y.data)
```
### 入力:2 出力:1 の線形関数
```
# 入力:2 出力:1 の線形関数の定義
l2 = nn.Linear(2, 1)
# 初期値設定
nn.init.constant_(l2.weight, 1.0)
nn.init.constant_(l2.bias, 2.0)
# 結果確認
print(l2.weight)
print(l2.bias)
# 2次元numpy配列
x2_np = np.array([[0, 0], [0, 1], [1, 0], [1,1]])
# Tensor化
x2 = torch.tensor(x2_np).float()
# 結果確認
print(x2.shape)
print(x2)
# 関数値計算
y2 = l2(x2)
# shape確認
print(y2.shape)
# 値確認
print(y2.data)
```
### 入力:2 出力:3 の線形関数
```
# 入力:2 出力:3 の線形関数の定義
l3 = nn.Linear(2, 3)
# 初期値設定
nn.init.constant_(l3.weight[0,:], 1.0)
nn.init.constant_(l3.weight[1,:], 2.0)
nn.init.constant_(l3.weight[2,:], 3.0)
nn.init.constant_(l3.bias, 2.0)
# 結果確認
print(l3.weight)
print(l3.bias)
# 関数値計算
y3 = l3(x2)
# shape確認
print(y3.shape)
# 値確認
print(y3.data)
```
## 5.4 カスタムクラスを利用したモデル定義
```
# モデルのクラス定義
class Net(nn.Module):
def __init__(self, n_input, n_output):
# 親クラスnn.Modulesの初期化呼び出し
super().__init__()
# 出力層の定義
self.l1 = nn.Linear(n_input, n_output)
# 予測関数の定義
def forward(self, x):
x1 = self.l1(x) # 線形回帰
return x1
# ダミー入力
inputs = torch.ones(100,1)
# インスタンスの生成 (1入力1出力の線形モデル)
n_input = 1
n_output = 1
net = Net(n_input, n_output)
# 予測
outputs = net(inputs)
```
## 5.6 データ準備
UCI公開データセットのうち、回帰でよく使われる「ボストン・データセット」を用いる。
https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html
オリジナルのデーセットは、17項目の入力値から、不動産価格を予測する目的のものだが、
一番単純な「単回帰モデル」(1入力)のモデルを作るため、このうち``RM``の1項目だけを抽出する。
```
# 学習用データ準備
# ライブラリのインポート
from sklearn.datasets import load_boston
# データ読み込み
boston = load_boston()
# 入力データと正解データ取得
x_org, yt = boston.data, boston.target
# 項目名リスト取得
feature_names = boston.feature_names
# 結果確認
print('元データ', x_org.shape, yt.shape)
print('項目名: ', feature_names)
# データ絞り込み (項目 RMのみ)
x = x_org[:,feature_names == 'RM']
print('絞り込み後', x.shape)
print(x[:5,:])
# 正解データ yの表示
print('正解データ')
print(yt[:5])
# 散布図の表示
plt.scatter(x, yt, s=10, c='b')
plt.xlabel('部屋数')
plt.ylabel('価格')
plt.title('部屋数と価格の散布図')
plt.show()
```
## 5.7 モデル定義
```
# 変数定義
# 入力次元数
n_input= x.shape[1]
# 出力次元数
n_output = 1
print(f'入力次元数: {n_input} 出力次元数: {n_output}')
# 機械学習モデル(予測モデル)クラス定義
class Net(nn.Module):
def __init__(self, n_input, n_output):
# 親クラスnn.Modulesの初期化呼び出し
super().__init__()
# 出力層の定義
self.l1 = nn.Linear(n_input, n_output)
# 初期値を全部1にする
# 「ディープラーニングの数学」と条件を合わせる目的
nn.init.constant_(self.l1.weight, 1.0)
nn.init.constant_(self.l1.bias, 1.0)
# 予測関数の定義
def forward(self, x):
x1 = self.l1(x) # 線形回帰
return x1
# インスタンスの生成
# 1入力1出力の線形モデル
net = Net(n_input, n_output)
# モデル内のパラメータの確認
# モデル内の変数取得にはnamed_parameters関数を利用する
# 結果の第1要素が名前、第2要素が値
#
# predict.weightとpredict.biasがあることがわかる
# 初期値はどちらも1.0になっている
for parameter in net.named_parameters():
print(f'変数名: {parameter[0]}')
print(f'変数値: {parameter[1].data}')
# パラメータのリスト取得にはparameters関数を利用する
for parameter in net.parameters():
print(parameter)
```
### モデル確認
```
# モデルの概要表示
print(net)
# モデルのサマリー表示
from torchinfo import summary
summary(net, (1,))
```
### 損失関数と最適化関数
```
# 損失関数: 平均2乗誤差
criterion = nn.MSELoss()
# 学習率
lr = 0.01
# 最適化関数: 勾配降下法
optimizer = optim.SGD(net.parameters(), lr=lr)
```
## 5.8 勾配降下法
```
# 入力変数x と正解値 ytのテンソル変数化
inputs = torch.tensor(x).float()
labels = torch.tensor(yt).float()
# 次元数確認
print(inputs.shape)
print(labels.shape)
# 損失値計算用にlabels変数を(N,1)次元の行列に変換する
labels1 = labels.view((-1, 1))
# 次元数確認
print(labels1.shape)
# 予測計算
outputs = net(inputs)
# 損失計算
loss = criterion(outputs, labels1)
# 損失値の取得
print(f'{loss.item():.5f}')
# 損失の計算グラフ可視化
g = make_dot(loss, params=dict(net.named_parameters()))
display(g)
# 予測計算
outputs = net(inputs)
# 損失計算
loss = criterion(outputs, labels1)
# 勾配計算
loss.backward()
# 勾配の結果が取得可能に
print(net.l1.weight.grad)
print(net.l1.bias.grad)
# パラメータ修正
optimizer.step()
# パラメータ値が変わる
print(net.l1.weight)
print(net.l1.bias)
# 勾配値の初期化
optimizer.zero_grad()
# 勾配値がすべてゼロになっている
print(net.l1.weight.grad)
print(net.l1.bias.grad)
```
### 繰り返し計算
```
# 学習率
lr = 0.01
# インスタンス生成 (パラメータ値初期化)
net = Net(n_input, n_output)
# 損失関数: 平均2乗誤差
criterion = nn.MSELoss()
# 最適化関数: 勾配降下法
optimizer = optim.SGD(net.parameters(), lr=lr)
# 繰り返し回数
num_epochs = 50000
# 評価結果記録用 (損失関数値のみ記録)
history = np.zeros((0,2))
# 繰り返し計算メインループ
for epoch in range(num_epochs):
# 勾配値初期化
optimizer.zero_grad()
# 予測計算
outputs = net(inputs)
# 損失計算
# 「ディープラーニングの数学」に合わせて2で割った値を損失とした
loss = criterion(outputs, labels1) / 2.0
# 勾配計算
loss.backward()
# パラメータ修正
optimizer.step()
# 100回ごとに途中経過を記録する
if ( epoch % 100 == 0):
history = np.vstack((history, np.array([epoch, loss.item()])))
print(f'Epoch {epoch} loss: {loss.item():.5f}')
```
## 5.9 結果確認
```
# 損失初期値と最終値
print(f'損失初期値: {history[0,1]:.5f}')
print(f'損失最終値: {history[-1,1]:.5f}')
# 学習曲線の表示 (損失)
# 最初の1つを除く
plt.plot(history[1:,0], history[1:,1], 'b')
plt.xlabel('繰り返し回数')
plt.ylabel('損失')
plt.title('学習曲線(損失)')
plt.show()
# 回帰直線の算出
# xの最小値、最大値
xse = np.array((x.min(), x.max())).reshape(-1,1)
Xse = torch.tensor(xse).float()
with torch.no_grad():
Yse = net(Xse)
print(Yse.numpy())
# 散布図と回帰直線の描画
plt.scatter(x, yt, s=10, c='b')
plt.xlabel('部屋数')
plt.ylabel('価格')
plt.plot(Xse.data, Yse.data, c='k')
plt.title('散布図と回帰直線')
plt.show()
```
## 5.10 重回帰モデルへの拡張
```
# 列(LSTAT: 低所得者率)の追加
x_add = x_org[:,feature_names == 'LSTAT']
x2 = np.hstack((x, x_add))
# shapeの表示
print(x2.shape)
# 入力データxの表示
print(x2[:5,:])
# 今度は入力次元数=2
n_input = x2.shape[1]
print(n_input)
# モデルインスタンスの生成
net = Net(n_input, n_output)
# モデル内のパラメータの確認
# predict.weight が2次元に変わった
for parameter in net.named_parameters():
print(f'変数名: {parameter[0]}')
print(f'変数値: {parameter[1].data}')
# モデルの概要表示
print(net)
# モデルのサマリー表示
from torchinfo import summary
summary(net, (2,))
# 入力変数x2 のテンソル変数化
# labels, labels1は前のものをそのまま利用
inputs = torch.tensor(x2).float()
```
### くり返し計算
```
# 初期化処理
# 学習率
lr = 0.01
# インスタンス生成 (パラメータ値初期化)
net = Net(n_input, n_output)
# 損失関数: 平均2乗誤差
criterion = nn.MSELoss()
# 最適化関数: 勾配降下法
optimizer = optim.SGD(net.parameters(), lr=lr)
# 繰り返し回数
num_epochs = 50000
# 評価結果記録用 (損失関数値のみ記録)
history = np.zeros((0,2))
# 繰り返し計算メインループ
for epoch in range(num_epochs):
# 勾配値初期化
optimizer.zero_grad()
# 予測計算
outputs = net(inputs)
# 誤差計算
# 「ディープラーニングの数学」に合わせて2で割った値を損失とした
loss = criterion(outputs, labels1) / 2.0
# 勾配計算
loss.backward()
# パラメータ修正
optimizer.step()
# 100回ごとに途中経過を記録する
if ( epoch % 100 == 0):
history = np.vstack((history, np.array([epoch, loss.item()])))
print(f'Epoch {epoch} loss: {loss.item():.5f}')
```
## 5.11 学習率の変更
```
# 繰り返し回数
#num_epochs = 50000
num_epochs = 2000
# 学習率
#l r = 0.01
lr = 0.001
# モデルインスタンスの生成
net = Net(n_input, n_output)
# 損失関数: 平均2乗誤差
criterion = nn.MSELoss()
# 最適化関数: 勾配降下法
optimizer = optim.SGD(net.parameters(), lr=lr)
# 繰り返し計算メインループ
# 評価結果記録用 (損失関数値のみ記録)
history = np.zeros((0,2))
for epoch in range(num_epochs):
# 勾配値初期化
optimizer.zero_grad()
# 予測計算
outputs = net(inputs)
# 誤差計算
loss = criterion(outputs, labels1) / 2.0
#勾配計算
loss.backward()
# パラメータ修正
optimizer.step()
# 100回ごとに途中経過を記録する
if ( epoch % 100 == 0):
history = np.vstack((history, np.array([epoch, loss.item()])))
print(f'Epoch {epoch} loss: {loss.item():.5f}')
# 損失初期値、最終値
print(f'損失初期値: {history[0,1]:.5f}')
print(f'損失最終値: {history[-1,1]:.5f}')
# 学習曲線の表示 (損失)
plt.plot(history[:,0], history[:,1], 'b')
plt.xlabel('繰り返し回数')
plt.ylabel('損失')
plt.title('学習曲線(損失)')
plt.show()
```
|
github_jupyter
|
# XGBoost vs LightGBM
In this notebook we collect the results from all the experiments and reports the comparative difference between XGBoost and LightGBM
```
import matplotlib.pyplot as plt
import nbformat
import json
from toolz import pipe, juxt
import pandas as pd
import seaborn
from toolz import curry
from bokeh.io import show, output_notebook
from bokeh.charts import Bar
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.glyphs import Rect
from bokeh.models import Range1d
from toolz import curry
from bokeh.io import export_svgs
from IPython.display import SVG, display
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
output_notebook()
```
We are going to read the results from the following notebooks
```
notebooks = {
'Airline':'01_airline.ipynb',
'Airline_GPU': '01_airline_GPU.ipynb',
'BCI': '02_BCI.ipynb',
'BCI_GPU': '02_BCI_GPU.ipynb',
'Football': '03_football.ipynb',
'Football_GPU': '03_football_GPU.ipynb',
'Planet': '04_PlanetKaggle.ipynb',
'Plannet_GPU': '04_PlanetKaggle_GPU.ipynb',
'Fraud': '05_FraudDetection.ipynb',
'Fraud_GPU': '05_FraudDetection_GPU.ipynb',
'HIGGS': '06_HIGGS.ipynb',
'HIGGS_GPU': '06_HIGGS_GPU.ipynb'
}
def read_notebook(notebook_name):
with open(notebook_name) as f:
return nbformat.read(f, as_version=4)
def results_cell_from(nb):
for cell in nb.cells:
if cell['cell_type']=='code' and cell['source'].startswith('# Results'):
return cell
def extract_text(cell):
return cell['outputs'][0]['text']
@curry
def remove_line_with(match_str, json_string):
return '\n'.join(filter(lambda x: match_str not in x, json_string.split('\n')))
def process_nb(notebook_name):
return pipe(notebook_name,
read_notebook,
results_cell_from,
extract_text,
remove_line_with('total RAM usage'),
json.loads)
```
Here we collect the results from all the exeperiment notebooks. The method simply searches the notebooks for a cell that starts with # Results. It then reads that cells output in as JSON.
```
results = {nb_key:process_nb(nb_name) for nb_key, nb_name in notebooks.items()}
results
datasets = [k for k in results.keys()]
print(datasets)
algos = [a for a in results[datasets[0]].keys()]
print(algos)
```
We wish to compare LightGBM and XGBoost both in terms of performance as well as how long they took to train.
```
def average_performance_diff(dataset):
lgbm_series = pd.Series(dataset['lgbm']['performance'])
try:
perf = 100*((lgbm_series-pd.Series(dataset['xgb']['performance']))/lgbm_series).mean()
except KeyError:
perf = None
return perf
def train_time_ratio(dataset):
try:
val = dataset['xgb']['train_time']/dataset['lgbm']['train_time']
except KeyError:
val = None
return val
def train_time_ratio_hist(dataset):
try:
val = dataset['xgb_hist']['train_time']/dataset['lgbm']['train_time']
except KeyError:
val = None
return val
def test_time_ratio(dataset):
try:
val = dataset['xgb']['test_time']/dataset['lgbm']['test_time']
except KeyError:
val = None
return val
metrics = juxt(average_performance_diff, train_time_ratio, train_time_ratio_hist, test_time_ratio)
res_per_dataset = {dataset_key:metrics(dataset) for dataset_key, dataset in results.items()}
results_df = pd.DataFrame(res_per_dataset, index=['Perf. Difference(%)',
'Train Time Ratio',
'Train Time Ratio Hist',
'Test Time Ratio']).T
results_df
results_gpu = results_df.ix[[idx for idx in results_df.index if idx.endswith('GPU')]]
results_cpu = results_df.ix[~results_df.index.isin(results_gpu.index)]
```
Plot of train time ratio for CPU experiments.
```
data = {
'Ratio': results_cpu['Train Time Ratio'].values.tolist() + results_cpu['Train Time Ratio Hist'].values.tolist(),
'label': results_cpu.index.values.tolist()*2,
'group': ['xgb/lgb']*len(results_cpu.index.values) + ['xgb_hist/lgb']*len(results_cpu.index.values)
}
bar = Bar(data, values='Ratio', agg='mean', label='label', group='group',
plot_width=600, plot_height=400, bar_width=0.7, color=['#5975a4','#99ccff'], legend='top_right')
bar.axis[0].axis_label=''
bar.axis[1].axis_label='Train Time Ratio (XGBoost/LightGBM)'
bar.axis[1].axis_label_text_font_size='12pt'
bar.y_range = Range1d(0, 30)
bar.toolbar_location='above'
bar.legend[0].visible=True
show(bar)
bar.output_backend = "svg"
export_svgs(bar, filename="xgb_vs_lgbm_train_time.svg")
display(SVG('xgb_vs_lgbm_train_time.svg'))
```
Plot of train time ratio for GPU experiments.
```
data = {
'Ratio': results_gpu['Train Time Ratio'].values.tolist() + results_gpu['Train Time Ratio Hist'].values.tolist(),
'label': results_gpu.index.values.tolist()*2,
'group': ['xgb/lgb']*len(results_gpu.index.values) + ['xgb_hist/lgb']*len(results_gpu.index.values)
}
bar = Bar(data, values='Ratio', agg='mean', label='label', group='group',
plot_width=600, plot_height=400, bar_width=0.5, color=['#ff8533','#ffd1b3'], legend='top_right')
bar.axis[0].axis_label=''
bar.y_range = Range1d(0, 30)
bar.axis[1].axis_label='Train Time Ratio (XGBoost/LightGBM)'
bar.axis[1].axis_label_text_font_size='12pt'
bar.toolbar_location='above'
bar.legend[0].visible=True
show(bar)
bar.output_backend = "svg"
export_svgs(bar, filename="xgb_vs_lgbm_train_time_gpu.svg")
display(SVG('xgb_vs_lgbm_train_time_gpu.svg'))
data = {
'Perf. Difference(%)': results_df['Perf. Difference(%)'].values,
'label': results_df.index.values
}
bar = Bar(data, values='Perf. Difference(%)', agg='mean', label=['label'],
plot_width=600, plot_height=400, bar_width=0.7, color='#5975a4')
bar.axis[0].axis_label=''
bar.axis[1].axis_label='Perf. Difference(%)'
bar.toolbar_location='above'
bar.legend[0].visible=False
show(bar)
bar.output_backend = "svg"
export_svgs(bar, filename="xgb_vs_lgbm_performance.svg")
display(SVG('xgb_vs_lgbm_performance.svg'))
```
For the speed results we can see that LightGBM is on average 5 times faster than the CPU and GPU versions of XGBoost and XGBoost histogram. In regards to the performance, we can see that LightGBM is sometimes better and sometimes worse.
Analyzing the results of XGBoost in CPU we can see that XGBoost histogram is faster than XGBoost in the Airline, Fraud and HIGGS datasets, but much slower in Planet and BCI dataset. In these two cases there is a memory overhead due to the high number of features. In the case of football dataset, the histogram implementation is slightly slower, we believe that there could be a slight principle of memory overhead.
Finally, if we look at the results of XGBoost in GPU we see that there are several values missing. This is due to an out of memory of the standard version. In our experiments we observed that XGBoost's memory consumption is around 10 times higher than LightGBM and 5 times higher than XGBoost histogram. We see that the histogram version is faster except in the BCI dataset, where there could be a memory overhead like in the CPU version.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D3_NetworkCausality/W3D3_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy 2020 -- Week 3 Day 3 Tutorial 3
# Causality Day - Simultaneous fitting/regression
**Content creators**: Ari Benjamin, Tony Liu, Konrad Kording
**Content reviewers**: Mike X Cohen, Madineh Sarvestani, Ella Batty, Michael Waskom
---
# Tutorial objectives
This is tutorial 3 on our day of examining causality. Below is the high level outline of what we'll cover today, with the sections we will focus on in this notebook in bold:
1. Master definitions of causality
2. Understand that estimating causality is possible
3. Learn 4 different methods and understand when they fail
1. perturbations
2. correlations
3. **simultaneous fitting/regression**
4. instrumental variables
### Notebook 3 objectives
In tutorial 2 we explored correlation as an approximation for causation and learned that correlation $\neq$ causation for larger networks. However, computing correlations is a rather simple approach, and you may be wondering: will more sophisticated techniques allow us to better estimate causality? Can't we control for things?
Here we'll use some common advanced (but controversial) methods that estimate causality from observational data. These methods rely on fitting a function to our data directly, instead of trying to use perturbations or correlations. Since we have the full closed-form equation of our system, we can try these methods and see how well they work in estimating causal connectivity when there are no perturbations. Specifically, we will:
- Learn about more advanced (but also controversial) techniques for estimating causality
- conditional probabilities (**regression**)
- Explore limitations and failure modes
- understand the problem of **omitted variable bias**
---
# Setup
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import Lasso
#@title Figure settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# @title Helper functions
def sigmoid(x):
"""
Compute sigmoid nonlinearity element-wise on x.
Args:
x (np.ndarray): the numpy data array we want to transform
Returns
(np.ndarray): x with sigmoid nonlinearity applied
"""
return 1 / (1 + np.exp(-x))
def logit(x):
"""
Applies the logit (inverse sigmoid) transformation
Args:
x (np.ndarray): the numpy data array we want to transform
Returns
(np.ndarray): x with logit nonlinearity applied
"""
return np.log(x/(1-x))
def create_connectivity(n_neurons, random_state=42, p=0.9):
"""
Generate our nxn causal connectivity matrix.
Args:
n_neurons (int): the number of neurons in our system.
random_state (int): random seed for reproducibility
Returns:
A (np.ndarray): our 0.1 sparse connectivity matrix
"""
np.random.seed(random_state)
A_0 = np.random.choice([0, 1], size=(n_neurons, n_neurons), p=[p, 1 - p])
# set the timescale of the dynamical system to about 100 steps
_, s_vals, _ = np.linalg.svd(A_0)
A = A_0 / (1.01 * s_vals[0])
# _, s_val_test, _ = np.linalg.svd(A)
# assert s_val_test[0] < 1, "largest singular value >= 1"
return A
def get_regression_estimate_full_connectivity(X):
"""
Estimates the connectivity matrix using lasso regression.
Args:
X (np.ndarray): our simulated system of shape (n_neurons, timesteps)
neuron_idx (int): optionally provide a neuron idx to compute connectivity for
Returns:
V (np.ndarray): estimated connectivity matrix of shape (n_neurons, n_neurons).
if neuron_idx is specified, V is of shape (n_neurons,).
"""
n_neurons = X.shape[0]
# Extract Y and W as defined above
W = X[:, :-1].transpose()
Y = X[:, 1:].transpose()
# apply inverse sigmoid transformation
Y = logit(Y)
# fit multioutput regression
reg = MultiOutputRegressor(Lasso(fit_intercept=False,
alpha=0.01, max_iter=250 ), n_jobs=-1)
reg.fit(W, Y)
V = np.zeros((n_neurons, n_neurons))
for i, estimator in enumerate(reg.estimators_):
V[i, :] = estimator.coef_
return V
def get_regression_corr_full_connectivity(n_neurons, A, X, observed_ratio, regression_args):
"""
A wrapper function for our correlation calculations between A and the V estimated
from regression.
Args:
n_neurons (int): number of neurons
A (np.ndarray): connectivity matrix
X (np.ndarray): dynamical system
observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.
regression_args (dict): dictionary of lasso regression arguments and hyperparameters
Returns:
A single float correlation value representing the similarity between A and R
"""
assert (observed_ratio > 0) and (observed_ratio <= 1)
sel_idx = np.clip(int(n_neurons*observed_ratio), 1, n_neurons)
sel_X = X[:sel_idx, :]
sel_A = A[:sel_idx, :sel_idx]
sel_V = get_regression_estimate_full_connectivity(sel_X)
return np.corrcoef(sel_A.flatten(), sel_V.flatten())[1,0], sel_V
def see_neurons(A, ax, ratio_observed=1, arrows=True):
"""
Visualizes the connectivity matrix.
Args:
A (np.ndarray): the connectivity matrix of shape (n_neurons, n_neurons)
ax (plt.axis): the matplotlib axis to display on
Returns:
Nothing, but visualizes A.
"""
n = len(A)
ax.set_aspect('equal')
thetas = np.linspace(0, np.pi * 2, n, endpoint=False)
x, y = np.cos(thetas), np.sin(thetas),
if arrows:
for i in range(n):
for j in range(n):
if A[i, j] > 0:
ax.arrow(x[i], y[i], x[j] - x[i], y[j] - y[i], color='k', head_width=.05,
width = A[i, j] / 25,shape='right', length_includes_head=True,
alpha = .2)
if ratio_observed < 1:
nn = int(n * ratio_observed)
ax.scatter(x[:nn], y[:nn], c='r', s=150, label='Observed')
ax.scatter(x[nn:], y[nn:], c='b', s=150, label='Unobserved')
ax.legend(fontsize=15)
else:
ax.scatter(x, y, c='k', s=150)
ax.axis('off')
def simulate_neurons(A, timesteps, random_state=42):
"""
Simulates a dynamical system for the specified number of neurons and timesteps.
Args:
A (np.array): the connectivity matrix
timesteps (int): the number of timesteps to simulate our system.
random_state (int): random seed for reproducibility
Returns:
- X has shape (n_neurons, timeteps).
"""
np.random.seed(random_state)
n_neurons = len(A)
X = np.zeros((n_neurons, timesteps))
for t in range(timesteps - 1):
# solution
epsilon = np.random.multivariate_normal(np.zeros(n_neurons), np.eye(n_neurons))
X[:, t + 1] = sigmoid(A.dot(X[:, t]) + epsilon)
assert epsilon.shape == (n_neurons,)
return X
def correlation_for_all_neurons(X):
"""Computes the connectivity matrix for the all neurons using correlations
Args:
X: the matrix of activities
Returns:
estimated_connectivity (np.ndarray): estimated connectivity for the selected neuron, of shape (n_neurons,)
"""
n_neurons = len(X)
S = np.concatenate([X[:, 1:], X[:, :-1]], axis=0)
R = np.corrcoef(S)[:n_neurons, n_neurons:]
return R
def get_sys_corr(n_neurons, timesteps, random_state=42, neuron_idx=None):
"""
A wrapper function for our correlation calculations between A and R.
Args:
n_neurons (int): the number of neurons in our system.
timesteps (int): the number of timesteps to simulate our system.
random_state (int): seed for reproducibility
neuron_idx (int): optionally provide a neuron idx to slice out
Returns:
A single float correlation value representing the similarity between A and R
"""
A = create_connectivity(n_neurons, random_state)
X = simulate_neurons(A, timesteps)
R = correlation_for_all_neurons(X)
return np.corrcoef(A.flatten(), R.flatten())[0, 1]
def get_regression_corr(n_neurons, A, X, observed_ratio, regression_args, neuron_idx=None):
"""
A wrapper function for our correlation calculations between A and the V estimated
from regression.
Args:
n_neurons (int): the number of neurons in our system.
A (np.array): the true connectivity
X (np.array): the simulated system
observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.
regression_args (dict): dictionary of lasso regression arguments and hyperparameters
neuron_idx (int): optionally provide a neuron idx to compute connectivity for
Returns:
A single float correlation value representing the similarity between A and R
"""
assert (observed_ratio > 0) and (observed_ratio <= 1)
sel_idx = np.clip(int(n_neurons * observed_ratio), 1, n_neurons)
selected_X = X[:sel_idx, :]
selected_connectivity = A[:sel_idx, :sel_idx]
estimated_selected_connectivity = get_regression_estimate(selected_X, neuron_idx=neuron_idx)
if neuron_idx is None:
return np.corrcoef(selected_connectivity.flatten(),
estimated_selected_connectivity.flatten())[1, 0], estimated_selected_connectivity
else:
return np.corrcoef(selected_connectivity[neuron_idx, :],
estimated_selected_connectivity)[1, 0], estimated_selected_connectivity
def plot_connectivity_matrix(A, ax=None):
"""Plot the (weighted) connectivity matrix A as a heatmap
Args:
A (ndarray): connectivity matrix (n_neurons by n_neurons)
ax: axis on which to display connectivity matrix
"""
if ax is None:
ax = plt.gca()
lim = np.abs(A).max()
ax.imshow(A, vmin=-lim, vmax=lim, cmap="coolwarm")
```
---
# Section 1: Regression
```
#@title Video 1: Regression approach
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Av4LaXZdgDo", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
You may be familiar with the idea that correlation only implies causation when there no hidden *confounders*. This aligns with our intuition that correlation only implies causality when no alternative variables could explain away a correlation.
**A confounding example**:
Suppose you observe that people who sleep more do better in school. It's a nice correlation. But what else could explain it? Maybe people who sleep more are richer, don't work a second job, and have time to actually do homework. If you want to ask if sleep *causes* better grades, and want to answer that with correlations, you have to control for all possible confounds.
A confound is any variable that affects both the outcome and your original covariate. In our example, confounds are things that affect both sleep and grades.
**Controlling for a confound**:
Confonds can be controlled for by adding them as covariates in a regression. But for your coefficients to be causal effects, you need three things:
1. **All** confounds are included as covariates
2. Your regression assumes the same mathematical form of how covariates relate to outcomes (linear, GLM, etc.)
3. No covariates are caused *by* both the treatment (original variable) and the outcome. These are [colliders](https://en.wikipedia.org/wiki/Collider_(statistics)); we won't introduce it today (but Google it on your own time! Colliders are very counterintuitive.)
In the real world it is very hard to guarantee these conditions are met. In the brain it's even harder (as we can't measure all neurons). Luckily today we simulated the system ourselves.
```
#@title Video 2: Fitting a GLM
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="GvMj9hRv5Ak", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
## Section 1.1: Recovering connectivity by model fitting
Recall that in our system each neuron effects every other via:
$$
\vec{x}_{t+1} = \sigma(A\vec{x}_t + \epsilon_t),
$$
where $\sigma$ is our sigmoid nonlinearity from before: $\sigma(x) = \frac{1}{1 + e^{-x}}$
Our system is a closed system, too, so there are no omitted variables. The regression coefficients should be the causal effect. Are they?
We will use a regression approach to estimate the causal influence of all neurons to neuron #1. Specifically, we will use linear regression to determine the $A$ in:
$$
\sigma^{-1}(\vec{x}_{t+1}) = A\vec{x}_t + \epsilon_t ,
$$
where $\sigma^{-1}$ is the inverse sigmoid transformation, also sometimes referred to as the **logit** transformation: $\sigma^{-1}(x) = \log(\frac{x}{1-x})$.
Let $W$ be the $\vec{x}_t$ values, up to the second-to-last timestep $T-1$:
$$
W =
\begin{bmatrix}
\mid & \mid & ... & \mid \\
\vec{x}_0 & \vec{x}_1 & ... & \vec{x}_{T-1} \\
\mid & \mid & ... & \mid
\end{bmatrix}_{n \times (T-1)}
$$
Let $Y$ be the $\vec{x}_{t+1}$ values for a selected neuron, indexed by $i$, starting from the second timestep up to the last timestep $T$:
$$
Y =
\begin{bmatrix}
x_{i,1} & x_{i,2} & ... & x_{i, T} \\
\end{bmatrix}_{1 \times (T-1)}
$$
You will then fit the following model:
$$
\sigma^{-1}(Y^T) = W^TV
$$
where $V$ is the $n \times 1$ coefficient matrix of this regression, which will be the estimated connectivity matrix between the selected neuron and the rest of the neurons.
**Review**: As you learned Friday of Week 1, *lasso* a.k.a. **$L_1$ regularization** causes the coefficients to be sparse, containing mostly zeros. Think about why we want this here.
## Exercise 1: Use linear regression plus lasso to estimate causal connectivities
You will now create a function to fit the above regression model and V. We will then call this function to examine how close the regression vs the correlation is to true causality.
**Code**:
You'll notice that we've transposed both $Y$ and $W$ here and in the code we've already provided below. Why is that?
This is because the machine learning models provided in scikit-learn expect the *rows* of the input data to be the observations, while the *columns* are the variables. We have that inverted in our definitions of $Y$ and $W$, with the timesteps of our system (the observations) as the columns. So we transpose both matrices to make the matrix orientation correct for scikit-learn.
- Because of the abstraction provided by scikit-learn, fitting this regression will just be a call to initialize the `Lasso()` estimator and a call to the `fit()` function
- Use the following hyperparameters for the `Lasso` estimator:
- `alpha = 0.01`
- `fit_intercept = False`
- How do we obtain $V$ from the fitted model?
```
def get_regression_estimate(X, neuron_idx):
"""
Estimates the connectivity matrix using lasso regression.
Args:
X (np.ndarray): our simulated system of shape (n_neurons, timesteps)
neuron_idx (int): a neuron index to compute connectivity for
Returns:
V (np.ndarray): estimated connectivity matrix of shape (n_neurons, n_neurons).
if neuron_idx is specified, V is of shape (n_neurons,).
"""
# Extract Y and W as defined above
W = X[:, :-1].transpose()
Y = X[[neuron_idx], 1:].transpose()
# Apply inverse sigmoid transformation
Y = logit(Y)
############################################################################
## TODO: Insert your code here to fit a regressor with Lasso. Lasso captures
## our assumption that most connections are precisely 0.
## Fill in function and remove
raise NotImplementedError("Please complete the regression exercise")
############################################################################
# Initialize regression model with no intercept and alpha=0.01
regression = ...
# Fit regression to the data
regression.fit(...)
V = regression.coef_
return V
# Parameters
n_neurons = 50 # the size of our system
timesteps = 10000 # the number of timesteps to take
random_state = 42
neuron_idx = 1
A = create_connectivity(n_neurons, random_state)
X = simulate_neurons(A, timesteps)
# Uncomment below to test your function
# V = get_regression_estimate(X, neuron_idx)
#print("Regression: correlation of estimated connectivity with true connectivity: {:.3f}".format(np.corrcoef(A[neuron_idx, :], V)[1, 0]))
#print("Lagged correlation of estimated connectivity with true connectivity: {:.3f}".format(get_sys_corr(n_neurons, timesteps, random_state, neuron_idx=neuron_idx)))
# to_remove solution
def get_regression_estimate(X, neuron_idx):
"""
Estimates the connectivity matrix using lasso regression.
Args:
X (np.ndarray): our simulated system of shape (n_neurons, timesteps)
neuron_idx (int): a neuron index to compute connectivity for
Returns:
V (np.ndarray): estimated connectivity matrix of shape (n_neurons, n_neurons).
if neuron_idx is specified, V is of shape (n_neurons,).
"""
# Extract Y and W as defined above
W = X[:, :-1].transpose()
Y = X[[neuron_idx], 1:].transpose()
# Apply inverse sigmoid transformation
Y = logit(Y)
# Initialize regression model with no intercept and alpha=0.01
regression = Lasso(fit_intercept=False, alpha=0.01)
# Fit regression to the data
regression.fit(W, Y)
V = regression.coef_
return V
# Parameters
n_neurons = 50 # the size of our system
timesteps = 10000 # the number of timesteps to take
random_state = 42
neuron_idx = 1
A = create_connectivity(n_neurons, random_state)
X = simulate_neurons(A, timesteps)
# Uncomment below to test your function
V = get_regression_estimate(X, neuron_idx)
print("Regression: correlation of estimated connectivity with true connectivity: {:.3f}".format(np.corrcoef(A[neuron_idx, :], V)[1, 0]))
print("Lagged correlation of estimated connectivity with true connectivity: {:.3f}".format(get_sys_corr(n_neurons, timesteps, random_state, neuron_idx=neuron_idx)))
```
You should find that using regression, our estimated connectivity matrix has a correlation of 0.865 with the true connectivity matrix. With correlation, our estimated connectivity matrix has a correlation of 0.703 with the true connectivity matrix.
We can see from these numbers that multiple regression is better than simple correlation for estimating connectivity.
---
# Section 2: Omitted Variable Bias
If we are unable to observe the entire system, **omitted variable bias** becomes a problem. If we don't have access to all the neurons, and so therefore can't control for them, can we still estimate the causal effect accurately?
## Section 2.1: Visualizing subsets of the connectivity matrix
We first visualize different subsets of the connectivity matrix when we observe 75% of the neurons vs 25%.
Recall the meaning of entries in our connectivity matrix: $A[i,j] = 1$ means a connectivity **from** neuron $i$ **to** neuron $j$ with strength $1$.
```
#@markdown Execute this cell to visualize subsets of connectivity matrix
# Run this cell to visualize the subsets of variables we observe
n_neurons = 25
A = create_connectivity(n_neurons)
fig, axs = plt.subplots(2, 2, figsize=(10, 10))
ratio_observed = [0.75, 0.25] # the proportion of neurons observed in our system
for i, ratio in enumerate(ratio_observed):
sel_idx = int(n_neurons * ratio)
offset = np.zeros((n_neurons, n_neurons))
axs[i,1].title.set_text("{}% neurons observed".format(int(ratio * 100)))
offset[:sel_idx, :sel_idx] = 1 + A[:sel_idx, :sel_idx]
im = axs[i, 1].imshow(offset, cmap="coolwarm", vmin=0, vmax=A.max() + 1)
axs[i, 1].set_xlabel("Connectivity from")
axs[i, 1].set_ylabel("Connectivity to")
plt.colorbar(im, ax=axs[i, 1], fraction=0.046, pad=0.04)
see_neurons(A,axs[i, 0],ratio)
plt.suptitle("Visualizing subsets of the connectivity matrix", y = 1.05)
plt.show()
```
## Section 2.2: Effects of partial observability
```
#@title Video 3: Omitted variable bias
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5CCib6CTMac", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
**Video correction**: the labels "connectivity from"/"connectivity to" are swapped in the video but fixed in the figures/demos below
### Interactive Demo: Regression performance as a function of the number of observed neurons
We will first change the number of observed neurons in the network and inspect the resulting estimates of connectivity in this interactive demo. How does the estimated connectivity differ?
**Note:** the plots will take a moment or so to update after moving the slider.
```
#@markdown Execute this cell to enable demo
n_neurons = 50
A = create_connectivity(n_neurons, random_state=42)
X = simulate_neurons(A, 4000, random_state=42)
reg_args = {
"fit_intercept": False,
"alpha": 0.001
}
@widgets.interact
def plot_observed(n_observed=(5, 45, 5)):
to_neuron = 0
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
sel_idx = n_observed
ratio = (n_observed) / n_neurons
offset = np.zeros((n_neurons, n_neurons))
axs[0].title.set_text("{}% neurons observed".format(int(ratio * 100)))
offset[:sel_idx, :sel_idx] = 1 + A[:sel_idx, :sel_idx]
im = axs[1].imshow(offset, cmap="coolwarm", vmin=0, vmax=A.max() + 1)
plt.colorbar(im, ax=axs[1], fraction=0.046, pad=0.04)
see_neurons(A,axs[0], ratio, False)
corr, R = get_regression_corr_full_connectivity(n_neurons,
A,
X,
ratio,
reg_args)
#rect = patches.Rectangle((-.5,to_neuron-.5),n_observed,1,linewidth=2,edgecolor='k',facecolor='none')
#axs[1].add_patch(rect)
big_R = np.zeros(A.shape)
big_R[:sel_idx, :sel_idx] = 1 + R
#big_R[to_neuron, :sel_idx] = 1 + R
im = axs[2].imshow(big_R, cmap="coolwarm", vmin=0, vmax=A.max() + 1)
plt.colorbar(im, ax=axs[2],fraction=0.046, pad=0.04)
c = 'w' if n_observed<(n_neurons-3) else 'k'
axs[2].text(0,n_observed+3,"Correlation : {:.2f}".format(corr), color=c, size=15)
#axs[2].axis("off")
axs[1].title.set_text("True connectivity")
axs[1].set_xlabel("Connectivity from")
axs[1].set_ylabel("Connectivity to")
axs[2].title.set_text("Estimated connectivity")
axs[2].set_xlabel("Connectivity from")
#axs[2].set_ylabel("Connectivity to")
```
Next, we will inspect a plot of the correlation between true and estimated connectivity matrices vs the percent of neurons observed over multiple trials.
What is the relationship that you see between performance and the number of neurons observed?
**Note:** the cell below will take about 25-30 seconds to run.
```
#@title
#@markdown Plot correlation vs. subsampling
import warnings
warnings.filterwarnings('ignore')
# we'll simulate many systems for various ratios of observed neurons
n_neurons = 50
timesteps = 5000
ratio_observed = [1, 0.75, 0.5, .25, .12] # the proportion of neurons observed in our system
n_trials = 3 # run it this many times to get variability in our results
reg_args = {
"fit_intercept": False,
"alpha": 0.001
}
corr_data = np.zeros((n_trials, len(ratio_observed)))
for trial in range(n_trials):
A = create_connectivity(n_neurons, random_state=trial)
X = simulate_neurons(A, timesteps)
print("simulating trial {} of {}".format(trial + 1, n_trials))
for j, ratio in enumerate(ratio_observed):
result,_ = get_regression_corr_full_connectivity(n_neurons,
A,
X,
ratio,
reg_args)
corr_data[trial, j] = result
corr_mean = np.nanmean(corr_data, axis=0)
corr_std = np.nanstd(corr_data, axis=0)
plt.plot(np.asarray(ratio_observed) * 100, corr_mean)
plt.fill_between(np.asarray(ratio_observed) * 100,
corr_mean - corr_std,
corr_mean + corr_std,
alpha=.2)
plt.xlim([100, 10])
plt.xlabel("Percent of neurons observed")
plt.ylabel("connectivity matrices correlation")
plt.title("Performance of regression as a function of the number of neurons observed");
```
---
# Summary
```
#@title Video 4: Summary
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="T1uGf1H31wE", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
```
In this tutorial, we explored:
1) Using regression for estimating causality
2) The problem of ommitted variable bias, and how it arises in practice
|
github_jupyter
|
```
# Setup Sets
cities = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
power_plants = ["P1", "P2", "P3", "P4", "P5", "P6"]
connections = [("C1", "P1"), ("C1", "P3"), ("C1","P5"), \
("C2", "P1"), ("C2", "P2"), ("C2","P4"), \
("C3", "P2"), ("C3", "P3"), ("C3","P4"), \
("C4", "P2"), ("C4", "P4"), ("C4","P6"), \
("C5", "P2"), ("C5", "P5"), ("C5","P6"), \
("C6", "P3"), ("C6", "P4"), ("C6","P6"), \
("C7", "P1"), ("C7", "P3"), ("C7","P6"), \
("C8", "P2"), ("C8", "P3"), ("C8","P4"), \
("C9", "P3"), ("C9", "P5"), ("C9","P6")]
# Setup Parameters
max_power_generation = {"P1":100, "P2":150, "P3":250, "P4":125, "P5": 175, "P6":165}
startup_cost = {"P1":50, "P2":80, "P3":90, "P4":60, "P5": 60, "P6":70}
power_cost = {"P1":2, "P2":1.5, "P3":1.2, "P4":1.8, "P5": 0.8, "P6":1.1}
power_required = {"C1":25, "C2":35, "C3":30, "C4":29, "C5":40, "C6":35, "C7":50, "C8":45, "C9":38}
# Import PuLP Library
from pulp import *
# Create Decision Variables
run_power_plant = LpVariable.dicts("StartPlant", power_plants, 0, 1, LpInteger)
power_generation = LpVariable.dicts("PowerGeneration", power_plants, 0, None, LpContinuous)
power_sent = LpVariable.dicts("PowerSent", connections, 0, None, LpContinuous)
# Create Problem object
problem = LpProblem("PowerPlanning", LpMinimize)
# Add the Objective Function
problem += lpSum([run_power_plant[p] * startup_cost[p] + power_generation[p] * power_cost[p] for p in power_plants])
# Add Power Capacity Constraints
for p in power_plants:
problem += power_generation[p] <= max_power_generation[p] * run_power_plant[p], f"PowerCapacity_{p}"
# Add Power Balance Constraints
for p in power_plants:
problem += power_generation[p] == lpSum([power_sent[(c,p)] for c in cities if (c, p) in connections]), f"PowerSent_{p}"
# Add Cities Powered Constraints
for c in cities:
problem += power_required[c] == lpSum([power_sent[(c,p)] for p in power_plants if (c, p) in connections]), f"PowerRequired_{c}"
# Solve the problem
problem.solve()
# Check the status of the solution
status = LpStatus[problem.status]
print(status)
# Print the results
for v in problem.variables():
if v.varValue != 0:
print(v.name, "=", v.varValue)
# Let's look at the Plant Utilization
for p in power_plants:
if power_generation[p].varValue > 0:
utilization = (power_generation[p].varValue / max_power_generation[p]) * 100
print(f"Plant: {p} Generation: {power_generation[p].varValue} Utilization: {utilization:.2f}%")
```
|
github_jupyter
|
```
import pandas as pd
disp_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/disp.csv'
trans_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/trans.csv'
account_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/account.csv'
client_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter12/Dataset/client.csv'
df_disp = pd.read_csv(disp_url, sep=';')
df_trans = pd.read_csv(trans_url, sep=';')
df_account = pd.read_csv(account_url, sep=';')
df_client = pd.read_csv(client_url, sep=';')
df_trans.head()
df_trans.shape
df_account.head()
df_trans_acc = pd.merge(df_trans, df_account, how='left', on='account_id')
df_trans_acc.shape
df_disp.head()
df_disp_owner = df_disp[df_disp['type'] == 'OWNER']
df_disp_owner.duplicated(subset='account_id').sum()
df_trans_acc_disp = pd.merge(df_trans_acc, df_disp_owner, how='left', on='account_id')
df_trans_acc_disp.shape
df_client.head()
df_merged = pd.merge(df_trans_acc_disp, df_client, how='left', on=['client_id', 'district_id'])
df_merged.shape
df_merged.columns
df_merged.rename(columns={'date_x': 'trans_date', 'type_x': 'trans_type', 'date_y':'account_creation', 'type_y':'client_type'}, inplace=True)
df_merged.head()
df_merged.dtypes
df_merged['trans_date'] = pd.to_datetime(df_merged['trans_date'], format="%y%m%d")
df_merged['account_creation'] = pd.to_datetime(df_merged['account_creation'], format="%y%m%d")
df_merged.dtypes
df_merged['is_female'] = (df_merged['birth_number'] % 10000) / 5000 > 1
df_merged['birth_number'].head()
df_merged.loc[df_merged['is_female'] == True, 'birth_number'] -= 5000
df_merged['birth_number'].head()
pd.to_datetime(df_merged['birth_number'], format="%y%m%d", errors='coerce')
df_merged['birth_number'] = df_merged['birth_number'].astype(str)
df_merged['birth_number'].head()
import numpy as np
df_merged.loc[df_merged['birth_number'] == 'nan', 'birth_number'] = np.nan
df_merged['birth_number'].head()
df_merged.loc[~df_merged['birth_number'].isna(), 'birth_number'] = '19' + df_merged.loc[~df_merged['birth_number'].isna(), 'birth_number']
df_merged['birth_number'].head()
df_merged['birth_number'] = pd.to_datetime(df_merged['birth_number'], format="%Y%m%d", errors='coerce')
df_merged['birth_number'].head(20)
df_merged['age_at_creation'] = df_merged['account_creation'] - df_merged['birth_number']
df_merged['age_at_creation'] = df_merged['age_at_creation'] / np.timedelta64(1,'Y')
df_merged['age_at_creation'] = df_merged['age_at_creation'].round()
df_merged.head()
```
|
github_jupyter
|
```
import os
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import GCode
import GRBL
# Flip a 2D array. Effectively reversing the path.
flip2 = np.array([
[0, 1],
[1, 0],
])
flip2
# Flip a 2x3 array. Effectively reversing the path.
flip3 = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
flip3
A = np.array([
[1, 2],
[3, 4],
])
A
np.matmul(flip2, A)
B = np.array([
[1, 2],
[3, 4],
[5, 6],
])
B
np.matmul(flip3, B)
B.shape[0]
np.eye(B.shape[0])
flip_n_reverseit = np.eye(B.shape[0])[:, ::-1]
flip_n_reverseit
def reverse(self, points):
flip_n_reverseit = np.eye(points.shape[0])[:, ::-1]
return np.matmul(flip_n_reverseit, points)
reverse(None, B)
```
# Code:
Draw a 10 mm line from (0, 0) to (10, 0).
```
line_len = 10
line_n_points = 2
p = np.linspace(0, line_len, line_n_points, endpoint=True)
p
line_n_points = 3
p = np.linspace(0, line_len, line_n_points, endpoint=True)
p
line_n_points = 4
p = np.linspace(0, line_len, line_n_points, endpoint=True)
p
p
Y=0
for X in np.linspace(0, line_len, line_n_points, endpoint=True):
def HorzLine(X0=0, Xf=10, Y=0, n_points=2):
p = np.linspace(X0, Xf, n_points, endpoint=True)
line_points = np.array([
p,
Y*np.ones(p.shape),
])
return line_points.transpose()
HorzLine()
def VertLine(X=0, Y0=0, Yf=10, n_points=2):
p = np.linspace(Y0, Yf, n_points, endpoint=True)
line_points = np.array([
X*np.ones(p.shape),
p,
])
return line_points.transpose()
VertLine()
points = HorzLine(X0=0, Xf=10, Y=0, n_points=2)
points
line = GCode.Line(points=points)
line
line.__repr__()
prog_cfg={
"points": points
}
prog_cfg
line_cfg = {
"X0": 0,
"Xf": 10,
"Y": 0,
"n_points": 2
}
line_cfg
help(GCode.Line)
help(GCode.Program)
progs = list()
for n_points in range(2, 10):
line_cfg = {
"X0": 0,
"Xf": 10,
"Y": 0,
"n_points": n_points
}
points = HorzLine(**line_cfg)
line_cfg = {
"points": points,
"feed":120,
"power":128,
"dynamic_power": True,
}
line = GCode.Line(points=points)
prog_cfg={
"lines": [line, line],
"feed": 120
}
prog = GCode.Program(**prog_cfg)
progs.append(prog)
progs
for prog in progs:
print(len(prog.buffer))
for prog in progs:
prog.generate_gcode()
print(len(prog.buffer))
list(map(lambda prog: prog.generate_gcode(), progs))
list(map(lambda prog: len(prog.buffer), progs))
import threading
def concurrent_map(func, data):
"""
Similar to the bultin function map(). But spawn a thread for each argument
and apply `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
return result
concurrent_map(lambda prog: prog.generate_gcode(), progs)
concurrent_map(lambda prog: len(prog.buffer), progs)
concurrent_map(lambda prog: prog.__repr__(), progs)
concurrent_map(lambda prog: prog.dist, progs)
concurrent_map(lambda prog: prog.jog_dist, progs)
concurrent_map(lambda prog: prog.laserin_dist, progs)
m=concurrent_map(lambda prog: prog.laserin_dist, progs)
np.diff(m)
np.diff(m)==0
np.all(np.diff(m)==0)
assert(np.all(np.diff(m)==0))
flip2
reverse(None, progs[1].lines[0].points)
progs
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
raw_data = pd.read_excel("hydrogen_test_classification.xlsx")
raw_data.head()
# 分开特征值和标签值
X = raw_data.drop("TRUE VALUE", axis=1).copy()
y = raw_data["TRUE VALUE"]
y.unique()
from sklearn.model_selection import train_test_split
# 分训练集、验证集和测试集
X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, Y_valid, X_label, Y_label = train_test_split(X_train_full, y_train_full, test_size=0.2, random_state=42)
print(X_train.shape)
print(Y_valid.shape)
print(X_label.shape)
print(Y_label.shape)
#将数据进行转化成四个通道,方便使用卷积神经网络进行分类
def transform(X):
X=X.values
X=X.reshape(X.shape[0:1][0],5,5)
X=np.expand_dims(X,-1)
print("转化后的维度大小为:")
print(X.shape)
return X
#首先对X—train进行维度转换
X_train=transform(X_train)
X_train.shape
Y_valid=transform(Y_valid)
Y_valid.shape
Y_valid.shape
#将label的标签值,转化为1和0
def only_one_and_zero(y):
y=y.values#将pandas当中的dateframe对象转化为numpy-ndarray对象
length=y.shape[0:1][0]
i=0
while i<length:
if(y[i]==-1):
y[i]=0
i+=1
print("当前的y为",y)
print(type(y))
return y
X_label=only_one_and_zero(X_label)
Y_label=only_one_and_zero(Y_label)
X_train
Y_label
X_train.shape
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
#初步想法,将5*5的数据进行卷积操作,然后使用卷积神经网络进行图像识别
# input_shape 填特征的维度,将特征变为一维特征的形式
model = Sequential()
# model.add(Flatten(input_shape=[25]))、
model.add(tf.keras.layers.Conv2D(50,(2,2),input_shape=X_train.shape[1:],activation="relu"))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(100,(2,2),activation="relu"))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Flatten())
#model.add(Dense(1024, activation="relu", input_shape=X_train.shape[1:]))
model.add(Dense(500, activation="relu"))
model.add(Dense(250, activation="relu"))
model.add(Dense(125, activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(1, activation="sigmoid"))
model.summary()
#from tensorflow.keras.utils import plot_model
#plot_model(model, to_file='model.png',show_shapes=True)
#如果在本机安装pydot和pyfotprint这两个库的情况下,可以直接将神经网络进行可视化
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
loss='binary_crossentropy',
metrics=['acc']
)
#创建checkpoint,在模型训练是进行回调,这样可以让训练完之后的模型得以保存
import os
checkpoint_path = "training_1/cnn.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# 创建一个保存模型权重的回调
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
#verbose=1,表示在模型训练的时候返回some imformation
history = model.fit(X_train,X_label, epochs=200,
validation_data=(Y_valid, Y_label),
callbacks=[cp_callback])
# 在一张图上画出loss和accuracy的
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.xlabel("epoch")
plt.grid(True)
# plt.gca().set_ylim(0, 1)
#save_fig("keras_learning_curves_plot")
plt.show()
plt.plot(history.epoch,history.history.get('loss'),label="loss")
plt.plot(history.epoch,history.history.get('val_loss'),label="val_loss")
plt.legend()
#络的重要信息
history.params
history.history.keys()
a = ["acc", "val_acc"]
plt.figure(figsize=(8, 5))
for i in a:
plt.plot(history.history[i], label=i)
plt.legend()
plt.grid(True)
model.evaluate(Y_valid, Y_label)
```
|
github_jupyter
|
# Supervised Learning
Supervised learning consists in learning the link between two datasets: the observed data X and an external variable y that we are trying to predict, usually called “target” or “labels”. Most often, y is a 1D array of length n_samples.
If the prediction task is to classify the observations in a set of finite labels, in other words to “name” the objects observed, the task is said to be a **classification** task. On the other hand, if the goal is to predict a continuous target variable, it is said to be a **regression** task.
Clustering, which we've just done with K means, is a type of *unsupervised* learning similar to classification. Here, the difference is that we'll be using the labels in our data in our algorithm.
## Classification
"The problem of identifying to which of a set of categories (sub-populations) a new observation belongs, on the basis of a training set of data containing observations (or instances) whose category membership is known." (Wikipedia)
We've seen one classification example already, the iris dataset. In this dataset, iris flowers are classified based on their petal and sepal geometries.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
def pca_plot(data):
pca = PCA(n_components=2)
pca.fit(data.data)
data_pca = pca.transform(data.data)
for label in range(len(data.target_names)):
plt.scatter(data_pca[data.target==label, 0],
data_pca[data.target==label, 1],
label=data.target_names[label])
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
from sklearn.datasets import load_iris
iris = load_iris()
pca_plot(iris)
```
Another dataset with more features is the wine classification dataset, which tries to determine the original cultivar, or plant family, of three different Italian wines. A chemical analysis determined the following samples:
1. Alcohol
2. Malic acid
3. Ash
4. Alcalinity of ash
5. Magnesium
6. Total phenols
7. Flavanoids
8. Nonflavanoid phenols
9. Proanthocyanins
10. Color intensity
11. Hue
12. OD280/OD315 of diluted wines
13. Proline
```
from sklearn.datasets import load_wine
wine = load_wine()
pca_plot(wine)
```
A final and more difficult dataset is a sample from the National Institute of Standards and Technology (NIST) dataset on handwritten numbers. A modified and larger version of this, Modified NIST or MNIST, is a current standard benchmark for state of the art machine learning algorithms. In this problem, each datapoint is an 8x8 pixel image (64 features) and the classification task is to label each image as the correct number.
```
from sklearn.datasets import load_digits
digits = load_digits()
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:8]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Label: %i' % label)
plt.show()
pca_plot(digits)
```
## Regression
"In statistical modeling, regression analysis is a set of statistical processes for estimating the relationships among variables. It includes many techniques for modeling and analyzing several variables, when the focus is on the relationship between a dependent variable and one or more independent variables (or 'predictors'). More specifically, regression analysis helps one understand how the typical value of the dependent variable (or 'criterion variable') changes when any one of the independent variables is varied, while the other independent variables are held fixed." (Wikipedia)
In regression, each set of features doesn't correspond to a label but rather to a value. The task of the regression algorithm is to correctly predict this value based on the feature data. One way to think about regression and classification is that regression is continuous while classification is discrete.
Scikit learn also comes with a number of sample regression datasets.
In our example regression dataset, health metrics of diabetes patients were measured and then the progress of their diabetes was quantitatively measured after 1 year. The features are:
1. age
2. sex
3. body mass index
4. average blood pressure
+ 5-10 six blood serum measurements
```
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
y = diabetes.target
features = ["AGE", "SEX", "BMI", "BP", "BL1", "BL2", "BL3", "BL4", "BL5", "BL6"]
plt.figure(figsize=(20,20))
for i in range(10):
plt.subplot(4, 4, i + 1)
plt.scatter(diabetes.data[:, i], y, edgecolors=(0, 0, 0));
plt.title('Feature: %s' % features[i])
```
<div class="alert alert-success">
<b>EXERCISE: UCI datasets</b>
<ul>
<li>
Many of these datasets originally come from the UCI Machine Learning Repository. Visit https://archive.ics.uci.edu/ml/index.php and select a dataset. What is the dataset describing? What are the features? Is it classification or regression? How many data samples are there?
</li>
</ul>
</div>
|
github_jupyter
|
# Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
## Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
```
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
```
## Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
```
rides[:24*10].plot(x='dteday', y='cnt')
```
### Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
```
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
```
### Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
```
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
```
### Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
```
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
```
We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
```
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
```
## Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
2. Implement the forward pass in the `train` method.
3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.
4. Implement the forward pass in the `run` method.
```
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1. / (1. + np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors)
hidden_grad = hidden_outputs * (1 - hidden_outputs)
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T)
self.weights_input_to_hidden += self.lr * np.dot(hidden_errors * hidden_grad, inputs.T)
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
```
## Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
### Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
### Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
### Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
```
import sys
### Set the hyperparameters here ###
epochs = 1000
learning_rate = 0.1
hidden_nodes = 10
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
```
## Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
```
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
```
## Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
> **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
#### Your answer below
Before Dec 21 model fits well. From Dec 22 model fits bad, because the amount of data decrease.
## Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
```
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
```
|
github_jupyter
|
```
# Author: Robert Guthrie
from copy import copy
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return idx.item()
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
return torch.tensor(idxs, dtype=torch.long)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class BiLSTM_CRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
num_layers=1, bidirectional=True)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return (torch.randn(2, 1, self.hidden_dim // 2),
torch.randn(2, 1, self.hidden_dim // 2))
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full((1, self.tagset_size), -10000.)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
forward_var = init_alphas
# Iterate through the sentence
for feat in feats:
alphas_t = [] # The forward tensors at this timestep
for next_tag in range(self.tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = feat[next_tag].view(
1, -1).expand(1, self.tagset_size)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = forward_var + trans_score + emit_score
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentence):
self.hidden = self.init_hidden()
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = torch.zeros(1)
tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
for i, feat in enumerate(feats):
score = score + \
self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, self.tagset_size), -10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = init_vvars
for feat in feats:
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = forward_var + self.transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
# Pop off the start tag (we dont want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags):
feats = self._get_lstm_features(sentence)
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score
def forward(self, sentence): # dont confuse this with _forward_alg above.
# Get the emission scores from the BiLSTM
lstm_feats = self._get_lstm_features(sentence)
# Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
return score, tag_seq
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4
# Make up some training data
training_data = [(
"the wall street journal reported today that apple corporation made money".split(),
"B I I I O O O B I O O".split()
), (
"georgia tech is a university in georgia".split(),
"B I O O O O B".split()
)]
word_to_ix = {}
for sentence, tags in training_data:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
# Check predictions before training
with torch.no_grad():
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)
print(model(precheck_sent))
# Make sure prepare_sequence from earlier in the LSTM section is loaded
for epoch in range(
300): # again, normally you would NOT do 300 epochs, it is toy data
for sentence, tags in training_data:
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
# Step 2. Get our inputs ready for the network, that is,
# turn them into Tensors of word indices.
sentence_in = prepare_sequence(sentence, word_to_ix)
targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)
# Step 3. Run our forward pass.
loss = model.neg_log_likelihood(sentence_in, targets)
# Step 4. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
loss.backward()
optimizer.step()
# Check predictions after training
with torch.no_grad():
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print(model(precheck_sent))
# We got it!
```
# model
```
import sys
sys.path.append('..')
from utils.dataset.ec import ECDataset
from utils.dataloader.ec import ECDataLoader
from models.han.word_model import WordAttention
from models.han.sentence_model import SentenceWithPosition
device = torch.device('cuda: 0')
batch_size = 16
vocab_size = 23071
num_classes = 2
sequence_length = 41
embedding_dim = 300
dropout = 0.5
word_rnn_size = 300
word_rnn_layer = 2
sentence_rnn_size = 300
sentence_rnn_layer = 2
pos_size = 103
pos_embedding_dim = 300
pos_embedding_file= '/data/wujipeng/ec/data/embedding/pos_embedding.pkl'
train_dataset = ECDataset(data_root='/data/wujipeng/ec/data/test/', vocab_root='/data/wujipeng/ec/data/raw_data/', train=True)
test_dataset = ECDataset(data_root='/data/wujipeng/ec/data/test/', vocab_root='/data/wujipeng/ec/data/raw_data/', train=False)
train_loader = ECDataLoader(dataset=train_dataset, clause_length=sequence_length, batch_size=16, shuffle=True, sort=True, collate_fn=train_dataset.collate_fn)
for batch in train_loader:
clauses, keywords, poses = ECDataset.batch2input(batch)
labels = ECDataset.batch2target(batch)
clauses = torch.from_numpy(clauses).to(device)
keywords = torch.from_numpy(keywords).to(device)
poses = torch.from_numpy(poses).to(device)
labels = torch.from_numpy(labels).to(device)
targets = labels
break
class HierachicalAttentionModelCRF:
def __init__(self,
vocab_size,
num_classes,
embedding_dim,
hidden_size,
word_model,
sentence_model,
dropout=0.5,
fix_embed=True,
name='HAN'):
super(HierachicalAttentionModelCRF, self).__init__()
self.num_classes = num_classes
self.fix_embed = fix_embed
self.name = name
self.Embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
self.word_rnn = WordAttention(
vocab_size=vocab_size,
embedding_dim=embedding_dim,
batch_size=batch_size,
sequence_length=sequence_length,
rnn_size=word_rnn_size,
rnn_layers=word_rnn_layer,
dropout=dropout)
self.sentence_rnn = SentenceAttention(
batch_size=batch_size,
word_rnn_size = word_rnn_size,
rnn_size = sentence_rnn_size,
rnn_layers=sentence_rnn_layer,
pos_size=pos_size,
pos_embedding_dim=pos_embedding_dim,
pos_embedding_file=pos_embedding_file
)
self.fc = nn.Linear(
2 * self.word_rnn_size + 2 * self.sentence_rnn_size, num_classes)
self.dropout = nn.Dropout(dropout)
# self.fc = nn.Sequential(
# nn.Linear(2 * self.sentence_rnn_size, linear_hidden_dim),
# nn.ReLU(inplace=True),
# nn.Dropout(dropout),
# nn.Linear(linear_hidden_dim, num_classes)
# )
def init_weights(self, embeddings):
if embeddings is not None:
self.Embedding = self.Embedding.from_pretrained(embeddings)
def forward(self, clauses, keywords, poses):
inputs = self.linear(self.Embedding(clauses))
queries = self.linear(self.Embedding(keywords))
documents, word_attn = self.word_rnn(inputs, queries)
outputs, sentence_attn = self.sentence_rnn(documents, poses)
# outputs = self.fc(outputs)
s_c = torch.cat((documents, outputs), dim=-1)
outputs = self.fc(self.dropout(s_c))
return outputs, word_attn, sentence_attn
```
## init
```
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return idx.item()
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
return torch.tensor(idxs, dtype=torch.long)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
START_TAG = "<START>"
STOP_TAG = "<STOP>"
tag_to_ix = {0: 0, 1: 1, START_TAG: 2, STOP_TAG: 3}
tagsize = len(tag_to_ix)
Embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0).to(device)
word_rnn = WordAttention(
vocab_size=vocab_size,
embedding_dim=embedding_dim,
batch_size=batch_size,
sequence_length=sequence_length,
rnn_size=word_rnn_size,
rnn_layers=word_rnn_layer,
dropout=dropout).to(device)
sentence_rnn = SentenceWithPosition(
batch_size=batch_size,
word_rnn_size = word_rnn_size,
rnn_size = sentence_rnn_size,
rnn_layers=sentence_rnn_layer,
pos_size=pos_size,
pos_embedding_dim=pos_embedding_dim,
pos_embedding_file=pos_embedding_file
).to(device)
fc = nn.Linear(2 * word_rnn_size + 2 * sentence_rnn_size, num_classes+2).to(device)
drop = nn.Dropout(dropout).to(device)
transitions = nn.Parameter(torch.randn(tagset_size, tagset_size)).to(device)
transitions.data[tag_to_ix[START_TAG], :] = -10000
transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
```
## forward
```
inputs = Embedding(clauses)
queries = Embedding(keywords)
documents, word_attn = word_rnn(inputs, queries)
outputs, sentence_attn = sentence_rnn(documents, poses)
s_c = torch.cat((documents, outputs), dim=-1)
outputs = fc(drop(s_c))
outputs.size()
lstm_feats = copy(outputs)
lstm_feats.size()
```
### _forward_alg
```
init_alphas = torch.full((1, tagset_size), -10000.).to(device)
# START_TAG has all of the score.
init_alphas[0][tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
forward_var = init_alphas
# Iterate through the sentence
for feat in lstm_feats:
alphas_t = [] # The forward tensors at this timestep
for next_tag in range(tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = feat[next_tag].view(1, -1).expand(1, tagset_size)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = forward_var + trans_score + emit_score
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + transitions[tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
forward_score = alpha
init_alphas
```
### _score_sentence
```
lstm_feats.size()
tags = copy(targets)
score = torch.zeros(1).to(device)
tags = torch.cat((torch.full((tags.size(0), 1), tag_to_ix[START_TAG], dtype=torch.long).to(device), tags), dim=-1)
for feats, tag in zip(lstm_feats, tags):
score = torch.zeros(1).to(device)
tag = torch.cat([torch.LongTensor([tag_to_ix[START_TAG]]).to(device), tag])
for i, feat in enumerate(feats):
score = score + transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + transitions[tag_to_ix[STOP_TAG], tags[-1]]
tag
score = forward_score - gold_score
forward_score, gold_score, score
```
### _viterbi_decode
```
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, tagset_size), -10000.).to(device)
init_vvars[0][tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = init_vvars
for feat in lstm_feats:
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = forward_var + transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + transitions[tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
# Pop off the start tag (we dont want to return that to the caller)
start = best_path.pop()
assert start == tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
path_score.data, len(best_path)
best_path
```
|
github_jupyter
|
Lambda School Data Science
*Unit 2, Sprint 1, Module 3*
---
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
```
# Module Project: Ridge Regression
For this project, you'll return to the Tribecca Condo dataset. But this time, you'll look at the _entire_ dataset and try to predict property sale prices.
The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
## Directions
The tasks for this project are the following:
- **Task 1:** Import `csv` file using `wrangle` function.
- **Task 2:** Conduct exploratory data analysis (EDA), and modify `wrangle` function to engineer two subset your dataset to one-family dwellings whose price is between \\$100,000 and \\$2,000,000.
- **Task 3:** Split data into feature matrix `X` and target vector `y`.
- **Task 4:** Split feature matrix `X` and target vector `y` into training and test sets.
- **Task 5:** Establish the baseline mean absolute error for your dataset.
- **Task 6:** Build and train a `OneHotEncoder`, and transform `X_train` and `X_test`.
- **Task 7:** Build and train a `LinearRegression` model.
- **Task 8:** Build and train a `Ridge` model.
- **Task 9:** Calculate the training and test mean absolute error for your `LinearRegression` model.
- **Task 10:** Calculate the training and test mean absolute error for your `Ridge` model.
- **Task 11:** Create a horizontal bar chart showing the 10 most influencial features for your `Ridge` model.
**Note**
You should limit yourself to the following libraries for this project:
- `category_encoders`
- `matplotlib`
- `pandas`
- `sklearn`
# I. Wrangle Data
```
def wrangle(filepath):
# Import csv file
cols = ['BOROUGH', 'NEIGHBORHOOD',
'BUILDING CLASS CATEGORY', 'GROSS SQUARE FEET',
'YEAR BUILT', 'SALE PRICE', 'SALE DATE']
df = pd.read_csv(filepath, usecols=cols)
return df
filepath = DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv'
```
**Task 1:** Use the above `wrangle` function to import the `NYC_Citywide_Rolling_Calendar_Sales.csv` file into a DataFrame named `df`.
```
df = ...
```
**Task 2:** Modify the above `wrangle` function so that:
- The column `'SALE DATE'` becomes the `DatetimeIndex`.
- The dtype for the `'BOROUGH'` column is `object`, not `int`.
- The dtype for the `'SALE PRICE'` column is `int`, not `object`.
- The dataset includes only one-family dwellings (`BUILDING CLASS CATEGORY == '01 ONE FAMILY DWELLINGS'`).
- The dataset includes only properties whose sale price is between \\$100,000 and \\$2,000,000.
```
# Perform your exploratory data analysis here and
# modify the wrangle function above
```
# II. Split Data
**Task 3:** Split your dataset into the feature matrix `X` and the target vector `y`. You want to predict `'SALE_PRICE'`.
```
X = ...
y = ...
```
**Task 4:** Split `X` and `y` into a training set (`X_train`, `y_train`) and a test set (`X_test`, `y_test`).
- Your training set should include data from January to March 2019.
- Your test set should include data from April 2019.
```
X_train, y_train = ..., ...
X_test, y_test = ..., ...
```
# III. Establish Baseline
**Task 5:** Since this is a **regression** problem, you need to calculate the baseline mean absolute error for your model.
```
baseline_mae = ...
print('Baseline MAE:', baseline_mae)
```
# IV. Build Model
**Task 6:** Build and train a `OneHotEncoder` and then use it to transform `X_train` and `X_test`.
```
ohe = ...
XT_train = ...
XT_test = ...
```
**Task 7:** Build and train a `LinearRegression` model named `model_lr`. Remember to train your model using your _transformed_ feature matrix.
```
model_lr = ...
```
**Task 8:** Build and train a `Ridge` model named `model_r`. Remember to train your model using your _transformed_ feature matrix.
```
model_r = ...
```
# V. Check Metrics
**Task 9:** Check the training and test metrics for `model_lr`.
```
training_mae_lr = ...
test_mae_lr = ...
print('Linear Training MAE:', training_mae_lr)
print('Linear Test MAE:', test_mae_lr)
```
**Task 10:** Check the training and test metrics for `model_r`.
```
training_mae_r = ...
test_mae_r = ...
print('Ridge Training MAE:', training_mae_r)
print('Ridge Test MAE:', test_mae_r)
```
**Stretch Goal:** Calculate the training and test $R^2$ scores `model_r`.
```
# Caculate R^2 score
```
# IV. Communicate Results
**Task 11:** Create a horizontal barchart that plots the 10 most important coefficients for `model_r`, sorted by absolute value. Your figure should look like our example from class:

**Note:** Your figure shouldn't be identical to the one above. Your model will have different coefficients since it's been trained on different data. Only the formatting should be the same.
|
github_jupyter
|
# Simple ARIMAX
This code template is for Time Series Analysis and Forecasting to make scientific predictions based on historical time stamped data with the help of ARIMAX algorithm
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_absolute_error, mean_squared_error
warnings.filterwarnings("ignore")
```
### Initialization
Filepath of CSV file
```
file_path = ""
```
Variable containing the date time column name of the Time Series data
```
date = ""
```
Target feature for prediction.
```
target = ""
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
df = pd.read_csv(file_path)
df.head()
```
### Data Preprocessing
Since the majority of the machine learning models for Time Series Forecasting doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippets have functions, which removes the rows containing null value if any exists. And convert the string classes date column in the datasets to proper Date-time classes.
After the proper date conversions are done and null values are dropped, we set the Date column as the index value.
```
def data_preprocess(df, target, date):
df = df.dropna(axis=0, how = 'any')
df[date] = pd.to_datetime(df[date])
df = df.set_index(date)
return df
df = data_preprocess(df,target,date)
df.head()
df.plot(figsize = (15,8))
plt.show()
```
### Seasonality decomposition
Since Simple ARIMAX for non-seasonal data, we need to check for any seasonality in our time series and decompose it.
We use the Dickey Fuller Test for testing the seasonality and if the ADF Statistic value is positive, it means that the data has seasonality.
#### Dickey Fuller Test
The Dickey Fuller test is a common statistical test used to test whether a given Time series is stationary or not. The Augmented Dickey Fuller (ADF) test expands the Dickey-Fuller test equation to include high order regressive process in the model. We can implement the ADF test via the **adfuller()** function. It returns the following outputs:
1. adf : float
> The test statistic.
2. pvalue : float
> MacKinnon's approximate p-value based on MacKinnon(1994, 2010). It is used alongwith the test statistic to reject or accept the null hypothesis.
3. usedlag : int
> Number of lags considered for the test
4. critical values : dict
> Critical values for the test statistic at the 1 %, 5 %, and 10 % levels. Based on MacKinnon (2010).
For more information on the adfuller() function [click here](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.adfuller.html)
```
def dickeyFuller(df,target):
# Applying Dickey Fuller Test
X = df.values
result = adfuller(X)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Number of lags used: %d' % result[2])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
# Decomposing Seasonality if it exists
if result[0]>0:
df[target] = df[target].rolling(12).mean()
return df
```
To remove the seasonality we use the rolling mean technique for smoothing our data and decomposing any seasonality.
This method provides rolling windows over the data. On the resulting windows, we can perform calculations using a statistical function (in this case the mean) in order to decompose the seasonality.
For more information about rolling function [click here](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rolling.html)
```
df = dickeyFuller(df,target)
```
### Autocorrelation Plot
We can calculate the correlation for time series observations with observations with previous time steps, called lags. Because the correlation of the time series observations is calculated with values of the same series at previous times, this is called a serial correlation, or an autocorrelation.
A plot of the autocorrelation of a time series by lag is called the AutoCorrelation Function, or the acronym ACF.
An autocorrelation plot shows whether the elements of a time series are positively correlated, negatively correlated, or independent of each other.
The plot shows the value of the autocorrelation function (acf) on the vertical axis ranging from –1 to 1.
There are vertical lines (a “spike”) corresponding to each lag and the height of each spike shows the value of the autocorrelation function for the lag.
[API](https://www.statsmodels.org/stable/generated/statsmodels.graphics.tsaplots.plot_acf.html)
```
x = plot_acf(df, lags=40)
x.set_size_inches(15, 10, forward=True)
plt.show()
```
### Partial Autocorrelation Plot
A partial autocorrelation is a summary of the relationship between an observation in a time series with observations at prior time steps with the relationships of intervening observations removed.
The partial autocorrelation at lag k is the correlation that results after removing the effect of any correlations due to the terms at shorter lags. By examining the spikes at each lag we can determine whether they are significant or not. A significant spike will extend beyond the significant limits, which indicates that the correlation for that lag doesn't equal zero.
[API](https://www.statsmodels.org/stable/generated/statsmodels.graphics.tsaplots.plot_pacf.html)
```
y = plot_pacf(df, lags=40)
y.set_size_inches(15, 10, forward=True)
plt.show()
```
### Data Splitting
Since we are using a univariate dataset, we can directly split our data into training and testing subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
size = int(len(df)*0.9)
df_train, df_test = df.iloc[:size], df.iloc[size:]
```
### Model
The ARIMAX model is an extended version of the ARIMA model. It includes also other independent (predictor) variables. The model is also referred to as the vector ARIMA or the dynamic regression model.
The ARIMAX model is similar to a multivariate regression model, but allows to take advantage of autocorrelation that may be present in residuals of the regression to improve the accuracy of a forecast.
The API used here is from the statsmodels library. Statsmodels does not have a dedicated API for ARIMAX but the model can be created via <Code>SARIMAX</Code> API by setting the parameter <Code>seasonal_order</Code> = (0,0,0,0) i.e., no seasonality
#### Model Tuning Parameters
1. endog: array_like
>The observed time-series process
2. exog: array_like, optional
>Array of exogenous regressors, shaped nobs x k.
3. order: iterable or iterable of iterables, optional
>The (p,d,q) order of the model for the number of AR parameters, differences, and MA parameters. d must be an integer indicating the integration order of the process, while p and q may either be an integers indicating the AR and MA orders (so that all lags up to those orders are included) or else iterables giving specific AR and / or MA lags to include. Default is an AR(1) model: (1,0,0).
4. seasonal_order: iterable, optional
>The (P,D,Q,s) order of the seasonal component of the model for the AR parameters, differences, MA parameters, and periodicity. D must be an integer indicating the integration order of the process, while P and Q may either be an integers indicating the AR and MA orders (so that all lags up to those orders are included) or else iterables giving specific AR and / or MA lags to include. s is an integer giving the periodicity (number of periods in season), often it is 4 for quarterly data or 12 for monthly data. Default is no seasonal effect.
5. trend: str{‘n’,’c’,’t’,’ct’} or iterable, optional
>Parameter controlling the deterministic trend polynomial . Can be specified as a string where ‘c’ indicates a constant (i.e. a degree zero component of the trend polynomial), ‘t’ indicates a linear trend with time, and ‘ct’ is both. Can also be specified as an iterable defining the non-zero polynomial exponents to include, in increasing order. For example, [1,1,0,1] denotes
. Default is to not include a trend component.
6. measurement_error: bool, optional
>Whether or not to assume the endogenous observations endog were measured with error. Default is False.
7. time_varying_regression: bool, optional
>Used when an explanatory variables, exog, are provided provided to select whether or not coefficients on the exogenous regressors are allowed to vary over time. Default is False.
8. mle_regression: bool, optional
>Whether or not to use estimate the regression coefficients for the exogenous variables as part of maximum likelihood estimation or through the Kalman filter (i.e. recursive least squares). If time_varying_regression is True, this must be set to False. Default is True.
Refer to the official documentation at [statsmodels](https://www.statsmodels.org/dev/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html) for more parameters and information
```
model=SARIMAX(df[target],order=(1, 0, 0),seasonal_order=(0,0,0,0))
result=model.fit()
```
### Model Summary
After fitting the training data into our ARIMAX and training it, we can take a look at a brief summary of our model by using the **summary()** function. The followings aspects are included in our model summary:
1. Basic Model Details: The first column of our summary table contains the basic details regarding our model such as:
a. Name of dependent variable
b. Model used along with parameters
c. Date and time of model deployment
d. Time Series sample used to train the model
2. Probablistic Statistical Measures: The second column gives the values of the probablistic measures obtained by our model:
a. Number of observations
b. Log-likelihood, which comes from Maximum Likelihood Estimation, a technique for finding or optimizing the
parameters of a model in response to a training dataset.
c. Standard Deviation of the innovations
d. Akaike Information Criterion (AIC), which is derived from frequentist probability.
e. Bayesian Information Criterion (BIC), which is derived from Bayesian probability.
f. Hannan-Quinn Information Criterion (HQIC), which is an alternative to AIC and is derived using the log-likelihood and
the number of observartions.
3. Statistical Measures and Roots: The summary table also consists of certain other statistical measures such as z-value, standard error as well as the information on the characteristic roots of the model.
```
result.summary()
```
#### Simple Forecasting
```
df_train.tail()
```
### Predictions
By specifying the start and end time for our predictions, we can easily predict the future points in our time series with the help of our model.
```
d = df.drop([target], axis = 1)
start_date = d.iloc[size].name
end_date = d.iloc[len(df)-1].name
df_pred = result.predict(start = start_date, end = end_date)
df_pred.head()
```
## Model Accuracy
We will use the three most popular metrics for model evaluation: Mean absolute error (MAE), Mean squared error (MSE), or Root mean squared error (RMSE).
```
test = df_test[target]
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(test,df_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(test,df_pred)))
print("Root Mean Squared Error {:.2f}".format(np.sqrt(mean_squared_error(test,df_pred))))
```
## Predictions Plot
First we make use of plot to plot the predicted values returned by our model based on the test data.
After that we plot the actual test data to compare our predictions.
```
plt.figure(figsize=(18,5))
plt.plot(df_pred[start_date:end_date], color = "red")
plt.plot(df_test, color = "blue")
plt.title("Predictions vs Actual", size = 24)
plt.plot(fontsize="x-large")
plt.show()
```
#### Creator: Viraj Jayant, Github: [Profile](https://github.com/Viraj-Jayant)
|
github_jupyter
|
# Plotting with matplotlib
### Setup
```
%matplotlib inline
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows', 10)
```
### Getting the pop2019 DataFrame
```
csv ='../csvs/nc-est2019-agesex-res.csv'
pops = pd.read_csv(csv, usecols=['SEX', 'AGE', 'POPESTIMATE2019'])
def fix_sex(sex):
if sex == 0:
return 'T'
elif sex == 1:
return 'M'
else: # 2
return 'F'
pops.SEX = pops.SEX.apply(fix_sex)
pops = pops.pivot(index='AGE', columns='SEX', values='POPESTIMATE2019')
pops
pops.plot();
```
### Create a Line Plot
```
# Create the plot.
plt_pop = pops.plot(
title = "Population by Age: 2019",
style=['b--', 'm^', 'k-'],
figsize=(12, 6),
lw=2
)
# Include gridlines.
plt_pop.grid(True)
# Set the x and y labels.
plt_pop.set_xlabel('Age')
plt_pop.set_ylabel('Population')
# Create the legend.
plt_pop.legend(['M', 'F', 'A'], loc="lower left")
# Set x and y ticks.
plt_pop.set_xticks(np.arange(0, 101, 10))
yticks = np.arange(500000, 5000001, 500000)
ytick_labels = pd.Series(yticks).apply(lambda y: "{:,}".format(y))
plt_pop.set_yticks(yticks)
plt_pop.set_yticklabels(ytick_labels);
```
### Create a Bar Plot
```
csv ='../csvs/mantle.csv'
mantle = pd.read_csv(csv, index_col='Year',
usecols=['Year', '2B', '3B', 'HR'])
mantle
# Create the plot.
plt_mantle = mantle.plot(
kind='bar',
title = 'Mickey Mantle: Doubles, Triples, and Home Runs',
figsize=(12, 6),
width=.8,
fontsize=16
)
# Include gridlines.
plt_mantle.grid(True)
# Set the x and y labels.
plt_mantle.set_ylabel('Number', fontsize=20)
plt_mantle.set_xlabel('Year', fontsize=20)
# Hatch the bars.
bars = plt_mantle.patches
for i in np.arange(0, 18):
bars[i].set_hatch('+')
for i in np.arange(18, 36):
bars[i].set_hatch('o')
for i in np.arange(36, 54):
bars[i].set_hatch('/')
# Create the legend.
plt_mantle.legend(['Doubles', 'Triples', 'Home Runs'],
loc="upper right", fontsize='xx-large');
plt_mantle = mantle.plot(kind='bar',
title = 'Mickey Mantle: Doubles, Triples, and Home Runs',
figsize=(12, 6),
width=.8,
fontsize=16,
stacked=True)
plt_mantle.set_ylabel('Number', fontsize=20)
plt_mantle.set_xlabel('Year', fontsize=20)
plt_mantle.grid(True)
bars = plt_mantle.patches
for i in np.arange(0, 18):
bars[i].set_hatch('-')
for i in np.arange(18, 36):
bars[i].set_hatch('o')
for i in np.arange(36, 54):
bars[i].set_hatch('/')
plt_mantle.legend(['Doubles','Triples','Home Runs'],
loc="upper right", fontsize='xx-large');
```
|
github_jupyter
|
```
import string
import random
from deap import base, creator, tools
## Create a Finess base class which is to be minimized
# weights is a tuple -sign tells to minimize, +1 to maximize
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
```
This will define a class ```FitnessMax``` which inherits the Fitness class of deep.base module. The attribute weight which is a tuple is used to specify whether fitness function is to be maximized (weights=1.0) or minimized weights=-1.0. The DEAP library allows multi-objective Fitness function.
### Individual
Next we create a ```Individual``` class, which inherits the class ```list``` and has the ```FitnessMax``` class in its Fitness attribute.
```
# Now we create a individual class
creator.create("Individual", list, fitness=creator.FitnessMax)
```
# Population
Once the individuals are created we need to create population and define gene pool, to do this we use DEAP toolbox. All the objects that we will need now onwards- an individual, the population, the functions, the operators and the arguments are stored in the container called ```Toolbox```
We can add or remove content in the container ```Toolbox``` using ```register()``` and ```unregister()``` methods
```
toolbox = base.Toolbox()
# Gene Pool
toolbox.register("attr_string", random.choice, string.ascii_letters + string.digits )
#Number of characters in word
word = list('hello')
N = len(word)
# Initialize population
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_string, N )
toolbox.register("population",tools.initRepeat, list, toolbox.individual)
def evalWord(individual, word):
#word = list('hello')
return sum(individual[i] == word[i] for i in range(len(individual))),
toolbox.register("evaluate", evalWord, word)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
```
We define the other operators/functions we will need by registering them in the toolbox. This allows us to easily switch between the operators if desired.
## Evolving the Population
Once the representation and the genetic operators are chosen, we will define an algorithm combining all the individual parts and performing the evolution of our population until the One Max problem is solved. It is good style in programming to do so within a function, generally named main().
Creating the Population
First of all, we need to actually instantiate our population. But this step is effortlessly done using the population() method we registered in our toolbox earlier on.
```
def main():
random.seed(64)
# create an initial population of 300 individuals (where
# each individual is a list of integers)
pop = toolbox.population(n=300)
# CXPB is the probability with which two individuals
# are crossed
#
# MUTPB is the probability for mutating an individual
CXPB, MUTPB = 0.5, 0.2
print("Start of evolution")
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
#print(ind, fit)
ind.fitness.values = fit
print(" Evaluated %i individuals" % len(pop))
# Extracting all the fitnesses of
fits = [ind.fitness.values[0] for ind in pop]
# Variable keeping track of the number of generations
g = 0
# Begin the evolution
while max(fits) < 5 and g < 1000:
# A new generation
g = g + 1
print("-- Generation %i --" % g)
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
# cross two individuals with probability CXPB
if random.random() < CXPB:
toolbox.mate(child1, child2)
# fitness values of the children
# must be recalculated later
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
# mutate an individual with probability MUTPB
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
print(" Evaluated %i individuals" % len(invalid_ind))
# The population is entirely replaced by the offspring
pop[:] = offspring
# Gather all the fitnesses in one list and print the stats
fits = [ind.fitness.values[0] for ind in pop]
length = len(pop)
mean = sum(fits) / length
sum2 = sum(x*x for x in fits)
std = abs(sum2 / length - mean**2)**0.5
print(" Min %s" % min(fits))
print(" Max %s" % max(fits))
print(" Avg %s" % mean)
print(" Std %s" % std)
print("-- End of (successful) evolution --")
best_ind = tools.selBest(pop, 1)[0]
print("Best individual is %s, %s" % (''.join(best_ind), best_ind.fitness.values))
main()
```
|
github_jupyter
|
# 06_Business_Insights
In this section, we will expend upon the features used by the model and attempt to explain its significance as well as contributions to the pricing model.
Accordingly, in Section Four, we identified the following key features that that are strong predictors of housing price based upon a combination of feature engineering coupled with recursive feature elimination.
$$
\hat{y} = \beta_0 + \begin{align} \beta_1\textit{(age_since_built)} + \beta_2\textit{(Gr_Liv_Area)} + \beta_3\textit{(Total_Bsmt_SF)} + \beta_4\textit{(house_exter_score)} + \beta_j\textit{(Land_Contour)}_j
\end{align}$$
Where:
$\textit{house_exter_score}$ = ['Overall Qual'] + ['Overall Cond'] +['Exter Qual'] + ['Exter Cond']
| Score |
| --- |
|age_since_built |
|Total Bsmt SF |
|Land Contour_Lvl |
|house_exter_score |
|Gr Liv Area |
|Land Contour_Low |
|Land Contour_HLS |
```
# model coefficients
prod_model_rfe
```
## Import Libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as ticker
%matplotlib inline
rfe_columns = ['Total Bsmt SF', 'Gr Liv Area', 'age_since_built', 'house_exter_score',
'Land Contour_HLS', 'Land Contour_Low', 'Land Contour_Lvl']
```
## Load in Training Set for Exploration
```
train = pd.read_csv('./datasets/imputed_train.csv')
new_train = train[(train['Total Bsmt SF']<4000) & (train['Total Bsmt SF']>0)]
plt.figure(figsize=(15,10))
ax = sns.scatterplot(x='Total Bsmt SF',y='SalePrice',data=new_train,hue='Total Bsmt SF')
ax.set_title("Total Basement Area against Sale Prices", fontname='Helvetica', fontsize=18,loc='left')
ax.set_xlabel('Total Basement Area / SqFt',fontname='Helvetica',fontsize=12)
ax.set_ylabel('Sale Price / $',fontname='Helvetica',fontsize=12)
plt.savefig("./img/bsmt_area.png",dpi=300)
plt.figure(figsize=(15,10))
ax = sns.scatterplot(x='age_since_built',y='SalePrice',data=new_train,hue='age_since_built')
ax.set_title("Building Age against Sale Prices", fontname='Helvetica', fontsize=18,loc='left')
ax.set_xlabel('Building Age / yr',fontname='Helvetica',fontsize=12)
ax.set_ylabel('Sale Price / $',fontname='Helvetica',fontsize=12)
plt.savefig("./img/building_age.png",dpi=300)
train.columns
plt.figure(figsize=(15,10))
ax = sns.pointplot(x='house_exter_score',y='SalePrice',data=new_train)
ax.set_title("Housing Scores against Sale Prices", fontname='Helvetica', fontsize=18,loc='left')
ax.set_xlabel('Score',fontname='Helvetica',fontsize=12)
ax.set_ylabel('Sale Price / $',fontname='Helvetica',fontsize=12)
for ind, label in enumerate(ax.get_xticklabels()):
if ind % 5 == 0: # every 10th label is kept
label.set_visible(True)
else:
label.set_visible(False)
plt.savefig("./img/housing_score.png",dpi=300)
plt.figure(figsize=(15,10))
ax = sns.scatterplot(x='Gr Liv Area',y='SalePrice',data=new_train,hue='Gr Liv Area')
ax.set_title("Living Area against Sale Prices", fontname='Helvetica', fontsize=18,loc='left')
ax.set_xlabel('Living Area / Sqft',fontname='Helvetica',fontsize=12)
ax.set_ylabel('Sale Price / $',fontname='Helvetica',fontsize=12)
plt.savefig("./img/living_area.png",dpi=300)
new_train_melt = new_train[['Land Contour_Low', 'Land Contour_HLS','Land Contour_Lvl','SalePrice']].melt(id_vars='SalePrice' ,value_vars=['Land Contour_Low', 'Land Contour_HLS','Land Contour_Lvl'])
plt.figure(figsize=(15,10))
ax = sns.boxplot(x='variable',y='SalePrice',order=['Land Contour_Lvl','Land Contour_Low','Land Contour_HLS'],data=new_train_melt[new_train_melt['value']!=0])
ax.set_title("Land Contours and relationship to Sale Prices", fontname='Helvetica', fontsize=18,loc='left')
ax.set_xlabel('Score',fontname='Helvetica',fontsize=12)
ax.set_ylabel('Sale Price / $',fontname='Helvetica',fontsize=12)
plt.savefig("./img/contour_plot.png",dpi=300)
```
## Key Takeaways
## Conclusion And Recommendations
House age, Land Contours, Housing Scores as well as Gross floor areas are strong predictors of housing prices. Using these few variables, a prospective home seller can look into improving areas such as home quality, condition as well as look at expanding gross floor areas via careful remodelling of their homes.
To make this model location agnostic, we may impute features such as accessibility to the city (via distances) and crime rates which can affect buyer's judgement.
|
github_jupyter
|
# Bulk RNA-seq eQTL analysis
This notebook provide a command generator on the XQTL workflow so it can automate the work for data preprocessing and association testing on multiple data collection as proposed.
```
%preview ../images/eqtl_command.png
```
This master control notebook is mainly to serve the 8 tissues snuc_bulk_expression analysis, but should be functional on all analysis where expression data are are a tsv table in a bed.gz like format.
Input:
A recipe file,each row is a data collection and with the following column:
Theme
name of dataset, must be different, each uni_study analysis will be performed in a folder named after each, meta analysis will be performed in a folder named as {study1}_{study2}
The column name must contain the # and be the first column
genotype_file
{Path to a whole genome genotype file}
molecular_pheno
{Path to file}
covariate_file
{Path to file}
### note: Only data collection from the same Populations and conditions will me merged to perform Fix effect meta analysis
A genotype list, with two column, `#chr` and `path`
This can be generated by the genotype session of this command generator.
Output:
1 set of association_scan result for each tissue (each row in the recipe)
```
pd.DataFrame({"Theme":"MWE","molecular_pheno":"MWE.log2cpm.tsv","genotype_file":"MWE.bed","covariate_file":"MWE.covariate.cov.gz"}).to_csv("/mnt/vast/hpc/csg/snuc_pseudo_bulk/eight_tissue_analysis/MWE/command_generator",sep = "\t",index = 0)
```
| Theme | molecular_pheno | genotype_file |covariate_file|
| ----------- | ----------- |-----------||
| MWE | MWE.log2cpm.tsv | /data/genotype_data/GRCh38_liftedover_sorted_all.add_chr.leftnorm.filtered.bed |MWE.covariate.cov.gz|
## Minimal Working Example
### Genotype
The MWE for the genotype session can be ran with the following commands, please be noted that a [seperated MWE genoFile]( https://drive.google.com/file/d/1zaacRlZ63Nf_oEUv2nIiqekpQmt2EDch/view?usp=sharing) was needed.
```
sos run pipeline/eQTL_analysis_commands.ipynb plink_per_chrom \
--ref_fasta reference_data/GRCh38_full_analysis_set_plus_decoy_hla.noALT_noHLA_noDecoy_ERCC.fasta \
--genoFile mwe_genotype.vcf.gz \
--dbSNP_vcf reference_data/00-All.vcf.gz \
--sample_participant_lookup reference_data/sampleSheetAfterQC.txt -n
```
### Per tissue analysis
A MWE for the core per tissue analysis can be ran with the following commands, a complete collection of input file as well as intermediate output of the analysis can be found at [here](https://drive.google.com/drive/folders/16ZUsciZHqCeeEWwZQR46Hvh5OtS8lFtA?usp=sharing).
```
sos run pipeline/eQTL_analysis_commands.ipynb sumstat_merge \
--recipe MWE.recipe \
--genotype_list plink_files_list.txt \
--annotation_gtf reference_data/genes.reformatted.gene.gtf \
--sample_participant_lookup reference_data/sampleSheetAfterQC.txt \
--Association_option "TensorQTL" -n
sos run pipeline/eQTL_analysis_commands.ipynb sumstat_merge \
--recipe MWE.recipe \
--genotype_list plink_files_list.txt \
--annotation_gtf /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/genes.reformatted.gene.gtf \
--sample_participant_lookup /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/sampleSheetAfterQC.txt \
--Association_option "APEX" -n
```
## Example for running the workflow
This will run the workflow from via several submission
```
sos run ~/GIT/xqtl-pipeline/pipeline/eQTL_analysis_commands.ipynb sumstat_merge \
--recipe /mnt/vast/hpc/csg/snuc_pseudo_bulk//data/recipe_8tissue_new \
--genotype_list /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/genotype_qced/plink_files_list.txt \
--annotation_gtf /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/genes.reformatted.gene.gtf \
--sample_participant_lookup /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/sampleSheetAfterQC.txt \
--Association_option "TensorQTL" --run &
sos run ~/GIT/xqtl-pipeline/pipeline/eQTL_analysis_commands.ipynb sumstat_merge \
--recipe <(cat /mnt/vast/hpc/csg/snuc_pseudo_bulk//data/recipe_8tissue_new | head -2) \
--genotype_list /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/genotype_qced/plink_files_list.txt \
--annotation_gtf /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/genes.reformatted.gene.gtf \
--sample_participant_lookup /mnt/vast/hpc/csg/snuc_pseudo_bulk/data/reference_data/sampleSheetAfterQC.txt \
--factor_option "PEER" --Association_option "TensorQTL" -n
[global]
## The aforementioned input recipe
parameter: recipe = path(".") # Added option to run genotype part without the recipe input, which was not used.
## Overall wd, the file structure of analysis is wd/[steps]/[sub_dir for each steps]
parameter: cwd = path("output")
## Diretory to the excutable
parameter: exe_dir = path("~/GIT/xqtl-pipeline/")
parameter: container_base_bioinfo = 'containers/bioinfo.sif'
parameter: container_apex = 'containers/apex.sif'
parameter: container_PEER = 'containers/PEER.sif'
parameter: container_TensorQTL = 'containers//TensorQTL.sif'
parameter: container_rnaquant = 'containers/rna_quantification.sif'
parameter: container_flashpca = 'containers/flashpcaR.sif'
parameter: container_susie = 'containers/stephenslab.sif'
parameter: sample_participant_lookup = path
parameter: phenotype_id_type = "gene_name"
parameter: yml = path("csg.yml")
parameter: run = False
interpreter = 'cat' if not run else 'bash'
import pandas as pd
if recipe.is_file():
input_inv = pd.read_csv(recipe, sep = "\t").to_dict("records")
import os
parameter: jobs = 50 # Number of jobs that are submitted to the cluster
parameter: queue = "csg" # The queue that jobs are submitted to
submission = f'-J {jobs} -c {yml} -q {queue}'
## Control of the workflow
### Factor option (PEER vs BiCV)
parameter: factor_option = "PEER"
### Association scan option (APEX vs TensorQTL)
parameter: Association_option = "TensorQTL"
```
## Data Preprocessing
### Genotype Preprocessing (Once for all tissues)
```
[dbSNP]
parameter: dbSNP_vcf = path
input: dbSNP_vcf
parameter: add_chr = True
output: f'{cwd}/reference_data/{_input:bnn}.add_chr.variants.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]/pipeline//VCF_QC.ipynb dbsnp_annotate \
--genoFile $[_input] \
--cwd $[_output:d] \
--container $[container_base_bioinfo] \
$[submission if yml.is_file() else "" ] $["--add_chr" if add_chr else "--no-add_chr" ]
[VCF_QC]
parameter: genoFile = path
parameter: ref_fasta = path
parameter: add_chr = True
input: genoFile, output_from("dbSNP")
output: f'{cwd}/data_preprocessing/{_input[0]:bnn}.{"add_chr." if add_chr else False}leftnorm.filtered.bed'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]//pipeline/VCF_QC.ipynb qc \
--genoFile $[_input[0]] \
--dbsnp-variants $[_input[1]] \
--reference-genome $[ref_fasta] \
--cwd $[_output:d] \
--container $[container_base_bioinfo] \
--walltime "24h" \
$[submission if yml.is_file() else "" ] $["--add_chr" if add_chr else "--no-add_chr" ]
[plink_QC]
# minimum MAF filter to use. 0 means do not apply this filter.
parameter: maf_filter = 0.05
# maximum MAF filter to use. 0 means do not apply this filter.
parameter: maf_max_filter = 0.0
# Maximum missingess per-variant
parameter: geno_filter = 0.1
# Maximum missingness per-sample
parameter: mind_filter = 0.1
# HWE filter
parameter: hwe_filter = 1e-06
input: output_from("VCF_QC")
output: f'{_input:n}.filtered.bed'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]//pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd $[_output:d] \
--genoFile $[_input] \
--maf-filter $[maf_filter] \
--geno-filter $[geno_filter] \
--mind-filter $[mind_filter] \
--hwe-filter $[hwe_filter] \
--mem 40G \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ]
[plink_per_chrom]
input: output_from("plink_QC")
output: f'{cwd:a}/data_preprocessing/{_input:bn}.plink_files_list.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]//pipeline/genotype_formatting.ipynb plink_by_chrom \
--genoFile $[_input] \
--cwd $[_output:d] \
--chrom `cut -f 1 $[_input:n].bim | uniq | sed "s/chr//g"` \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ]
[plink_to_vcf]
parameter: genotype_list = path
input: genotype_list
import pandas as pd
parameter: genotype_file_name = pd.read_csv(_input,"\t",nrows = 1).values.tolist()[0][1]
output: f'{cwd:a}/data_preprocessing/{path(genotype_file_name):bnn}.vcf_files_list.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]//pipeline/genotype_formatting.ipynb plink_to_vcf \
--genoFile $[_input] \
--cwd $[_output:d] \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ]
[plink_per_gene]
# The plink genotype file
parameter: genoFile = path
input: output_from("region_list_concat"),genoFile
output: f'{cwd:a}/{_input[1]:bn}.plink_files_list.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]/pipeline//genotype_formatting.ipynb plink_by_gene \
--genoFile $[_input[1]] \
--cwd $[_output:d] \
--region_list $[_input[0]] \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ]
```
### Molecular Phenotype Processing
```
[annotation]
stop_if(not recipe.is_file(), msg = "Please specify a valid recipe as input")
import os
parameter: annotation_gtf = path
input: for_each = "input_inv"
output: f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/phenotype_data/{path(_input_inv["molecular_pheno"]):bn}.bed.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]/pipeline/gene_annotation.ipynb annotate_coord \
--cwd $[_output:d] \
--phenoFile $[_input_inv["molecular_pheno"]] \
--annotation-gtf $[annotation_gtf] \
--sample-participant-lookup $[sample_participant_lookup] \
--container $[container_rnaquant] \
--phenotype-id-type $[phenotype_id_type] $[submission if yml.is_file() else "" ]
[region_list_generation]
parameter: annotation_gtf = path
input: output_from("annotation"), group_with = "input_inv"
output: pheno_mod = f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/phenotype_data/{_input:bnn}.region_list'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/gene_annotation.ipynb region_list_generation \
--cwd $[_output:d] \
--phenoFile $[_input]\
--annotation-gtf $[annotation_gtf] \
--sample-participant-lookup $[sample_participant_lookup] \
--container $[container_rnaquant] \
--phenotype-id-type $[phenotype_id_type] $[submission if yml.is_file() else "" ]
[region_list_concat]
input: output_from("region_list_generation"), group_by = "all"
output: f'{cwd:a}/data_preprocessing/phenotype_data/concat.region_list'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
cat $[_input:a] | sort | uniq > $[_output:a]
[phenotype_partition_by_chrom]
input: output_from("annotation"),output_from("region_list_generation"), group_with = "input_inv"
output: per_chrom_pheno_list = f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/phenotype_data/{_input[0]:bn}.processed_phenotype.per_chrom.recipe'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/phenotype_formatting.ipynb partition_by_chrom \
--cwd $[_output:d] \
--phenoFile $[_input[0]:a] \
--region-list $[_input[1]:a] \
--container $[container_rnaquant] \
--mem 4G $[submission if yml.is_file() else "" ]
```
### Genotype Processing
Since genotype is shared among the eight tissue, the QC of whole genome file is not needed. Only pca needed to be run again.
```
[sample_match]
input: for_each = "input_inv"
output: f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/{sample_participant_lookup:bn}.filtered.txt',
geno = f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/{sample_participant_lookup:bn}.filtered_geno.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/sample_matcher.ipynb filtered_sample_list \
--cwd $[_output[0]:d] \
--phenoFile $[_input_inv["molecular_pheno"]] \
--genoFile $[path(_input_inv["genotype_file"]):n].fam \
--sample-participant-lookup $[sample_participant_lookup] \
--container $[container_rnaquant] \
--translated_phenoFile $[submission if yml.is_file() else "" ]
[king]
parameter: maximize_unrelated = False
input:output_from("sample_match")["geno"], group_with = "input_inv"
output: related = f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/genotype_data/{path(_input_inv["genotype_file"]):bn}.{_input_inv["Theme"]}.related.bed',
unrelated = f'{cwd:a}/data_preprocessing/{_input_inv["Theme"]}/genotype_data/{path(_input_inv["genotype_file"]):bn}.{_input_inv["Theme"]}.unrelated.bed'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/GWAS_QC.ipynb king \
--cwd $[_output[0]:d] \
--genoFile $[_input_inv["genotype_file"]] \
--name $[_input_inv["Theme"]] \
--keep-samples $[_input] \
--container $[container_base_bioinfo] \
--walltime 48h $[submission if yml.is_file() else "" ] $["--maximize_unrelated" if maximize_unrelated else "--no-maximize_unrelated"]
[unrelated_QC]
input: output_from("king")["unrelated"]
output: unrelated_bed = f'{_input:n}.filtered.prune.bed',
prune = f'{_input:n}.filtered.prune.in'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/GWAS_QC.ipynb qc \
--cwd $[_output[0]:d] \
--genoFile $[_input] \
--exclude-variants /mnt/vast/hpc/csg/snuc_pseudo_bulk/Ast/genotype/dupe_snp_to_exclude \
--maf-filter 0.05 \
--container $[container_base_bioinfo] \
--mem 40G $[submission if yml.is_file() else "" ]
[related_QC]
input: output_from("king")["related"],output_from("unrelated_QC")["prune"]
output: f'{_input[0]:n}.filtered.extracted.bed'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/GWAS_QC.ipynb qc_no_prune \
--cwd $[_output[0]:d] \
--genoFile $[_input[0]] \
--maf-filter 0 \
--geno-filter 0 \
--mind-filter 0.1 \
--hwe-filter 0 \
--keep-variants $[_input[1]] \
--container $[container_base_bioinfo] \
--mem 40G $[submission if yml.is_file() else "" ]
```
## Factor Analysis
```
[pca]
input: output_from("unrelated_QC")["unrelated_bed"],group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/pca/{_input:bn}.pca.rds',
f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/pca/{_input:bn}.pca.scree.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/PCA.ipynb flashpca \
--cwd $[_output:d] \
--genoFile $[_input] \
--container $[container_flashpca] $[submission if yml.is_file() else "" ]
[projected_sample]
# The percentage of PVE explained
parameter: PVE_treshold = 0.7
input: output_from("related_QC"),output_from("pca"), group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/pca/{_input[0]:bn}.pca.projected.rds',
f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/pca/{_input[0]:bn}.pca.projected.scree.txt'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/PCA.ipynb project_samples \
--cwd $[_output:d] \
--genoFile $[_input[0]] \
--pca-model $[_input[1]] \
--maha-k `awk '$3 < $[PVE_treshold]' $[_input[2]] | tail -1 | cut -f 1 ` \
--container $[container_flashpca] $[submission if yml.is_file() else "" ]
[merge_pca_covariate]
# The percentage of PVE explained
parameter: PVE_treshold = 0.7
input: output_from("projected_sample"),group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/covariates/{path(_input_inv["covariate_file"]):bn}.pca.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/covariate_formatting.ipynb merge_pca_covariate \
--cwd $[_output:d] \
--pcaFile $[_input[0]:a] \
--covFile $[path(_input_inv["covariate_file"])] \
--tol_cov 0.3 \
--k `awk '$3 < $[PVE_treshold]' $[_input[1]] | tail -1 | cut -f 1 ` \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ] --name $[_output:bn] --outliersFile $[_input[0]:an].outliers
[resid_exp]
input: output_from("merge_pca_covariate"),output_from("annotation"),group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/resid_phenotype/{_input[1]:bnn}.{_input[0]:bn}.resid.bed.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/covariate_formatting.ipynb compute_residual \
--cwd $[_output:d] \
--phenoFile $[_input[1]:a] \
--covFile $[_input[0]:a] \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ]
[factor]
parameter: N = 0
input: output_from("resid_exp"),group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/covariates/{_input[0]:bnn}.{factor_option}.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]/pipeline/$[factor_option]_factor.ipynb $[factor_option] \
--cwd $[_output:d] \
--phenoFile $[_input[0]:a] \
--container $[container_apex if factor_option == "BiCV" else container_PEER] \
--walltime 24h \
--numThreads 8 \
--iteration 1000 \
--N $[N] $[submission if yml.is_file() else "" ]
[merge_factor_covariate]
# The percentage of PVE explained
parameter: PVE_treshold = 0.7
input: output_from("factor"),output_from("merge_pca_covariate"),group_with = "input_inv"
output: f'{cwd}/data_preprocessing/{_input_inv["Theme"]}/covariates/{_input[0]:bn}.cov.gz'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output}.stderr', stdout = f'{_output}.stdout'
sos run $[exe_dir]/pipeline/covariate_formatting.ipynb merge_factor_covariate \
--cwd $[_output:d] \
--factorFile $[_input[0]:a] \
--covFile $[_input[1]:a] \
--container $[container_base_bioinfo] $[submission if yml.is_file() else "" ] --name $[_output:bn]
```
## Association Scan
```
[TensorQTL]
# The number of minor allele count as treshold for the analysis
parameter: MAC = 0
# The minor allele frequency as treshold for the analysis, overwrite MAC
parameter: maf_threshold = 0
parameter: genotype_list = path
input: genotype_list, output_from("phenotype_partition_by_chrom"),output_from("merge_factor_covariate"),group_with = "input_inv"
output: f'{cwd:a}/association_scan/{_input_inv["Theme"]}/TensorQTL/TensorQTL.cis._recipe.tsv'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/TensorQTL.ipynb cis \
--genotype-list $[_input[0]] \
--phenotype-list $[_input[1]] \
--covariate-file $[_input[2]] \
--cwd $[_output:d] \
--container $[container_TensorQTL] $[submission if yml.is_file() else "" ] $[f'--MAC {MAC}' if MAC else ""] $[f'--maf_threshold {maf_threshold}' if maf_threshold else ""]
[APEX]
parameter: genotype_list = path
input: output_from("plink_to_vcf"), output_from("phenotype_partition_by_chrom"),output_from("merge_factor_covariate"),group_with = "input_inv"
output: f'{cwd:a}/association_scan/{_input_inv["Theme"]}/APEX/APEX_QTL_recipe.tsv'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/APEX.ipynb cis \
--genotype-list $[_input[0]] \
--phenotype-list $[_input[1]] \
--covariate-file $[_input[2]] \
--cwd $[_output:d] \
--container $[container_apex] $[submission if yml.is_file() else "" ] --name $[_input[1]:bnn]
```
## Trans Association Scan
```
[TensorQTL_Trans]
parameter: MAC = 0
# The minor allele frequency as treshold for the analysis, overwrite MAC
parameter: maf_threshold = 0
parameter: genotype_list = path
parameter: region_list = path
input: genotype_list, output_from("phenotype_partition_by_chrom"),output_from("merge_factor_covariate"),group_with = "input_inv"
output: f'{cwd:a}/association_scan/{_input_inv["Theme"]}/Trans/TensorQTL.trans._recipe.tsv'
script: interpreter = interpreter, expand = "$[ ]", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
sos run $[exe_dir]/pipeline/TensorQTL.ipynb trans \
--genotype-list $[_input[0]] \
--phenotype-list $[_input[1]] \
--covariate-file $[_input[2]] \
--cwd $[_output:d] \
--region_list $[region_list] \
--container $[container_TensorQTL] $[submission if yml.is_file() else "" ] $[f'--MAC {MAC}' if MAC else ""] $[f'--maf_threshold {maf_threshold}' if maf_threshold else ""]
```
## SuSiE
```
[UniSuSiE]
input: output_from("plink_per_gene"), output_from("annotation"),output_from("factor"), output_from("region_list_concat"), group_by = "all"
output: f'{cwd:a}/Fine_mapping/UniSuSiE/UniSuSiE_recipe.tsv'
script: interpreter = interpreter, expand = "$[ ]"
sos run $[exe_dir]/pipeline/SuSiE.ipynb uni_susie \
--genoFile $[_input[0]] \
--phenoFile $[" ".join([str(x) for x in _input[1:len(input_inv)+1]])] \
--covFile $[" ".join([str(x) for x in _input[len(input_inv)+1:len(input_inv)*2+1]])] \
--cwd $[_output:d] \
--tissues $[" ".join([x["Theme"] for x in input_inv])] \
--region-list $[_input[3]] \
--container $[container_susie] $[submission if yml.is_file() else "" ]
```
## Sumstat Merger
```
[yml_generation]
parameter: TARGET_list = path("./")
input: output_from(Association_option), group_by = "all"
output: f'{cwd:a}/data_intergration/{Association_option}/qced_sumstat_list.txt',f'{cwd:a}/data_intergration/{Association_option}/yml_list.txt'
script: interpreter = interpreter, expand = "$[ ]"
sos run $[exe_dir]/pipeline/yml_generator.ipynb yml_list \
--sumstat-list $[_input] \
--cwd $[_output[1]:d] --name $[" ".join([str(x).split("/")[-3] for x in _input])] --TARGET_list $[TARGET_list]
[sumstat_merge]
input: output_from("yml_generation")
script: interpreter = interpreter, expand = "$[ ]"
sos run $[exe_dir]/pipeline/summary_stats_merger.ipynb \
--sumstat-list $[_input[0]] \
--yml-list $[_input[1]] \
--cwd $[_input[0]:d] $[submission if yml.is_file() else "" ] --mem 50G --walltime 48h
```
|
github_jupyter
|
```
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import plotly.plotly as py
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True)
%matplotlib inline
data_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\simtime_prediction\data'
excel_file = os.path.join(data_folder, 'sim_time.xlsx')
data = pd.read_excel(excel_file)
data.shape
plt.figure(figsize=(16, 8), dpi= 80, facecolor='w', edgecolor='k')
(data['time'][data['time'] < 1500 * 60] / 60).plot('hist', bins=50, color='#003399')
plt.xlabel('Simulation Time in minutes')
#plt.savefig('simulation_time_histogram.pdf')
(data['time'][data['time'] < 1500 * 60] / 60).describe()
hist, edges = np.histogram((data['time'][data['time'] < 1500 * 60] / 60), density=True, bins=50)
dx = edges[1] - edges[0]
cdf = np.cumsum(hist) * dx
plt.figure(figsize=(16, 8), dpi= 80, facecolor='w', edgecolor='k')
plt.plot(edges[:-1], cdf)
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.neighbors import KNeighborsRegressor
y_data = data['time']
x_data = data.loc[:, data.columns != 'time']
x_data.loc[:, 'exterior_climate'] = np.ones(len(x_data['exterior_climate']))
x_data = x_data.fillna(0.0)
x_data.loc[x_data.loc[:, 'interior_climate'] == 'a', 'interior_climate'] = 0.0
x_data.loc[x_data.loc[:, 'interior_climate'] == 'b', 'interior_climate'] = 1.0
x_data.loc[x_data.loc[:, 'system_name'] == 'ClimateBoard', 'system_name'] = 1.0
x_data.head()
x_data.columns
processed_data = x_data.assign(time=y_data/60)
plt_data = [
go.Parcoords(
line = dict(color = processed_data['time'],
colorscale = 'Jet',
showscale = True,
cmin = 0,
cmax = 1500),
dimensions = list([
dict(range = [0,1440],
label = 'Time', values = processed_data['time'],
tickformat='r'),
dict(range = [0, 5],
label = 'Ext. Heat\nTransfer Coef. Slope',
values = processed_data['exterior_heat_transfer_coefficient_slope']),
dict(range = [4 * 10 ** -9, 10 ** -8],
label = 'Ext. Moisture Transfer Coef.',
values = processed_data['exterior_moisture_transfer_coefficient'],
tickformat='e'),
dict(range = [0.4, 0.8],
label = 'Solar Absorption', values = processed_data['solar_absorption'],
tickformat='.1f'),
dict(range = [0.0, 2.0],
label = 'Rain Scale Factor', values = processed_data['rain_scale_factor']),
dict(range = [0.0, 1.0],
label = 'Int. Climate', values = processed_data['interior_climate']),
dict(range = [4.0, 11.0],
label = 'Int. Heat Transfer Coef.',
values = processed_data['interior_heat_transfer_coefficient']),
dict(range = [4 * 10 ** -9, 10 ** -8],
label = 'Int. Moisture Transfer Coef.',
values = processed_data['interior_moisture_transfer_coefficient'],
tickformat='e'),
dict(range = [0.0, 0.6],
label = 'Int. Sd Value', values = processed_data['interior_sd_value'],
tickformat='.1f'),
dict(range = [0.0, 360.0],
label = 'Wall Orientation', values = processed_data['wall_orientation']),
dict(range = [0.0, 1.0],
label = 'Wall Core Width', values = processed_data['wall_core_width']),
dict(range = [0.0, 1000],
label = 'Wall Core Material', values = processed_data['wall_core_material'],
tickformat='r'),
dict(range = [0.01, 0.02],
label = 'Plaster Width', values = processed_data['plaster_width'],
tickformat='.2f'),
dict(range = [0.0, 1000],
label = 'Plaster Material', values = processed_data['plaster_material'],
tickformat='r'),
dict(range = [0.0, 1.0],
label = 'Ext. Plaster', values = processed_data['exterior_plaster']),
dict(range = [0.0, 1.0],
label = 'System', values = processed_data['system_name']),
dict(range = [0.0, 1000],
label = 'Insulation Material', values = processed_data['insulation_material'],
tickformat='r'),
dict(range = [0.0, 1000],
label = 'Finish Material', values = processed_data['finish_material'],
tickformat='r'),
dict(range = [0.0, 1000],
label = 'Detail Material', values = processed_data['detail_material'],
tickformat='r'),
dict(range = [0.0, 200],
label = 'Insulation Thickness', values = processed_data['insulation_thickness']),
])
)
]
layout = go.Layout(
plot_bgcolor = '#E5E5E5',
paper_bgcolor = '#E5E5E5'
)
fig = go.Figure(data = plt_data, layout = layout)
plot(fig, filename = 'sim_time.html')
X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, random_state=0)
# Linear Model
linreg = linear_model.LinearRegression(normalize=True)
linreg.fit(X_train, y_train)
print('linear model intercept: {}'.format(linreg.intercept_))
print('linear model coeff:\n{}'.format(linreg.coef_))
print('R-squared score (training): {:.3f}'.format(linreg.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'.format(linreg.score(X_test, y_test)))
print('Number of non-zero features: {}'.format(np.sum(linreg.coef_ != 0)))
# Ridge Model
linridge = linear_model.Ridge(alpha=20.0).fit(X_train, y_train)
print('ridge regression linear model intercept: {}'.format(linridge.intercept_))
print('ridge regression linear model coeff:\n{}'.format(linridge.coef_))
print('R-squared score (training): {:.3f}'.format(linridge.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'.format(linridge.score(X_test, y_test)))
print('Number of non-zero features: {}'.format(np.sum(linridge.coef_ != 0)))
# Ridge Model Normalized
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
linridge_normal = linear_model.Ridge(alpha=20.0).fit(X_train_scaled, y_train)
print('ridge regression linear model intercept: {}'.format(linridge_normal.intercept_))
print('ridge regression linear model coeff:\n{}'.format(linridge_normal.coef_))
print('R-squared score (training): {:.3f}'.format(linridge_normal.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}'.format(linridge_normal.score(X_test_scaled, y_test)))
print('Number of non-zero features: {}'.format(np.sum(linridge_normal.coef_ != 0)))
# K-nearest regression - 5 neighbors
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn_reg5_uni = KNeighborsRegressor(n_neighbors=5).fit(X_train_scaled, y_train)
#print(knn_reg5_uni.predict(X_test_scaled))
print('R-squared train score: {:.5f}'.format(knn_reg5_uni.score(X_train_scaled, y_train)))
print('R-squared test score: {:.5f}'.format(knn_reg5_uni.score(X_test_scaled, y_test)))
# K-nearest regression - 3 neighbors
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn_reg5_uni = KNeighborsRegressor(n_neighbors=3).fit(X_train_scaled, y_train)
#print(knn_reg5_uni.predict(X_test_scaled))
print('R-squared train score: {:.5f}'.format(knn_reg5_uni.score(X_train_scaled, y_train)))
print('R-squared test score: {:.5f}'.format(knn_reg5_uni.score(X_test_scaled, y_test)))
# K-nearest regression - 5 neighbors, weights = distance
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn_reg5 = KNeighborsRegressor(n_neighbors=3, weights='distance').fit(X_train_scaled, y_train)
#print(knn_reg5.predict(X_test_scaled))
print('R-squared train score: {:.5f}'.format(knn_reg5.score(X_train_scaled, y_train)))
print('R-squared test score: {:.5f}'.format(knn_reg5.score(X_test_scaled, y_test)))
from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=5, test_size=0.25, random_state=47)
scaler = MinMaxScaler()
test_scores = []
for train_index, test_index in ss.split(x_data):
x_train = scaler.fit_transform(x_data.iloc[train_index, :])
x_test = scaler.transform(x_data.iloc[test_index, :])
y_train = y_data.iloc[train_index]
y_test = y_data.iloc[test_index]
knn_reg = KNeighborsRegressor(n_neighbors=5, weights='distance').fit(x_train, y_train)
#knn_reg = KNeighborsRegressor(n_neighbors=5).fit(x_train, y_train)
test_scores.append(knn_reg.score(x_test, y_test))
mean_score = np.mean(test_scores)
print(f'Average R-squared test score: {mean_score:.5f}')
# Cross Validation Score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
ss = ShuffleSplit(n_splits=5, test_size=0.25, random_state=47)
scaler = MinMaxScaler()
knn_reg = KNeighborsRegressor(n_neighbors=5, weights='distance')
#knn_reg = KNeighborsRegressor(n_neighbors=5)
validated_test_scores = cross_val_score(knn_reg, scaler.fit_transform(x_data), y_data, cv=ss)
print(f'Accuracy: {validated_test_scores.mean():.5f} (+/- {validated_test_scores.std()*2:.5f})')
# Feature Importance
features = x_data.columns
col_del = []
feature_scores = []
for feat in features:
feature_less_data = x_data.loc[:, x_data.columns != feat]
test_scores = cross_val_score(knn_reg, scaler.fit_transform(feature_less_data), y_data, cv=ss, scoring='r2')
feature_scores.append((feat, test_scores.mean()))
if test_scores.mean() >= validated_test_scores.mean():
col_del.append(feat)
feature_scores = sorted(feature_scores, key=lambda x: x[1])
width = len('exterior heat transfer coefficient slope')
print('Feature'.ljust(width, ' ') + ' Accuracy')
for i in feature_scores:
print(f'{i[0].ljust(width, " ")} - {i[1]:.5f}')
print('Columns to delete:\n')
for col in col_del:
print(f'\t{col}')
clean_col = x_data.columns[[c not in col_del for c in x_data.columns.tolist()]]
cleaned_data = x_data.loc[:, clean_col]
clean_scores = cross_val_score(knn_reg, scaler.fit_transform(cleaned_data), y_data, cv=ss, scoring='r2')
print(f'Accuracy: {clean_scores.mean():.5f} (+/- {clean_scores.std()*2:.5f})')
```
|
github_jupyter
|
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
---
In this notebook, we visualize four activation maps in a CNN layer.
### 1. Import the Image
```
import cv2
import scipy.misc
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'part12/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# resize to smaller
small_img = scipy.misc.imresize(gray_img, 0.3)
# rescale entries to lie in [0,1]
small_img = small_img.astype("float32")/255
# plot image
plt.imshow(small_img, cmap='gray')
plt.show()
```
### 2. Specify the Filters
```
import numpy as np
# TODO: Feel free to modify the numbers here, to try out another filter!
# Please don't change the size of the array ~ :D
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
### do not modify the code below this line ###
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = [filter_1, filter_2, filter_3, filter_4]
# visualize all filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
```
### 3. Visualize the Activation Maps for Each Filter
```
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
import matplotlib.cm as cm
# plot image
plt.imshow(small_img, cmap='gray')
# define a neural network with a single convolutional layer with one filter
model = Sequential()
model.add(Convolution2D(1, (4, 4), activation='relu', input_shape=(small_img.shape[0], small_img.shape[1], 1)))
# apply convolutional filter and return output
def apply_filter(img, index, filter_list, ax):
# set the weights of the filter in the convolutional layer to filter_list[i]
model.layers[0].set_weights([np.reshape(filter_list[i], (4,4,1,1)), np.array([0])])
# plot the corresponding activation map
ax.imshow(np.squeeze(model.predict(np.reshape(img, (1, img.shape[0], img.shape[1], 1)))), cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# visualize all activation maps
fig = plt.figure(figsize=(20, 20))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
apply_filter(small_img, i, filters, ax)
ax.set_title('Activation Map for Filter %s' % str(i+1))
```
|
github_jupyter
|
# Part - 2: COVID-19 Time Series Analysis and Prediction using ML.Net framework
## COVID-19
- As per [Wiki](https://en.wikipedia.org/wiki/Coronavirus_disease_2019) **Coronavirus disease 2019** (**COVID-19**) is an infectious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). The disease was first identified in 2019 in Wuhan, the capital of China's Hubei province, and has since spread globally, resulting in the ongoing 2019–20 coronavirus pandemic.
- The virus had caused a pandemic across the globe and spreading/affecting most of the nations.
- The purpose of notebook is to visualize the trends of virus spread in various countries and explore features present in ML.Net such as DataFrame.
### Acknowledgement
- [Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data) for dataset
- [COVID-19 data visualization](https://www.kaggle.com/akshaysb/covid-19-data-visualization) by Akshay Sb
### Dataset
- [2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE - Time Series](https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series).
### Introduction
This is **Part-2** of our analysis on the COVID-19 dataset provided by Johns Hopkins CSSE. In [**Part-1**](https://github.com/praveenraghuvanshi1512/TechnicalSessions/tree/31052020-virtualmlnet/31052020-virtualmlnet/src/part-1), I did data analysis on the dataset and created some tables and plots for getting insights from it. In Part-2, I'll focus on applying machine learning for making a prediction using time-series API's provided by ML.Net framework. I'll be building a model from scratch on the number of confirmed cases and predicting for the next 7 days. Later on, I'll plot these numbers for better visualization.
[**ML.Net**](https://dotnet.microsoft.com/apps/machinelearning-ai/ml-dotnet) is a cross-platform framework from Microsoft for developing Machine learning models in the .Net ecosystem. It allows .Net developers to solve business problems using machine learning algorithms leveraging their preferred language such as C#/F#. It's highly scalable and used within Microsoft in many of its products such as Bing, Powerpoint, etc.
**Disclaimer**: This is an exercise to explore different features present in ML.Net. The actual and predicted numbers might vary due to several factors such as size and features in a dataset.
### Summary
Below is the summary of steps we'll be performing
1. Define application level items
- Nuget packages
- Namespaces
- Constants
2. Utility Functions
- Formatters
3. Dataset and Transformations
- Actual from [Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series)
- Transformed [time_series_covid19_confirmed_global_transposed.csv](time_series_covid19_confirmed_global_transposed.csv)
4. Data Classes
- ConfirmedData : Provides a map between columns in a dataset
- ConfirmedForecast : Holds predicted values
5. Data Analysis
- Visualize Data using DataFrame API
- Display Top 10 Rows - dataframe.Head(10)
- Display Last 10 Rows - dataframe.Tail(10)
- Display Dataset Statistics - dataframe.Description()
- Plot of TotalConfimed cases vs Date
6. Load Data - MLContext
7. ML Pipeline
8. Train Model
9. Prediction/Forecasting
10. Prediction Visualization
11. Prediction Analysis
12. Conclusion
**Note** : Graphs/Plots may not render in GitHub due to secutiry reasons, however if you run this notebook locally/binder they will render.
```
#!about
```
### 1. Define Application wide Items
#### Nuget Packages
```
// ML.NET Nuget packages installation
#r "nuget:Microsoft.ML"
#r "nuget:Microsoft.ML.TimeSeries"
#r "nuget:Microsoft.Data.Analysis"
// Install XPlot package
#r "nuget:XPlot.Plotly"
```
#### Namespaces
```
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.ML;
using Microsoft.ML.Data;
using Microsoft.Data.Analysis;
using Microsoft.ML.Transforms.TimeSeries;
using Microsoft.AspNetCore.Html;
using XPlot.Plotly;
```
#### Constants
```
const string CONFIRMED_DATASET_FILE = "time_series_covid19_confirmed_global_transposed.csv";
// Forecast API
const int WINDOW_SIZE = 5;
const int SERIES_LENGTH = 10;
const int TRAIN_SIZE = 100;
const int HORIZON = 7;
// Dataset
const int DEFAULT_ROW_COUNT = 10;
const string TOTAL_CONFIRMED_COLUMN = "TotalConfirmed";
const string DATE_COLUMN = "Date";
```
### 2. Utility Functions - TBR
#### Formatters
By default the output of DataFrame is not proper and in order to display it as a table, we need to have a custom formatter implemented as shown in next cell.
```
Formatter<DataFrame>.Register((df, writer) =>
{
var headers = new List<IHtmlContent>();
headers.Add(th(i("index")));
headers.AddRange(df.Columns.Select(c => (IHtmlContent) th(c.Name)));
var rows = new List<List<IHtmlContent>>();
var take = DEFAULT_ROW_COUNT;
for (var i = 0; i < Math.Min(take, df.Rows.Count); i++)
{
var cells = new List<IHtmlContent>();
cells.Add(td(i));
foreach (var obj in df.Rows[i])
{
cells.Add(td(obj));
}
rows.Add(cells);
}
var t = table(
thead(
headers),
tbody(
rows.Select(
r => tr(r))));
writer.Write(t);
}, "text/html");
```
### 3. Dataset and Transformations
#### Download Dataset
- Actual Dataset: [Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series)
- Transformed Dataset: [time_series_covid19_confirmed_global_transposed.csv](time_series_covid19_confirmed_global_transposed.csv)
I'll be using COVID-19 time series dataset from [Johns Hopkins CSSE](https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series) and will be performing predictions using **time_series_covid19_confirmed_global.csv** file.
The data present in these files have name of the countries as Rows and dates as columns which makes it difficult to map to our classes while loading data from csv. Also, it contains data per country wise. In order to keep things simple I'll work with global count of COVID-19 cases and not specific country.
I have done few transformations to the dataset as below and created transformed csv's
- Sum cases from all the countries for a specific date
- Just have two rows with Date and Total
- Applied transformation to the csv for converting Rows into Columns and vice-versa. [Refer](https://support.office.com/en-us/article/transpose-rotate-data-from-rows-to-columns-or-vice-versa-3419f2e3-beab-4318-aae5-d0f862209744) for transformation.
- Below transposed files have been saved in the current github directory. There is no change in dataset. The files have data till 05-27-2020
- [time_series_covid19_confirmed_global_transposed.csv](time_series_covid19_confirmed_global_transposed.csv) : Columns - **Date, TotalConfirmed**
##### Before transformation
<img src=".\assets\time-series-before-transformation.png" alt="Time Series data before transofmation" style="zoom: 80%;" />
#### After transformation
<img src=".\assets\time-series-after-transformation.png" alt="Time Series data after transofmation" style="zoom: 80%;" />
### 4. Data Classes
Now, we need to create few data structures to map to columns within our dataset.
#### Confirmed cases
```
/// <summary>
/// Represent data for confirmed cases with a mapping to columns in a dataset
/// </summary>
public class ConfirmedData
{
/// <summary>
/// Date of confirmed case
/// </summary>
[LoadColumn(0)]
public DateTime Date;
/// <summary>
/// Total no of confirmed cases on a particular date
/// </summary>
[LoadColumn(1)]
public float TotalConfirmed;
}
/// <summary>
/// Prediction/Forecast for Confirmed cases
/// </summary>
internal class ConfirmedForecast
{
/// <summary>
/// No of predicted confirmed cases for multiple days
/// </summary>
public float[] Forecast { get; set; }
}
```
### 5. Data Analysis
For loading data from csv, first we need to create MLContext that acts as a starting point for creating a machine learning model in ML.Net. Few things to note
- Set hasHeader as true as our dataset has header
- Add separatorChar to ',' as its a csv
#### Visualize Data - DataFrame
```
var predictedDf = DataFrame.LoadCsv(CONFIRMED_DATASET_FILE);
predictedDf.Head(DEFAULT_ROW_COUNT)
predictedDf.Tail(DEFAULT_ROW_COUNT)
predictedDf.Description()
```
##### Number of Confirmed cases over Time
```
// Number of confirmed cases over time
var totalConfirmedDateColumn = predictedDf.Columns[DATE_COLUMN];
var totalConfirmedColumn = predictedDf.Columns[TOTAL_CONFIRMED_COLUMN];
var dates = new List<string>();
var totalConfirmedCases = new List<string>();
for (int index = 0; index < totalConfirmedDateColumn.Length; index++)
{
dates.Add(totalConfirmedDateColumn[index].ToString());
totalConfirmedCases.Add(totalConfirmedColumn[index].ToString());
}
var title = "Number of Confirmed Cases over Time";
var confirmedTimeGraph = new Graph.Scattergl()
{
x = dates.ToArray(),
y = totalConfirmedCases.ToArray(),
mode = "lines+markers"
};
var chart = Chart.Plot(confirmedTimeGraph);
chart.WithTitle(title);
display(chart);
```
**Analysis**
- Duration: 1/22/2020 through 5/27/2020
- Total records: 127
- Case on first day: 555
- Case on last day: 5691790
- No of confirmed cases was low in the beginning, there was first jump around 2/12/2020 and an exponential jump around 3/22/2020.
- Cases have been increasing at an alarming rate in the past two months.
### 6. Load Data - MLContext
```
var context = new MLContext();
var data = context.Data.LoadFromTextFile<ConfirmedData>(CONFIRMED_DATASET_FILE, hasHeader: true, separatorChar: ',');
```
### 7. ML Pipeline
For creating ML Pipeline for a time-series analysis, we'll use [Single Spectrum Analysis](https://en.wikipedia.org/wiki/Singular_spectrum_analysis). ML.Net provides built in API for same, more details could be found at [TimeSeriesCatalog.ForecastBySsa](https://docs.microsoft.com/en-us/dotnet/api/microsoft.ml.timeseriescatalog.forecastbyssa?view=ml-dotnet)
```
var pipeline = context.Forecasting.ForecastBySsa(
nameof(ConfirmedForecast.Forecast),
nameof(ConfirmedData.TotalConfirmed),
WINDOW_SIZE,
SERIES_LENGTH,
TRAIN_SIZE,
HORIZON);
```
### 8. Train Model
We are ready with our pipeline and ready to train the model
```
var model = pipeline.Fit(data);
```
### 9. Prediction/Forecasting - 7 days
Our model is trained and we need to do prediction for next 7(Horizon) days.
Time-series provides its own engine for making prediction which is similar to PredictionEngine present in ML.Net. Predicted values show an increasing trend which is in alignment with recent past values.
```
var forecastingEngine = model.CreateTimeSeriesEngine<ConfirmedData, ConfirmedForecast>(context);
var forecasts = forecastingEngine.Predict();
display(forecasts.Forecast.Select(x => (int) x))
```
### 10. Prediction Visualization
```
var lastDate = DateTime.Parse(dates.LastOrDefault());
var predictionStartDate = lastDate.AddDays(1);
for (int index = 0; index < HORIZON; index++)
{
dates.Add(lastDate.AddDays(index + 1).ToShortDateString());
totalConfirmedCases.Add(forecasts.Forecast[index].ToString());
}
var title = "Number of Confirmed Cases over Time";
var layout = new Layout.Layout();
layout.shapes = new List<Graph.Shape>
{
new Graph.Shape
{
x0 = predictionStartDate.ToShortDateString(),
x1 = predictionStartDate.ToShortDateString(),
y0 = "0",
y1 = "1",
xref = 'x',
yref = "paper",
line = new Graph.Line() {color = "red", width = 2}
}
};
var chart1 = Chart.Plot(
new []
{
new Graph.Scattergl()
{
x = dates.ToArray(),
y = totalConfirmedCases.ToArray(),
mode = "lines+markers"
}
},
layout
);
chart1.WithTitle(title);
display(chart1);
```
### 11. Analysis
Comparing the plots before and after prediction, it seems our ML model has performed reasonably well. The red line represents the data on future date(5/8/2020). Beyond this, we predicted for 7 days. Looking at the plot, there is a sudden drop on 5/8/2020 which could be accounted due to insufficient data as we have only 127 records. However we see an increasing trend for next 7 days in alignment with previous confirmed cases. We can extend this model for predicting confirmed cases for any number of days by changing HORIZON constant value. This plot is helpful in analysing the increased number of cases and allow authorities to take precautionary measures to keep the numbers low.
## Conclusion
I hope you have enjoyed reading the notebook, and might have got some idea on the powerful framework ML.Net. ML.Net is a very fast emerging framework for .Net developers which abstracts lot of complexity present in the field of Data science and Machine Learning. The focus of Part-2 notebook is leverage ML.Net for making predictions using time-series API. The model generated can be saved as a zip file and used in different applications.
Feedback/Suggestion are welcome. Please reach out to me through below channels
**Contact**
**Email :** [email protected]
**LinkedIn :** https://in.linkedin.com/in/praveenraghuvanshi
**Github :** https://github.com/praveenraghuvanshi1512
**Twitter :** @praveenraghuvan
## References
- [Tutorial: Forecast bike rental service demand with time series analysis and ML.NET](https://docs.microsoft.com/en-us/dotnet/machine-learning/tutorials/time-series-demand-forecasting#evaluate-the-model)
- [Time Series Forecasting in ML.NET and Azure ML notebooks](https://github.com/gvashishtha/time-series-mlnet/blob/master/time-series-forecast.ipynb) by Gopal Vashishtha
# ******************** Be Safe **********************
|
github_jupyter
|
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/transformers/HuggingFace%20in%20Spark%20NLP%20-%20RoBertaForTokenClassification.ipynb)
## Import RoBertaForTokenClassification models from HuggingFace 🤗 into Spark NLP 🚀
Let's keep in mind a few things before we start 😊
- This feature is only in `Spark NLP 3.3.x` and after. So please make sure you have upgraded to the latest Spark NLP release
- You can import RoBERTa models trained/fine-tuned for token classification via `RobertaForTokenClassification` or `TFRobertaForTokenClassification`. These models are usually under `Token Classification` category and have `roberta` in their labels
- Reference: [TFRobertaForTokenClassification](https://huggingface.co/transformers/model_doc/roberta.html#tfrobertafortokenclassification)
- Some [example models](https://huggingface.co/models?filter=roberta&pipeline_tag=token-classification)
## Export and Save HuggingFace model
- Let's install `HuggingFace` and `TensorFlow`. You don't need `TensorFlow` to be installed for Spark NLP, however, we need it to load and save models from HuggingFace.
- We lock TensorFlow on `2.4.1` version and Transformers on `4.10.0`. This doesn't mean it won't work with the future releases, but we wanted you to know which versions have been tested successfully.
```
!pip install -q transformers==4.10.0 tensorflow==2.4.1
```
- HuggingFace comes with a native `saved_model` feature inside `save_pretrained` function for TensorFlow based models. We will use that to save it as TF `SavedModel`.
- We'll use [philschmid/distilroberta-base-ner-wikiann-conll2003-3-class](https://huggingface.co/philschmid/distilroberta-base-ner-wikiann-conll2003-3-class) model from HuggingFace as an example
- In addition to `TFRobertaForTokenClassification` we also need to save the `RobertaTokenizer`. This is the same for every model, these are assets needed for tokenization inside Spark NLP.
```
from transformers import TFRobertaForTokenClassification, RobertaTokenizer
MODEL_NAME = 'philschmid/distilroberta-base-ner-wikiann-conll2003-3-class'
tokenizer = RobertaTokenizer.from_pretrained(MODEL_NAME)
tokenizer.save_pretrained('./{}_tokenizer/'.format(MODEL_NAME))
# just in case if there is no TF/Keras file provided in the model
# we can just use `from_pt` and convert PyTorch to TensorFlow
try:
print('try downloading TF weights')
model = TFRobertaForTokenClassification.from_pretrained(MODEL_NAME)
except:
print('try downloading PyTorch weights')
model = TFRobertaForTokenClassification.from_pretrained(MODEL_NAME, from_pt=True)
model.save_pretrained("./{}".format(MODEL_NAME), saved_model=True)
```
Let's have a look inside these two directories and see what we are dealing with:
```
!ls -l {MODEL_NAME}
!ls -l {MODEL_NAME}/saved_model/1
!ls -l {MODEL_NAME}_tokenizer
```
- as you can see, we need the SavedModel from `saved_model/1/` path
- we also be needing `vocab.json` and `merges.txt` files from the tokenizer
- all we need is to first convert `vocab.json` to `vocab.txt` and copy both `vocab.txt` and `merges.txt` into `saved_model/1/assets` which Spark NLP will look for
- in addition to vocabs, we also need `labels` and their `ids` which is saved inside the model's config. We will save this inside `labels.txt`
```
asset_path = '{}/saved_model/1/assets'.format(MODEL_NAME)
# let's save the vocab as txt file
with open('{}_tokenizer/vocab.txt'.format(MODEL_NAME), 'w') as f:
for item in tokenizer.get_vocab().keys():
f.write("%s\n" % item)
# let's copy both vocab.txt and merges.txt files to saved_model/1/assets
!cp {MODEL_NAME}_tokenizer/vocab.txt {asset_path}
!cp {MODEL_NAME}_tokenizer/merges.txt {asset_path}
# get label2id dictionary
labels = model.config.label2id
# sort the dictionary based on the id
labels = sorted(labels, key=labels.get)
with open(asset_path+'/labels.txt', 'w') as f:
f.write('\n'.join(labels))
```
Voila! We have our `vocab.txt` and `labels.txt` inside assets directory
```
!ls -l {MODEL_NAME}/saved_model/1/assets
```
## Import and Save RobertaForTokenClassification in Spark NLP
- Let's install and setup Spark NLP in Google Colab
- This part is pretty easy via our simple script
```
! wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
```
Let's start Spark with Spark NLP included via our simple `start()` function
```
import sparknlp
# let's start Spark with Spark NLP
spark = sparknlp.start()
```
- Let's use `loadSavedModel` functon in `RoBertaForTokenClassification` which allows us to load TensorFlow model in SavedModel format
- Most params can be set later when you are loading this model in `RoBertaForTokenClassification` in runtime like `setMaxSentenceLength`, so don't worry what you are setting them now
- `loadSavedModel` accepts two params, first is the path to the TF SavedModel. The second is the SparkSession that is `spark` variable we previously started via `sparknlp.start()`
- NOTE: `loadSavedModel` only accepts local paths and not distributed file systems such as `HDFS`, `S3`, `DBFS`, etc. That is why we use `write.save` so we can use `.load()` from any file systems
```
from sparknlp.annotator import *
tokenClassifier = RoBertaForTokenClassification\
.loadSavedModel('{}/saved_model/1'.format(MODEL_NAME), spark)\
.setInputCols(["sentence",'token'])\
.setOutputCol("ner")\
.setCaseSensitive(True)\
.setMaxSentenceLength(128)
```
- Let's save it on disk so it is easier to be moved around and also be used later via `.load` function
```
tokenClassifier.write().overwrite().save("./{}_spark_nlp".format(MODEL_NAME))
```
Let's clean up stuff we don't need anymore
```
!rm -rf {MODEL_NAME}_tokenizer {MODEL_NAME}
```
Awesome 😎 !
This is your RoBertaForTokenClassification model from HuggingFace 🤗 loaded and saved by Spark NLP 🚀
```
! ls -l {MODEL_NAME}_spark_nlp
```
Now let's see how we can use it on other machines, clusters, or any place you wish to use your new and shiny RoBertaForTokenClassification model 😊
```
tokenClassifier_loaded = RoBertaForTokenClassification.load("./{}_spark_nlp".format(MODEL_NAME))\
.setInputCols(["sentence",'token'])\
.setOutputCol("ner")
tokenClassifier_loaded.getCaseSensitive()
```
That's it! You can now go wild and use hundreds of `RoBertaForTokenClassification` models from HuggingFace 🤗 in Spark NLP 🚀
|
github_jupyter
|
# Section 2.1 `xarray`, `az.InferenceData`, and NetCDF for Markov Chain Monte Carlo
_How do we generate, store, and save Markov chain Monte Carlo results_
```
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import arviz as az
import pystan
import xarray as xr
from IPython.display import Video
np.random.seed(0)
plt.style.use('arviz-white')
```
## Learning Objectives
* Understand Markov chain Monte Carlo fundamentals
* Recognize the meaning of sample, draws, and chains in MCMC context
* Understand relationship between Xarray, az.InferenceData, and NetCDF
* Gain profiency with Xarray, NetCDF, and az.InferenceData objects
## Markov Chain Monte Carlo
**Pop quiz**: Why do we use Markov chain Monte Carlo in Bayesian inference?
**Highlight for answer:** C<span style="color:white">alculating the posterior distribution is hard</span>!
**Example:** If a flight has cancellation rate $r$, alternate tickets cost you $c$, and these distributions are modelled by $p(r, c)$, then expected cost of insuring a flight is
$$
\text{risk} = \int_{r=0}^{1}\int_{c=0}^{\infty} r\cdot c~dp(r, c)
$$
This can be hard to calculate for any number of reasons! If, instead, we have samples
$$
\{r_j, c_j\}_{j=1}^N \sim p(r, c)
$$
then
$$
\text{risk} \approx \frac{1}{N}\sum_{j=1}^N r_j \cdot c_j
$$
In python code, this would just be
```
risk = np.dot(r, c) / N
```
## Markov Chain Monte Carlo algorithm (greatly simplified)
Step 1: Start at a random spot
Step 2: Propose a new spot, possibly based on the previous spot
Step 3: Accept or reject this proposal based on some mathematical book keeping
Step 4: If accepted, move to proposed spot, if rejected, stay where you are
Step 5: Write down where you're standing
Step 6: Go back to step 2
The accepted proposals are called draws (or samples).
When animated this algorithm looks like this:
```
Video("../../img/medium_steps.mp4")
```
In MCMC Step 2 and Step 4 is where most MCMC variants differentiate themselves. Algorithms like Hamiltonian Monte Carlo and Sequential Monte Carlo are better at picking that next step for certain tasks. Richard McElreath has a great visual explainer [on his blog]([http://elevanth.org/blog/2017/11/28/build-a-better-markov-chain/)
Chain: A Markov chain
Sample/Draw: A single element of that chain
Regardless of algorithm in MCMC we end up with the same thing, a chain of accepted proposals with a fixed size. There is a rich literature to show that these algorithms produce samples that are eventually distributed according to the distribution we care about.
## Markov chain Monte Carlo with Metropolis-Hastings
Below is a working Metropolis-Hastings sampler, taken from [Thomas Wiecki's blog](https://twiecki.io/blog/2015/11/10/mcmc-sampling/). For the purposes of this tutorial focus more on the return value than the algorithm details.
It is important to note that this for simplicity's sake we have also hard coded the likelihood and prior in the sampler below. In mathematical notation our model looks like this. We are adding 20 to the estimation of mu to make it easier to recognize the distribution of **parameters** from the distribution of **observed data**
$$
\mu \sim \mathcal{N}(0, 1) \\
y \sim \mathcal{N}(\mu+20, 1)
$$
```
def mh_sampler(data, samples=4, mu_init=.5):
mu_current = mu_init
posterior = []
prior_logpdf = stats.norm(0, 1).logpdf
for i in range(samples):
# suggest new position
mu_proposal = stats.norm(mu_current, 0.5).rvs()
# Compute likelihood by multiplying probabilities of each data point
likelihood_current = stats.norm(mu_current + 20, 1).logpdf(data).sum()
likelihood_proposal = stats.norm(mu_proposal + 20, 1).logpdf(data).sum()
# Compute prior probability of current and proposed mu
prior_current = prior_logpdf(mu_current)
prior_proposal = prior_logpdf(mu_proposal)
# log(p(x|θ) p(θ)) = log(p(x|θ)) + log(p(θ))
p_current = likelihood_current + prior_current
p_proposal = likelihood_proposal + prior_proposal
# Accept proposal?
p_accept = np.exp(p_proposal - p_current)
accept = np.random.rand() < p_accept
if accept:
# Update position
mu_current = mu_proposal
else:
# don't move
pass
posterior.append(mu_current)
return np.array(posterior)
```
## Setup
Before using the sampler let's generate some data to test our Metropolis Hasting Implementation. In the code block below we are generating a bimodal distribution for the sampler.
```
data = stats.norm.rvs(loc=30, scale=1, size=1000).flatten()
```
We'll also plot our samples to get a sense of what the distribution of data looks like. Note how the histogram centers around 30. This should intuitively make sense as we're specified a mean of 30 when generating random values.
```
fig, ax = plt.subplots()
ax.hist(data)
fig.suptitle("Histogram of observed data");
```
As humans we can intuit *data mean* of **30** + an offset of **20** will lead to a parameter mean for *mu* of **10**. We want to see if our inference algorithm can recover our parameters.
## Single Variable Single Chain Inference Run
The simplest MCMC run we can perform is with a single variable and a single chain. We'll do so by putting our sampler function and data to use.
```
samples = 200
chain = mh_sampler(data=data, samples=samples)
chain[:100]
```
And just like that we've performed an inference run! We can generate a traceplot
```
fig, ax = plt.subplots(figsize=(10, 7))
x = np.arange(samples)
ax.plot(x, chain);
```
In terms of data structures, for a **single** variable **single** chain inference run, an array suffices for storing samples.
## Single Variable Multiple Chain Inference Run
As Bayesian modelers, life would be relatively easy if a single chain worked well every time, but unfortunately this is not the case. To understand why look at the above inference run. While the sampler started at *mu=8*, it took a 50 or so steps before the sampler honed in on the "correct" value of 10.
MCMC algorithms are sensitive to their starting points and in finite runs it's **not** guaranteed that the Markov Chain will approach the true underlying distribution. A common method to get around this is to sample from many chains in parallel and see if we get to the same place. We will discuss this further when we get to single model diagnostics.
```
chain_0 = mh_sampler(data=data, samples=samples)
chain_1 = mh_sampler(data=data, samples=samples, mu_init=13)
data_df = pd.DataFrame({"x_0":chain_0, "x_1":chain_1})
fig, ax = plt.subplots()
x = np.arange(samples)
ax.plot(x, data_df["x_0"], c="g")
ax.plot(x, data_df["x_1"])
```
With two chains converging to approximately a single value we can be more confident that the sampler reached the true underlying parameter. We can also store the results in a 2D data structures, such as Pandas Dataframes in python memory, and csvs or sql tables for persistent on disk storage.
## Multiple Variable Multiple Chain Inference Runs
A Bayesian modelers, life would be relatively easy if all models only had one variable (univariate models in math speak). Unfortunately many types of models require 2 or more variables. For example in a linear regression we are interested in estimating both <b>m</b> and <b>b</b>:
$$ y \sim mx+b$$
With at least 3 things to track (chains, samples, and variables) a 2d data structures become limiting. This problem exists in many domains and is the focus of the *xarray* project.
A motivating example comes from climate sciences. In this image from the xarray documentation the researcher might want to measure the temperature and humidity, across a 2D region at a point in time. Or they may want to plot the temperature over a time interval. xarray simplifies the data handling in cases like these.

### Xarray
In ArviZ an xarray DataSet object would look like the one below, where the variables are the Inference run variables, and the coordinates are at a minimum chains, draws.
```
posterior = xr.Dataset(
{"mu": (["chain", "draw"], [[11,12,13],[22,23,24]]), "sd": (["chain", "draw"], [[33,34,35],[44,45,46]])},
coords={"draw": [1,2,3], "chain": [0,1]},
)
posterior
```
## Multiple Variable Multiple Chain Inference runs and associated datasets
As a Bayesian modelers, life would be relatively easy if we were only concerned about posterior distributions. Looking back at the full end to end workflow, recall that there are other datasets, such as prior predictive samples, posterior predictive samples, among others. To aid the ArviZ user we present `az.InferenceData`.
### az.InferenceData
az.InferenceData serves as a data container for the various xarray datasets that are generated from an end-to-end Bayesian workflow. Consider our earlier simple model, and this time let's use `stan` to run a full analysis with multiple chains, multiple runs, and generate all sorts of datasets common in Bayesian analysis.
### Calculating prior
```
stan_code_prior = """
data {
int<lower=1> N;
}
parameters {
real mu; // Estimated parameter
}
model {
mu ~ normal(0, 1);
}
generated quantities {
real y_hat[N]; // prior prediction
for (n in 1:N) {
y_hat[n] = normal_rng(mu+20, 1);
}
}
"""
stan_prior = pystan.StanModel(model_code=stan_code_prior)
stan_data_prior = {"N" : len(data)}
stan_fit_prior = stan_prior.sampling(data=stan_data_prior)
stan_code_posterior = """
data {
int N;
real y[N]; // Observed data
}
parameters {
real mu; // Estimated parameter
}
model {
mu ~ normal(0, 1);
y ~ normal(mu+20, 1);
}
generated quantities {
real y_hat[N]; // posterior prediction
real log_lik[N]; // log_likelihood
for (n in 1:N) {
// Stan normal functions https://mc-stan.org/docs/2_19/functions-reference/normal-distribution.html
y_hat[n] = normal_rng(mu, 1);
log_lik[n] = normal_lpdf(y[n] | mu, 1);
}
}
"""
stan_model_posterior = pystan.StanModel(model_code=stan_code_posterior)
stan_data_posterior = dict(
y=data,
N=len(data)
)
stan_fit_posterior = stan_model_posterior.sampling(data=stan_data_posterior)
stan_inference_data = az.from_pystan(posterior=stan_fit_posterior,
observed_data="y",
# Other Bayesian Datasets that we have not discussed yet!
posterior_predictive="y_hat",
prior=stan_fit_prior,
prior_predictive="y_hat",
log_likelihood="log_lik",
)
```
### NetCDF
Calculating the various datasets is usually not trivial. Network Common Data Form (NetCDF) is an open standard for storing multidimensional datasets, and `xarray` is a library for doing high performance analysis on those datasets. NetCDF even comes with "group" support, making it easy to serialize az.InferenceData straight to disk. ArviZ uses NetCDF to save the results to disk, allowing reproducible analyses, multiple experiments, and sharing with others.
ArviZ even ships with sample datasets, serialized in NetCDF
https://github.com/arviz-devs/arviz/tree/master/arviz/data/_datasets
In short: like SQL is to Pandas DataFrame, NetCDF is to az.InferenceData.
```
data = az.load_arviz_data("centered_eight")
data
```
## The benefits of az.InferenceData
One of the goals for the ArviZ developers is to ensure that Bayesian practioners can share and reproduce analyses regardless of PPl, regardless of language and az.InferenceData was the implementation of this idea.
In summary az.InferenceData
* provides a consistent format for Bayesian datasets.
* makes it easy to save results
* makes use of ArviZ plotting and statistics functions simpler
* stores metadata for ease of reproducibility
## InferenceData in practice
In practice it's rare to ever generate a xarray manually for use in ArviZ. Instead ArviZ provides methods for instantiating InferenceData from plain Python objects, mappings to various PPLs, as well as methods to save and load NetCDF files.
For further references consider the ArviZ cookbook, and data structure tutorial.
https://arviz-devs.github.io/arviz/notebooks/InferenceDataCookbook.html
https://arviz-devs.github.io/arviz/notebooks/XarrayforArviZ.html
## Examples
See below for some useful methods of interacting with az.InferenceData, Xarray, and NetCDF
For Xarray methods we only demo a subset of the available API. For a much more comprehensive explanation view the indexing and selection page from the xarray docs
http://xarray.pydata.org/en/stable/indexing.html
### Creating InferenceData objects
We can create an InferenceData objects from our "home built" chain, not just from the output of supported PPLs
```
data_dict = {"mu": [chain_0, chain_1]}
home_built_data = az.from_dict(data_dict)
home_built_data
# Load NetCDF from disk into memory
## Replace with NetCDF that's "visible"
data = az.load_arviz_data("centered_eight")
# Reference posterior directly
posterior = data.posterior
posterior
# Select specific variables
posterior[["mu", "tau"]]
# Select specific chains and draws
posterior.sel(chain=[0,2], draw=slice(0,5))
# Get first 10 samples of mu from chain 0
posterior["mu"].sel(chain=0, draw=slice(0,10)).values
```
## Extra Credit
* xarray supports numpy "ufuncs" (https://docs.scipy.org/doc/numpy/reference/ufuncs.html). ArviZ uses these under the hood for efficient calculations.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D1_ModelTypes/student/W1D1_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Tutorial 1: "What" models
**Week 1, Day 1: Model Types**
**By Neuromatch Academy**
__Content creators:__ Matt Laporte, Byron Galbraith, Konrad Kording
__Content reviewers:__ Dalin Guo, Aishwarya Balwani, Madineh Sarvestani, Maryam Vaziri-Pashkam, Michael Waskom
We would like to acknowledge [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x) for sharing their data, a subset of which is used here.
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
___
# Tutorial Objectives
This is tutorial 1 of a 3-part series on different flavors of models used to understand neural data. In this tutorial we will explore 'What' models, used to describe the data. To understand what our data looks like, we will visualize it in different ways. Then we will compare it to simple mathematical models. Specifically, we will:
- Load a dataset with spiking activity from hundreds of neurons and understand how it is organized
- Make plots to visualize characteristics of the spiking activity across the population
- Compute the distribution of "inter-spike intervals" (ISIs) for a single neuron
- Consider several formal models of this distribution's shape and fit them to the data "by hand"
```
# @title Video 1: "What" Models
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="KgqR_jbjMQg", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
# Setup
Python requires you to explictly "import" libraries before their functions are available to use. We will always specify our imports at the beginning of each notebook or script.
```
import numpy as np
import matplotlib.pyplot as plt
```
Tutorial notebooks typically begin with several set-up steps that are hidden from view by default.
**Important:** Even though the code is hidden, you still need to run it so that the rest of the notebook can work properly. Step through each cell, either by pressing the play button in the upper-left-hand corner or with a keyboard shortcut (`Cmd-Return` on a Mac, `Ctrl-Enter` otherwise). A number will appear inside the brackets (e.g. `[3]`) to tell you that the cell was executed and what order that happened in.
If you are curious to see what is going on inside each cell, you can double click to expand. Once expanded, double-click the white space to the right of the editor to collapse again.
```
#@title Figure Settings
import ipywidgets as widgets #interactive display
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Helper functions
#@markdown Most of the tutorials make use of helper functions
#@markdown to simplify the code that you need to write. They are defined here.
# Please don't edit these, or worry about understanding them now!
def restrict_spike_times(spike_times, interval):
"""Given a spike_time dataset, restrict to spikes within given interval.
Args:
spike_times (sequence of np.ndarray): List or array of arrays,
each inner array has spike times for a single neuron.
interval (tuple): Min, max time values; keep min <= t < max.
Returns:
np.ndarray: like `spike_times`, but only within `interval`
"""
interval_spike_times = []
for spikes in spike_times:
interval_mask = (spikes >= interval[0]) & (spikes < interval[1])
interval_spike_times.append(spikes[interval_mask])
return np.array(interval_spike_times, object)
#@title Data retrieval
#@markdown This cell downloads the example dataset that we will use in this tutorial.
import io
import requests
r = requests.get('https://osf.io/sy5xt/download')
if r.status_code != 200:
print('Failed to download data')
else:
spike_times = np.load(io.BytesIO(r.content), allow_pickle=True)['spike_times']
```
---
# Section 1: Exploring the Steinmetz dataset
In this tutorial we will explore the structure of a neuroscience dataset.
We consider a subset of data from a study of [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x). In this study, Neuropixels probes were implanted in the brains of mice. Electrical potentials were measured by hundreds of electrodes along the length of each probe. Each electrode's measurements captured local variations in the electric field due to nearby spiking neurons. A spike sorting algorithm was used to infer spike times and cluster spikes according to common origin: a single cluster of sorted spikes is causally attributed to a single neuron.
In particular, a single recording session of spike times and neuron assignments was loaded and assigned to `spike_times` in the preceding setup.
Typically a dataset comes with some information about its structure. However, this information may be incomplete. You might also apply some transformations or "pre-processing" to create a working representation of the data of interest, which might go partly undocumented depending on the circumstances. In any case it is important to be able to use the available tools to investigate unfamiliar aspects of a data structure.
Let's see what our data looks like...
## Section 1.1: Warming up with `spike_times`
What is the Python type of our variable?
```
type(spike_times)
```
You should see `numpy.ndarray`, which means that it's a normal NumPy array.
If you see an error message, it probably means that you did not execute the set-up cells at the top of the notebook. So go ahead and make sure to do that.
Once everything is running properly, we can ask the next question about the dataset: what's its shape?
```
spike_times.shape
```
There are 734 entries in one dimension, and no other dimensions. What is the Python type of the first entry, and what is *its* shape?
```
idx = 0
print(
type(spike_times[idx]),
spike_times[idx].shape,
sep="\n",
)
```
It's also a NumPy array with a 1D shape! Why didn't this show up as a second dimension in the shape of `spike_times`? That is, why not `spike_times.shape == (734, 826)`?
To investigate, let's check another entry.
```
idx = 321
print(
type(spike_times[idx]),
spike_times[idx].shape,
sep="\n",
)
```
It's also a 1D NumPy array, but it has a different shape. Checking the NumPy types of the values in these arrays, and their first few elements, we see they are composed of floating point numbers (not another level of `np.ndarray`):
```
i_neurons = [0, 321]
i_print = slice(0, 5)
for i in i_neurons:
print(
"Neuron {}:".format(i),
spike_times[i].dtype,
spike_times[i][i_print],
"\n",
sep="\n"
)
```
Note that this time we've checked the NumPy `dtype` rather than the Python variable type. These two arrays contain floating point numbers ("floats") with 32 bits of precision.
The basic picture is coming together:
- `spike_times` is 1D, its entries are NumPy arrays, and its length is the number of neurons (734): by indexing it, we select a subset of neurons.
- An array in `spike_times` is also 1D and corresponds to a single neuron; its entries are floating point numbers, and its length is the number of spikes attributed to that neuron. By indexing it, we select a subset of spike times for that neuron.
Visually, you can think of the data structure as looking something like this:
```
| . . . . . |
| . . . . . . . . |
| . . . |
| . . . . . . . |
```
Before moving on, we'll calculate and store the number of neurons in the dataset and the number of spikes per neuron:
```
n_neurons = len(spike_times)
total_spikes_per_neuron = [len(spike_times_i) for spike_times_i in spike_times]
print(f"Number of neurons: {n_neurons}")
print(f"Number of spikes for first five neurons: {total_spikes_per_neuron[:5]}")
# @title Video 2: Exploring the dataset
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="oHwYWUI_o1U", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
## Section 1.2: Getting warmer: counting and plotting total spike counts
As we've seen, the number of spikes over the entire recording is variable between neurons. More generally, some neurons tend to spike more than others in a given period. Lets explore what the distribution of spiking looks like across all the neurons in the dataset.
Are most neurons "loud" or "quiet", compared to the average? To see, we'll define bins of constant width in terms of total spikes and count the neurons that fall in each bin. This is known as a "histogram".
You can plot a histogram with the matplotlib function `plt.hist`. If you just need to compute it, you can use the numpy function `np.histogram` instead.
```
plt.hist(total_spikes_per_neuron, bins=50, histtype="stepfilled")
plt.xlabel("Total spikes per neuron")
plt.ylabel("Number of neurons");
```
Let's see what percentage of neurons have a below-average spike count:
```
mean_spike_count = np.mean(total_spikes_per_neuron)
frac_below_mean = (total_spikes_per_neuron < mean_spike_count).mean()
print(f"{frac_below_mean:2.1%} of neurons are below the mean")
```
We can also see this by adding the average spike count to the histogram plot:
```
plt.hist(total_spikes_per_neuron, bins=50, histtype="stepfilled")
plt.xlabel("Total spikes per neuron")
plt.ylabel("Number of neurons")
plt.axvline(mean_spike_count, color="orange", label="Mean neuron")
plt.legend();
```
This shows that the majority of neurons are relatively "quiet" compared to the mean, while a small number of neurons are exceptionally "loud": they must have spiked more often to reach a large count.
### Exercise 1: Comparing mean and median neurons
If the mean neuron is more active than 68% of the population, what does that imply about the relationship between the mean neuron and the median neuron?
*Exercise objective:* Reproduce the plot above, but add the median neuron.
```
# To complete the exercise, fill in the missing parts (...) and uncomment the code
median_spike_count = ... # Hint: Try the function np.median
# plt.hist(..., bins=50, histtype="stepfilled")
# plt.axvline(..., color="limegreen", label="Median neuron")
# plt.axvline(mean_spike_count, color="orange", label="Mean neuron")
# plt.xlabel("Total spikes per neuron")
# plt.ylabel("Number of neurons")
# plt.legend()
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D1_ModelTypes/solutions/W1D1_Tutorial1_Solution_b3411d5d.py)
*Example output:*
<img alt='Solution hint' align='left' width=558 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1_ModelTypes/static/W1D1_Tutorial1_Solution_b3411d5d_0.png>
*Bonus:* The median is the 50th percentile. What about other percentiles? Can you show the interquartile range on the histogram?
---
# Section 2: Visualizing neuronal spiking activity
## Section 2.1: Getting a subset of the data
Now we'll visualize trains of spikes. Because the recordings are long, we will first define a short time interval and restrict the visualization to only the spikes in this interval. We defined a utility function, `restrict_spike_times`, to do this for you. If you call `help()` on the function, it will tell you a little bit about itself:
```
help(restrict_spike_times)
t_interval = (5, 15) # units are seconds after start of recording
interval_spike_times = restrict_spike_times(spike_times, t_interval)
```
Is this a representative interval? What fraction of the total spikes fall in this interval?
```
original_counts = sum([len(spikes) for spikes in spike_times])
interval_counts = sum([len(spikes) for spikes in interval_spike_times])
frac_interval_spikes = interval_counts / original_counts
print(f"{frac_interval_spikes:.2%} of the total spikes are in the interval")
```
How does this compare to the ratio between the interval duration and the experiment duration? (What fraction of the total time is in this interval?)
We can approximate the experiment duration by taking the minimum and maximum spike time in the whole dataset. To do that, we "concatenate" all of the neurons into one array and then use `np.ptp` ("peak-to-peak") to get the difference between the maximum and minimum value:
```
spike_times_flat = np.concatenate(spike_times)
experiment_duration = np.ptp(spike_times_flat)
interval_duration = t_interval[1] - t_interval[0]
frac_interval_time = interval_duration / experiment_duration
print(f"{frac_interval_time:.2%} of the total time is in the interval")
```
These two values—the fraction of total spikes and the fraction of total time—are similar. This suggests the average spike rate of the neuronal population is not very different in this interval compared to the entire recording.
## Section 2.2: Plotting spike trains and rasters
Now that we have a representative subset, we're ready to plot the spikes, using the matplotlib `plt.eventplot` function. Let's look at a single neuron first:
```
neuron_idx = 1
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
```
We can also plot multiple neurons. Here are three:
```
neuron_idx = [1, 11, 51]
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
```
This makes a "raster" plot, where the spikes from each neuron appear in a different row.
Plotting a large number of neurons can give you a sense for the characteristics in the population. Let's show every 5th neuron that was recorded:
```
neuron_idx = np.arange(0, len(spike_times), 5)
plt.eventplot(interval_spike_times[neuron_idx], color=".2")
plt.xlabel("Time (s)")
plt.yticks([]);
```
*Question*: How does the information in this plot relate to the histogram of total spike counts that you saw above?
```
# @title Video 3: Visualizing activity
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="QGA5FCW7kkA", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
---
# Section 3: Inter-spike intervals and their distributions
Given the ordered arrays of spike times for each neuron in `spike_times`, which we've just visualized, what can we ask next?
Scientific questions are informed by existing models. So, what knowledge do we already have that can inform questions about this data?
We know that there are physical constraints on neuron spiking. Spiking costs energy, which the neuron's cellular machinery can only obtain at a finite rate. Therefore neurons should have a refractory period: they can only fire as quickly as their metabolic processes can support, and there is a minimum delay between consecutive spikes of the same neuron.
More generally, we can ask "how long does a neuron wait to spike again?" or "what is the longest a neuron will wait?" Can we transform spike times into something else, to address questions like these more directly?
We can consider the inter-spike times (or interspike intervals: ISIs). These are simply the time differences between consecutive spikes of the same neuron.
### Exercise 2: Plot the distribution of ISIs for a single neuron
*Exercise objective:* make a histogram, like we did for spike counts, to show the distribution of ISIs for one of the neurons in the dataset.
Do this in three steps:
1. Extract the spike times for one of the neurons
2. Compute the ISIs (the amount of time between spikes, or equivalently, the difference between adjacent spike times)
3. Plot a histogram with the array of individual ISIs
```
def compute_single_neuron_isis(spike_times, neuron_idx):
"""Compute a vector of ISIs for a single neuron given spike times.
Args:
spike_times (list of 1D arrays): Spike time dataset, with the first
dimension corresponding to different neurons.
neuron_idx (int): Index of the unit to compute ISIs for.
Returns:
isis (1D array): Duration of time between each spike from one neuron.
"""
#############################################################################
# Students: Fill in missing code (...) and comment or remove the next line
raise NotImplementedError("Exercise: compute single neuron ISIs")
#############################################################################
# Extract the spike times for the specified neuron
single_neuron_spikes = ...
# Compute the ISIs for this set of spikes
# Hint: the function np.diff computes discrete differences along an array
isis = ...
return isis
# Uncomment the following lines when you are ready to test your function
# single_neuron_isis = compute_single_neuron_isis(spike_times, neuron_idx=283)
# plt.hist(single_neuron_isis, bins=50, histtype="stepfilled")
# plt.axvline(single_neuron_isis.mean(), color="orange", label="Mean ISI")
# plt.xlabel("ISI duration (s)")
# plt.ylabel("Number of spikes")
# plt.legend()
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D1_ModelTypes/solutions/W1D1_Tutorial1_Solution_4792dbfa.py)
*Example output:*
<img alt='Solution hint' align='left' width=558 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D1_ModelTypes/static/W1D1_Tutorial1_Solution_4792dbfa_0.png>
---
In general, the shorter ISIs are predominant, with counts decreasing rapidly (and smoothly, more or less) with increasing ISI. However, counts also rapidly decrease to zero with _decreasing_ ISI, below the maximum of the distribution (8-11 ms). The absence of these very low ISIs agrees with the refractory period hypothesis: the neuron cannot fire quickly enough to populate this region of the ISI distribution.
Check the distributions of some other neurons. To resolve various features of the distributions, you might need to play with the value of `n_bins`. Using too few bins might smooth over interesting details, but if you use too many bins, the random variability will start to dominate.
You might also want to restrict the range to see the shape of the distribution when focusing on relatively short or long ISIs. *Hint:* `plt.hist` takes a `range` argument
---
# Section 4: What is the functional form of an ISI distribution?
```
# @title Video 4: ISI distribution
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="DHhM80MOTe8", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
The ISI histograms seem to follow continuous, monotonically decreasing functions above their maxima. The function is clearly non-linear. Could it belong to a single family of functions?
To motivate the idea of using a mathematical function to explain physiological phenomena, let's define a few different function forms that we might expect the relationship to follow: exponential, inverse, and linear.
```
def exponential(xs, scale, rate, x0):
"""A simple parametrized exponential function, applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
scale (float): Linear scaling factor.
rate (float): Exponential growth (positive) or decay (negative) rate.
x0 (float): Horizontal offset.
"""
ys = scale * np.exp(rate * (xs - x0))
return ys
def inverse(xs, scale, x0):
"""A simple parametrized inverse function (`1/x`), applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
scale (float): Linear scaling factor.
x0 (float): Horizontal offset.
"""
ys = scale / (xs - x0)
return ys
def linear(xs, slope, y0):
"""A simple linear function, applied element-wise.
Args:
xs (np.ndarray or float): Input(s) to the function.
slope (float): Slope of the line.
y0 (float): y-intercept of the line.
"""
ys = slope * xs + y0
return ys
```
### Interactive Demo: ISI functions explorer
Here is an interactive demo where you can vary the parameters of these functions and see how well the resulting outputs correspond to the data. Adjust the parameters by moving the sliders and see how close you can get the lines to follow the falling curve of the histogram. This will give you a taste of what you're trying to do when you *fit a model* to data.
"Interactive demo" cells have hidden code that defines an interface where you can play with the parameters of some function using sliders. You don't need to worry about how the code works – but you do need to **run the cell** to enable the sliders.
```
#@title
#@markdown Be sure to run this cell to enable the demo
# Don't worry about understanding this code! It's to setup an interactive plot.
single_neuron_idx = 283
single_neuron_spikes = spike_times[single_neuron_idx]
single_neuron_isis = np.diff(single_neuron_spikes)
counts, edges = np.histogram(
single_neuron_isis,
bins=50,
range=(0, single_neuron_isis.max())
)
functions = dict(
exponential=exponential,
inverse=inverse,
linear=linear,
)
colors = dict(
exponential="C1",
inverse="C2",
linear="C4",
)
@widgets.interact(
exp_scale=widgets.FloatSlider(1000, min=0, max=20000, step=250),
exp_rate=widgets.FloatSlider(-10, min=-200, max=50, step=1),
exp_x0=widgets.FloatSlider(0.1, min=-0.5, max=0.5, step=0.005),
inv_scale=widgets.FloatSlider(1000, min=0, max=3e2, step=10),
inv_x0=widgets.FloatSlider(0, min=-0.2, max=0.2, step=0.01),
lin_slope=widgets.FloatSlider(-1e5, min=-6e5, max=1e5, step=10000),
lin_y0=widgets.FloatSlider(10000, min=0, max=4e4, step=1000),
)
def fit_plot(
exp_scale=1000, exp_rate=-10, exp_x0=0.1,
inv_scale=1000, inv_x0=0,
lin_slope=-1e5, lin_y0=2000,
):
"""Helper function for plotting function fits with interactive sliders."""
func_params = dict(
exponential=(exp_scale, exp_rate, exp_x0),
inverse=(inv_scale, inv_x0),
linear=(lin_slope, lin_y0),
)
f, ax = plt.subplots()
ax.fill_between(edges[:-1], counts, step="post", alpha=.5)
xs = np.linspace(1e-10, edges.max())
for name, function in functions.items():
ys = function(xs, *func_params[name])
ax.plot(xs, ys, lw=3, color=colors[name], label=name);
ax.set(
xlim=(edges.min(), edges.max()),
ylim=(0, counts.max() * 1.1),
xlabel="ISI (s)",
ylabel="Number of spikes",
)
ax.legend()
# @title Video 5: Fitting models by hand
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id="", width=854, height=480, fs=1)
print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="uW2HDk_4-wk", width=854, height=480, fs=1, rel=0)
print('Video available at https://youtube.com/watch?v=' + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
# Summary
In this tutorial, we loaded some neural data and poked at it to understand how the dataset is organized. Then we made some basic plots to visualize (1) the average level of activity across the population and (2) the distribution of ISIs for an individual neuron. In the very last bit, we started to think about using mathematical formalisms to understand or explain some physiological phenomenon. All of this only allowed us to understand "What" the data looks like.
This is the first step towards developing models that can tell us something about the brain. That's what we'll focus on in the next two tutorials.
|
github_jupyter
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.