```
import netCDF4 as nc
from matplotlib import pyplot as plt
import numpy as np
import glob
import pickle
from salishsea_tools import evaltools as et, places
import datetime as dt
import os
import re
import cmocean
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import matplotlib as mpl
mpl.rc('xtick', labelsize=8)
mpl.rc('ytick', labelsize=8)
mpl.rc('legend', fontsize=8)
mpl.rc('axes', titlesize=8)
mpl.rc('axes', labelsize=8)
mpl.rc('figure', titlesize=8)
mpl.rc('font', size=8)
mpl.rc('text', usetex=True)
mpl.rc('text.latex', preamble = r'''
\usepackage{txfonts}
\usepackage{lmodern}
''')
mpl.rc('font', family='sans-serif', weight='normal', style='normal')
%matplotlib inline
with nc.Dataset('/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc') as mesh:
    tmask=mesh.variables['tmask'][0,:,:,:]
    e1t=np.expand_dims(mesh.variables['e1t'][:,:,:],1)
    e2t=np.expand_dims(mesh.variables['e2t'][:,:,:],1)
    zs=mesh.variables['gdept_1d'][0,:]
SOGtmaskPath='/ocean/eolson/MEOPAR/northernNO3PaperCalcs/save/SOGtmask.pkl'
(tmaskSOG,_,_,_,_)=pickle.load(open(SOGtmaskPath,'rb'))
np.shape(tmaskSOG)
idir0='/results/SalishSea/nowcast-green.201812/'
idir1='/data/eolson/results/MEOPAR/SS36runs/GrahamRuns/fluxesCorr/'
ts=dt.datetime(2015,1,1)
te=dt.datetime(2015,12,31)
plist=['Sentry Shoal','S3','Central node','Central SJDF']
ivars=['nitrate','diatoms','flagellates','ciliates']
#flist0=et.index_model_files_flex(idir0,'ptrc_T','1d','nowcast',ts,te)
flist0=et.index_model_files(ts,te,idir0,'nowcast',1,'ptrc_T',24)
flist1=et.index_model_files_flex(idir1,'ptrc_T','1d','long',ts,te)
len(flist0)
ts0=dict()
for pl in plist:
    ts0[pl]=dict()
    for ivar in ivars:
        ts0[pl][ivar]=list()
ts0['SOGMean']=dict()
ts0['SOGSTE']=dict()
for ivar in ivars:
    ts0['SOGMean'][ivar]=list()
    ts0['SOGSTE'][ivar]=list()
for i,r in flist0.iterrows():
    with nc.Dataset(r['paths']) as f:
        tmaskSOG2=tmaskSOG*np.ones(np.shape(f.variables['nitrate'][:])) # broadcast
        for pl in plist:
            jj,ii=places.PLACES[pl]['NEMO grid ji']
            for ivar in ivars:
                ts0[pl][ivar].append(f.variables[ivar][:,:,jj,ii])
        for ivar in ivars:
            ts0['SOGMean'][ivar].append(np.mean(np.ma.masked_where(tmaskSOG2==0,
                f.variables[ivar][:,:,:,:]),tuple((2,3))))
            ts0['SOGSTE'][ivar].append(np.std(np.ma.masked_where(tmaskSOG2==0,f.variables[ivar][:,:,:,:]),tuple((2,3)))/\
                np.sqrt(np.sum(tmaskSOG2,tuple((2,3)))))
for pl in plist:
    for ivar in ivars:
        ts0[pl][ivar]=np.concatenate(ts0[pl][ivar],axis=0)
for ivar in ivars:
    ts0['SOGMean'][ivar]=np.concatenate(ts0['SOGMean'][ivar],axis=0)
    ts0['SOGSTE'][ivar]=np.concatenate(ts0['SOGSTE'][ivar],axis=0)
dates0=np.array([ts+dt.timedelta(days=ii) for ii in range(0,len(ts0['S3']['nitrate']))])
ts1=dict()
for pl in plist:
    ts1[pl]=dict()
    for ivar in ivars:
        ts1[pl][ivar]=list()
ts1['SOGMean']=dict()
ts1['SOGSTE']=dict()
for ivar in ivars:
    ts1['SOGMean'][ivar]=list()
    ts1['SOGSTE'][ivar]=list()
for i,r in flist1.iterrows():
    with nc.Dataset(r['paths']) as f:
        tmaskSOG2=tmaskSOG*np.ones(np.shape(f.variables['nitrate'][:])) # broadcast
        for pl in plist:
            jj,ii=places.PLACES[pl]['NEMO grid ji']
            for ivar in ivars:
                ts1[pl][ivar].append(f.variables[ivar][:,:,jj,ii])
        for ivar in ivars:
            ts1['SOGMean'][ivar].append(np.mean(np.ma.masked_where(tmaskSOG2==0,
                f.variables[ivar][:,:,:,:]),tuple((2,3))))
            ts1['SOGSTE'][ivar].append(np.std(np.ma.masked_where(tmaskSOG2==0,f.variables[ivar][:,:,:,:]),tuple((2,3)))/\
                np.sqrt(np.sum(tmaskSOG2,tuple((2,3)))))
for pl in plist:
    for ivar in ivars:
        ts1[pl][ivar]=np.concatenate(ts1[pl][ivar],axis=0)
for ivar in ivars:
    ts1['SOGMean'][ivar]=np.concatenate(ts1['SOGMean'][ivar],axis=0)
    ts1['SOGSTE'][ivar]=np.concatenate(ts1['SOGSTE'][ivar],axis=0)
np.shape(ts1['SOGMean']['nitrate'])
dates1=np.array([ts+dt.timedelta(days=ii) for ii in range(0,len(ts1['S3']['nitrate'][:,0]))])
np.shape(ts0['S3']['nitrate']),np.shape(ts1['S3']['nitrate'])
zs[0],zs[10],zs[26]
fig,ax=plt.subplots(3,1,figsize=(6,4))
fig.subplots_adjust(hspace=.6,bottom = 0.18,top=.95)
p1,=ax[0].plot(dates0,ts0['S3']['nitrate'][:,0],'k-',label='_nolegend_')
p2,=ax[0].plot(dates1,ts1['S3']['nitrate'][:,0],'r--',label='_nolegend_')
p3,=ax[0].plot(dates0,ts0['S3']['nitrate'][:,10],'b-',label='_nolegend_')
p4,=ax[0].plot(dates1,ts1['S3']['nitrate'][:,10],'c--',label='_nolegend_')
p5,=ax[0].plot(dates0,ts0['S3']['nitrate'][:,26],'g-',label='_nolegend_')
p6,=ax[0].plot(dates1,ts1['S3']['nitrate'][:,26],'y--',label='_nolegend_')
ax[0].set_title('S3')
ax[0].set_ylabel('Nitrate\n($\muup$M N)')
p1,=ax[1].plot(dates0,ts0['Sentry Shoal']['nitrate'][:,0],'k-',label='_nolegend_')
p2,=ax[1].plot(dates1,ts1['Sentry Shoal']['nitrate'][:,0],'r--',label='_nolegend_')
p3,=ax[1].plot(dates0,ts0['Sentry Shoal']['nitrate'][:,10],'b-',label='_nolegend_')
p4,=ax[1].plot(dates1,ts1['Sentry Shoal']['nitrate'][:,10],'c--',label='_nolegend_')
p5,=ax[1].plot(dates0,ts0['Sentry Shoal']['nitrate'][:,26],'g-',label='_nolegend_')
p6,=ax[1].plot(dates1,ts1['Sentry Shoal']['nitrate'][:,26],'y--',label='_nolegend_')
ax[1].set_title('Sentry Shoal')
ax[1].set_ylabel('Nitrate\n($\muup$M N)')
p1,=ax[2].plot(dates0,ts0['SOGMean']['nitrate'][:,0],'k-',label='Surface, original')
p2,=ax[2].plot(dates1,ts1['SOGMean']['nitrate'][:,0],'r--',label='Surface, corrected')
p3,=ax[2].plot(dates0,ts0['SOGMean']['nitrate'][:,10],'b-',label='10 m, original')
p4,=ax[2].plot(dates1,ts1['SOGMean']['nitrate'][:,10],'c--',label='10 m, corrected')
p5,=ax[2].plot(dates0,ts0['SOGMean']['nitrate'][:,26],'g-',label='100 m, original')
p6,=ax[2].plot(dates1,ts1['SOGMean']['nitrate'][:,26],'y--',label='100 m, corrected')
ax[2].set_title('Strait of Georgia Mean')
ax[2].set_ylabel('Nitrate\n($\muup$M N)')
fig.legend(loc=8,ncol=3,bbox_to_anchor=(.5, 0))
for iax in (ax[0],ax[1],ax[2]):
    iax.set_xlim((dt.datetime(2015,1,1),dt.datetime(2016,1,1)))
fig.savefig('compFluxesCorrNO3.eps',dpi=200)
fig,ax=plt.subplots(3,1,figsize=(6,4))
fig.subplots_adjust(hspace=.6,bottom = 0.18,top=.95)
p1,=ax[0].plot(dates0,ts0['S3']['diatoms'][:,0],'k-',label='_nolegend_')
p2,=ax[0].plot(dates1,ts1['S3']['diatoms'][:,0],'r--',label='_nolegend_')
p3,=ax[0].plot(dates0,ts0['S3']['diatoms'][:,10],'b-',label='_nolegend_')
p4,=ax[0].plot(dates1,ts1['S3']['diatoms'][:,10],'c--',label='_nolegend_')
p5,=ax[0].plot(dates0,ts0['S3']['diatoms'][:,26],'g-',label='_nolegend_')
p6,=ax[0].plot(dates1,ts1['S3']['diatoms'][:,26],'y--',label='_nolegend_')
ax[0].set_title('S3')
ax[0].set_ylabel('Diatoms\n($\muup$M N)')
p1,=ax[1].plot(dates0,ts0['Sentry Shoal']['diatoms'][:,0],'k-',label='_nolegend_')
p2,=ax[1].plot(dates1,ts1['Sentry Shoal']['diatoms'][:,0],'r--',label='_nolegend_')
p3,=ax[1].plot(dates0,ts0['Sentry Shoal']['diatoms'][:,10],'b-',label='_nolegend_')
p4,=ax[1].plot(dates1,ts1['Sentry Shoal']['diatoms'][:,10],'c--',label='_nolegend_')
p5,=ax[1].plot(dates0,ts0['Sentry Shoal']['diatoms'][:,26],'g-',label='_nolegend_')
p6,=ax[1].plot(dates1,ts1['Sentry Shoal']['diatoms'][:,26],'y--',label='_nolegend_')
ax[1].set_title('Sentry Shoal')
ax[1].set_ylabel('Diatoms\n($\muup$M N)')
p1,=ax[2].plot(dates0,ts0['SOGMean']['diatoms'][:,0],'k-',label='Surface, original')
p2,=ax[2].plot(dates1,ts1['SOGMean']['diatoms'][:,0],'r--',label='Surface, corrected')
p3,=ax[2].plot(dates0,ts0['SOGMean']['diatoms'][:,10],'b-',label='10 m, original')
p4,=ax[2].plot(dates1,ts1['SOGMean']['diatoms'][:,10],'c--',label='10 m, corrected')
p5,=ax[2].plot(dates0,ts0['SOGMean']['diatoms'][:,26],'g-',label='100 m, original')
p6,=ax[2].plot(dates1,ts1['SOGMean']['diatoms'][:,26],'y--',label='100 m, corrected')
ax[2].set_title('Strait of Georgia Mean')
ax[2].set_ylabel('Diatoms\n($\muup$M N)')
fig.legend(loc=8,ncol=3,bbox_to_anchor=(.5, 0))
for iax in (ax[0],ax[1],ax[2]):
    iax.set_xlim((dt.datetime(2015,1,1),dt.datetime(2016,1,1)))
fig.savefig('compFluxesCorrDIAT.eps',dpi=200)
```
# The Problem
Research paper topic modeling is an unsupervised machine learning method that discovers hidden semantic structures in a corpus of papers, allowing us to learn a topic representation for each paper. The same approach can be applied to any kind of label attached to documents, such as tags on website posts.
[The dataset of research papers can be found here](https://github.com/susanli2016/Machine-Learning-with-Python/blob/master/dataset.csv)
# Text Cleaning
This function takes care of all the text cleaning and returns the tokens of a text.
```
import spacy
spacy.load('en')
from spacy.lang.en import English
parser = English()
def tokenize(text):
    lda_tokens = []
    tokens = parser(text)
    for token in tokens:
        if token.orth_.isspace():
            continue
        elif token.like_url:
            lda_tokens.append('URL')
        elif token.orth_.startswith('@'):
            lda_tokens.append('SCREEN_NAME')
        else:
            lda_tokens.append(token.lower_)
    return lda_tokens
```
We use NLTK’s Wordnet to find the meanings of words, synonyms, antonyms, and more. In addition, we use WordNetLemmatizer to get the root word.
```
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
def get_lemma(word):
    lemma = wn.morphy(word)
    if lemma is None:
        return word
    else:
        return lemma
from nltk.stem.wordnet import WordNetLemmatizer
def get_lemma2(word):
    return WordNetLemmatizer().lemmatize(word)
```
Filtering stopwords
```
nltk.download('stopwords')
en_stop = set(nltk.corpus.stopwords.words('english'))
```
Now we can define a function to prepare the text for topic modelling:
```
def prepare_text_for_lda(text):
    tokens = tokenize(text)
    tokens = [token for token in tokens if len(token) > 4]
    tokens = [token for token in tokens if token not in en_stop]
    tokens = [get_lemma(token) for token in tokens]
    return tokens
```
Let's prepare our data for LDA
```
import random
text_data = []
with open('dataset.csv') as f:
    for line in f:
        tokens = prepare_text_for_lda(line)
        if random.random() > .99:
            print(tokens)
            text_data.append(tokens)
```
# LDA with Gensim
Let's save dictionary & corpus for further use
```
from gensim import corpora
dictionary = corpora.Dictionary(text_data)
corpus = [dictionary.doc2bow(text) for text in text_data]
import pickle
pickle.dump(corpus, open('corpus.pkl', 'wb'))
dictionary.save('dictionary.gensim')
```
Let's get only five topics from data
```
import gensim
NUM_TOPICS = 5
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = NUM_TOPICS, id2word=dictionary, passes=15)
ldamodel.save('model5.gensim')
topics = ldamodel.print_topics(num_words=4)
for topic in topics:
    print(topic)
```
Let's test it now with new data
```
new_doc = 'Practical Bayesian Optimization of Machine Learning Algorithms'
new_doc = prepare_text_for_lda(new_doc)
new_doc_bow = dictionary.doc2bow(new_doc)
print(new_doc_bow)
print(ldamodel.get_document_topics(new_doc_bow))
```
# Visualize
```
dictionary = gensim.corpora.Dictionary.load('dictionary.gensim')
corpus = pickle.load(open('corpus.pkl', 'rb'))
lda = gensim.models.ldamodel.LdaModel.load('model5.gensim')
import pyLDAvis.gensim
lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=False)
pyLDAvis.display(lda_display)
```
```
%pylab inline
from pyannote.core import notebook
```
# Timeline (`pyannote.core.timeline.Timeline`)
```
from pyannote.core import Timeline
```
**`Timeline`** instances are used to describe sets of temporal fragments (e.g. of an audio file).
One can optionally store an identifier of the associated multimedia document using the **`uri`** keyword argument.
```
timeline = Timeline(uri='MyAudioFile')
```
Temporal fragments can be added to the timeline in any order and may be overlapping.
```
from pyannote.core import Segment
timeline.add(Segment(6, 8))
timeline.add(Segment(0.5, 3))
timeline.add(Segment(8.5, 10))
timeline.add(Segment(1, 4))
timeline.add(Segment(5, 7))
timeline.add(Segment(7, 8))
```
They are automatically sorted internally (by start time first).
```
for segment in timeline:
    print(segment)
print("Timeline contains %d segments" % len(timeline))
print("The second segment of timeline is %s" % (str(timeline[1])))
```
One can visualize **`Timeline`** instances in IPython Notebook.
```
timeline
```
## Coverage, extent and gaps
The **extent** of a timeline is the segment of minimum duration that contains every segment of the timeline.
```
timeline.extent()
```
The **coverage** of a timeline (returned by the `support()` method) is the timeline with the minimum number of segments that covers exactly the same time span as the original timeline.
```
timeline.support()
```
One can also retrieve **gaps** in a timeline.
```
timeline.gaps()
timeline.gaps(focus=Segment(0, 10.5))
```
## Cropping
Using the **crop** method, it is possible to select a subpart of timeline.
```
selection = Segment(3,7)
selection
timeline
```
In **intersection** mode, segments are cut in half if needed.
```
timeline.crop(selection, mode='intersection')
```
In **strict** mode, only segments fully included in selection are kept.
```
timeline.crop(selection, mode='strict')
```
In **loose** mode, any segment with a non-empty intersection is kept unchanged even though it starts before or ends after the selection.
```
timeline.crop(selection, mode='loose')
```
## Union of timeline
The **update** (in place) and **union** (copy) methods can be used to combine two timelines.
```
notebook.crop = Segment(0, 6)
first_timeline = Timeline([Segment(0, 1), Segment(2, 3), Segment(4, 5)])
first_timeline
second_timeline = Timeline([Segment(1.5, 4.5)])
second_timeline
new_timeline = first_timeline.union(second_timeline)
new_timeline
```
## Intersection of timelines
```
second_timeline.crop(first_timeline)
```
The **co_iter** method allows iterating over pairs of intersecting segments.
```
for s_first, s_second in first_timeline.co_iter(second_timeline):
    print(s_first, s_second)
```
## Need help?
You can always try the following...
Who knows? It might give you the information you are looking for!
```
help(Timeline)
```
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
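# NOTE: class_df is assumed to be loaded in an earlier cell that is not shown here,
# e.g. class_df = pd.read_csv('class_survey.csv'); that file name is hypothetical.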
class_df.columns
relevant_cols = ['Had you had sexual intercourse before starting university?', 'If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)']
drugs_virgin_before_uni = class_df[relevant_cols]
drugs_virgin_before_uni.head()
drugs_virgin_before_uni.loc[drugs_virgin_before_uni['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'] != 'I did not use drugs', 'If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'] = 'I used drugs'
drugs_virgin_before_uni.head(10)
drugs_virgin_before_uni = drugs_virgin_before_uni.dropna()
drugs_virgin_before_uni.head(10)
drugs_virgin_before_uni = drugs_virgin_before_uni[drugs_virgin_before_uni['Had you had sexual intercourse before starting university?'] == 'No']
drugs_virgin_before_uni
drugs_virgin_before_uni['Number of people'] = drugs_virgin_before_uni.groupby(['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'])['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'].transform('count')
drugs_virgin_before_uni.head(10)
drugs_virgin_before_uni = drugs_virgin_before_uni.drop_duplicates(subset=['Had you had sexual intercourse before starting university?', 'If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)', 'Number of people'], keep='first')
drugs_virgin_before_uni
drugs_not_virgin_before_uni=class_df[relevant_cols]
drugs_not_virgin_before_uni.head()
drugs_not_virgin_before_uni.loc[drugs_not_virgin_before_uni['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'] != 'I did not use drugs', 'If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'] = 'I used drugs'
drugs_not_virgin_before_uni = drugs_not_virgin_before_uni.dropna()
drugs_not_virgin_before_uni = drugs_not_virgin_before_uni[drugs_not_virgin_before_uni['Had you had sexual intercourse before starting university?'] == 'Yes']
drugs_not_virgin_before_uni.head()
drugs_not_virgin_before_uni['Number of people'] = drugs_not_virgin_before_uni.groupby(['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'])['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)'].transform('count')
drugs_not_virgin_before_uni = drugs_not_virgin_before_uni.drop_duplicates(subset=['If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)', 'If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)', 'Number of people'], keep='first')
drugs_not_virgin_before_uni
frames=[drugs_virgin_before_uni, drugs_not_virgin_before_uni]
drugsSex=pd.concat(frames)
drugsSex
total_respondants = drugsSex['Number of people'].sum()
total_respondants
drugsSex['Percentage of People'] = (drugsSex['Number of people'] / total_respondants) * 100
drugsSex
drugsSex.rename(columns={"If you have used recreational drugs, which ones? (If you have not used drugs, indicate as such)": "Drug Consumption"}, inplace=True)
drugsSex
sns.set(font_scale=1.2)
sns.set_theme(palette="colorblind")
ax=sns.catplot(x='Had you had sexual intercourse before starting university?', y='Percentage of People', hue='Drug Consumption', data=drugsSex, kind='bar', height=8, aspect=1.5)
ax.set(ylim=(0, 45))
plt.title("Drug Consumption VS Virginity Before 1A", fontsize=18, y=1.03)
plt.xlabel("Had sex before 1A", labelpad=15, fontsize=16)
plt.ylabel("Percentage of people (%)", labelpad=15, fontsize=16)
```
# Log metrics with MLflow in PyTorch Lightning
description: log mlflow metrics in pytorch lightning with azureml as the backend tracking store
Lightning supports many popular [logging frameworks](https://pytorch-lightning.readthedocs.io/en/stable/loggers.html). [MLflow](https://mlflow.org/) is a popular open-source library for managing the lifecycle of your ML projects. Azure ML offers integration with MLflow, including for training. Specifically, Azure ML integrates as a backend tracking store for MLflow's [Tracking](https://mlflow.org/docs/latest/tracking.html#) component for logging metrics and managing runs. This tutorial will cover using the MLflow logger and leveraging the Azure ML MLflow integration.
```
from azureml.core import Workspace
ws = Workspace.from_config()
ws
import git
from pathlib import Path
# get root of git repo
prefix = Path(git.Repo(".", search_parent_directories=True).working_tree_dir)
# training script
source_dir = prefix.joinpath(
"code", "train", "pytorch-lightning", "mnist-autoencoder"
)
script_name = "train-with-mlflow-logging.py"
# environment file
environment_file = prefix.joinpath("environments", "pt-lightning.yml")
# azure ml settings
environment_name = "pt-lightning"
experiment_name = "pt-lightning-mlflow-example"
cluster_name = "gpu-K80-2"
```
## Create environment
Define a conda environment YAML file with your training script dependencies and create an Azure ML environment. This notebook will use the same environment definition that was used for part 1 of the tutorial. The dependencies include **mlflow** and **azureml-mlflow**, which are needed for logging with MLflow.
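If you prefer not to maintain a separate YAML file, an equivalent environment can also be sketched programmatically with the SDK's `CondaDependencies` helper. The package list below is an assumption for illustration, not the contents of `pt-lightning.yml`:

```python
# Illustrative alternative to Environment.from_conda_specification (package list is assumed)
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

alt_env = Environment("pt-lightning-inline")  # hypothetical environment name
alt_env.python.conda_dependencies = CondaDependencies.create(
    pip_packages=[
        "torch", "pytorch-lightning",  # training framework
        "mlflow", "azureml-mlflow",    # required for MLflow logging against Azure ML
    ]
)
```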
```
from azureml.core import Environment
env = Environment.from_conda_specification(environment_name, environment_file)
# specify a GPU base image
env.docker.enabled = True
env.docker.base_image = (
"mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.2-cudnn8-ubuntu18.04"
)
```
## Enable logging in training script
In *train-with-mlflow-logging.py*:
### 1. Create an MLFlowLogger
To configure the MLFlowLogger, you will need to provide the following:
* **Tracking URI**: Specify the tracking URI to point to your Azure ML Workspace in order to use Azure ML as the backend tracking store for MLflow. You can get the URI with `get_mlflow_tracking_uri()`.
* **Experiment name**: Use the same name as your Azure ML experiment.
* **Run ID**: You will need to link the MLFlowLogger's run ID to the ID of the Azure ML run.
To get the Azure ML Run object of the training run, use the azureml `Run.get_context()` method. Once you have the Run object, you can then access the Experiment and Workspace.
```python
from azureml.core import Run
run = Run.get_context()
mlflow_uri = run.experiment.workspace.get_mlflow_tracking_uri()
exp_name = run.experiment.name
mlf_logger = MLFlowLogger(experiment_name=exp_name, tracking_uri=mlflow_uri)
mlf_logger._run_id = run.id
trainer = pl.Trainer.from_argparse_args(args, logger=mlf_logger)
```
Lightning will then take care of setting the tracking URI, creating the MLflow experiment, starting the MLflow run, and creating the underlying `MlflowClient` object.
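For intuition, the steps Lightning automates here correspond roughly to the following manual calls against MLflow's own API, reusing the `mlflow_uri`, `exp_name`, and `run` names from the snippet above. This is a sketch for understanding only; neither the notebook nor the training script does this by hand:

```python
# Rough manual equivalent of what MLFlowLogger sets up (sketch only)
import mlflow

mlflow.set_tracking_uri(mlflow_uri)    # point MLflow at the Azure ML workspace
mlflow.set_experiment(exp_name)        # use the same experiment name as Azure ML
with mlflow.start_run(run_id=run.id):  # attach to the existing Azure ML run
    mlflow.log_metric("loss", 0.123)   # logged values appear under the run's metrics
```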
### 2. Log metrics
You can then log metrics and other objects in your script. This tutorial's training script leverages Lightning's automatic log functionalities to log the loss metric by calling `self.log()` inside the `training_step()` method. Since logging too frequently can slow down training, the tutorial logs at the end of every epoch.
```python
self.log('loss', loss, on_epoch=True, on_step=False)
```
For more information on logging and the configurable options, see Lightning's [Logging](https://pytorch-lightning.readthedocs.io/en/stable/logging.html) documentation and the [MLFlowLogger](https://pytorch-lightning.readthedocs.io/en/stable/logging.html#mlflow) reference documentation.
### Configure and run training job
Create a ScriptRunConfig to specify the training script & arguments, environment, and cluster to run on.
```
from azureml.core import ScriptRunConfig, Experiment
cluster = ws.compute_targets[cluster_name]
src = ScriptRunConfig(
source_directory=source_dir,
script=script_name,
arguments=["--max_epochs", 25, "--gpus", 2, "--accelerator", "ddp"],
compute_target=cluster,
environment=env,
)
run = Experiment(ws, experiment_name).submit(src)
run
```
If you navigate to the Azure ML studio UI, you can see the logged metrics visualized under the Experiment view and the "Metrics" tab of the individual Run view.
```
run.wait_for_completion(show_output=True)
```
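Alternatively, once the run has completed, the logged metrics can be retrieved programmatically; a minimal sketch using the Azure ML SDK's `Run.get_metrics()`:

```python
# Pull the logged metrics back as a dictionary (metric name -> value or list of values)
metrics = run.get_metrics()
print(metrics.get("loss"))
```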
# Interact Exercise 6
## Imports
Put the standard imports for Matplotlib, Numpy and the IPython widgets in the following cell.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Image
from IPython.html.widgets import interact, interactive, fixed
```
## Exploring the Fermi distribution
In quantum statistics, the [Fermi-Dirac](http://en.wikipedia.org/wiki/Fermi%E2%80%93Dirac_statistics) distribution is related to the probability that a particle will be in a quantum state with energy $\epsilon$. The equation for the distribution $F(\epsilon)$ is:
```
Image('fermidist.png')
```
In this equation:
* $\epsilon$ is the single particle energy.
* $\mu$ is the chemical potential, which is related to the total number of particles.
* $k$ is the Boltzmann constant.
* $T$ is the temperature in Kelvin.
In the cell below, typeset this equation using LaTeX:
$\large F(\epsilon) = {\Large \frac{1}{e^{(\epsilon-\mu)/kT}+1}}$
Define a function `fermidist(energy, mu, kT)` that computes the distribution function for a given value of `energy`, chemical potential `mu` and temperature `kT`. Note here, `kT` is a single variable with units of energy. Make sure your function works with an array and don't use any `for` or `while` loops in your code.
```
def fermidist(energy, mu, kT):
    """Compute the Fermi distribution at energy, mu and kT."""
    F = 1/(np.exp((energy-mu)/kT)+1)
    return F
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,
0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))
```
Write a function `plot_fermidist(mu, kT)` that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters `mu` and `kT`.
* Use energies over the range $[0,10.0]$ and a suitable number of points.
* Choose an appropriate x and y limit for your visualization.
* Label your x and y axis and the overall visualization.
* Customize your plot in 3 other ways to make it effective and beautiful.
```
def plot_fermidist(mu, kT):
    energy = np.linspace(0,10.0,21)
    plt.plot(energy, fermidist(energy, mu, kT))
    plt.tick_params(direction='out')
    plt.xlabel('$Energy$')
    plt.ylabel('$F(Energy)$')
    plt.title('Fermi Distribution')
plot_fermidist(4.0, 1.0)
assert True # leave this for grading the plot_fermidist function
```
Use `interact` with `plot_fermidist` to explore the distribution:
* For `mu` use a floating point slider over the range $[0.0,5.0]$.
* for `kT` use a floating point slider over the range $[0.1,10.0]$.
```
interact(plot_fermidist, mu=(0.0,5.0), kT=(0.1,10.0))
```
Provide complete sentence answers to the following questions in the cell below:
* What happens when the temperature $kT$ is low?
* What happens when the temperature $kT$ is high?
* What is the effect of changing the chemical potential $\mu$?
* The number of particles in the system is related to the area under this curve. How does the chemical potential affect the number of particles?
Use LaTeX to typeset any mathematical symbols in your answer.
When $kT$ is low, the slope at $\epsilon = \mu$ becomes much steeper, while when it is high, the curve is much flatter. Changing $\mu$ shifts the location of the transition (the point where the curve crosses $1/2$ and flips concavity). Since the number of particles is related to the area under the curve, the higher $\mu$ is, the more particles there are in the system.
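One way to see this directly from the formula: the exponent vanishes at $\epsilon = \mu$, so

$$F(\mu) = \frac{1}{e^{0}+1} = \frac{1}{2} \quad \text{for any } kT,$$

and as $kT \to 0$ the exponent $(\epsilon-\mu)/kT \to \pm\infty$, giving $F(\epsilon) \to 1$ for $\epsilon < \mu$ and $F(\epsilon) \to 0$ for $\epsilon > \mu$. The curve therefore approaches a step at $\epsilon = \mu$, which a large $kT$ smooths back out.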
Things to keep in mind:
1. Treat theta and X as vectors.
2. Apply feature scaling and mean normalization to the data.
3. New features can be defined from the existing ones, e.g. for polynomial regression (see the sketch after this list).
4. The x and y data should be of type np.array.
5. In the x matrix, training examples run along the columns and features down the rows.
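For point 3, here is a minimal standalone sketch of building a polynomial feature from an existing one and fitting it with the normal equation. The data and variable names are illustrative only and are not used by the notebook code below:

```python
import numpy as np

x = np.array([0., 1., 2., 3., 4.])        # one original feature (illustrative data)
y = 1.0 + 2.0 * x - 0.5 * x**2            # target with a quadratic component

X_poly = np.vstack([x, x**2])             # features as rows, training examples as columns (point 5)
m = X_poly.shape[1]
A = np.vstack([np.ones(m), X_poly]).T     # prepend the bias term, shape (m, 3)
theta = np.linalg.pinv(A.T @ A) @ A.T @ y # normal equation
print(theta)                              # approximately [1.0, 2.0, -0.5]
```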
```
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
set_matplotlib_formats("svg")
%matplotlib inline
import matplotlib
#matplotlib.style.use("dark_background")
matplotlib.style.use("default")
import scipy.optimize as optim
def normalize(x, mu, s) :
    return (x - mu) / s
def normalEq (x, y):
    x = x.astype(float)
    y = y.astype(float)
    m = len(x.T) # m is the number of training examples
    x = np.vstack([np.ones([m]), x]).T
    theta = np.linalg.pinv(x.T @ x) @ x.T @ y
    return theta
def hypFunction(x,theta) :
    m = len(x.T)
    x_0 = np.ones([m])
    x = np.vstack([x_0,x])
    return theta.T @ x
def plotRegression(x,y,theta, dist = 0.1 ) :
    xdata = np.arange(x[0] - 1, x[-1] + 1, dist)
    y_reg = hypFunction(xdata,theta)
    plt.plot(x,y,'rx', label = 'data')
    plt.plot(xdata,y_reg,label = 'regression')
    plt.grid()
    plt.legend()
def computeCostFunc(x,y,theta,lambda_ = 0):
    m = len(x.T) # m is the number of training examples
    return 1/(2*m) * np.sum( (hypFunction(x,theta) - y)**2) + lambda_/(2*m) * np.sum(theta**2)
def gradDescent(x,y, alfa = 0.01, lambda_ = 0, itermax = 20000, debug = False) :
    n = len(np.mat(x)) # n is the number of features
    m = len(x.T)
    x = x.astype(float)
    xdata = x
    x_iter = np.arange(0,itermax,1)
    y_costFunc = np.zeros([itermax])
    theta = np.zeros([n+1])
    x = np.vstack([np.ones([m]), x])
    for it in range(itermax) :
        if (debug) :
            y_costFunc[it] = computeCostFunc(xdata,y,theta)
        theta = theta * (1 - alfa * lambda_/m) - alfa/m * (hypFunction(xdata,theta) - y) @ x.T
    if (debug) :
        plt.plot(x_iter,y_costFunc)
        plt.grid()
    return theta
def theta_w_Scipy(x,y,lambda_ = 0) :
    def computeCostFunc2(theta_,x_,y_,lambda_):
        return computeCostFunc(x_,y_,theta_,lambda_)
    n = len(np.mat(x))
    theta = np.zeros([n+1])
    return optim.fmin(computeCostFunc2,theta, args = (x,y,lambda_))
x = np.array([1,2,3,4])
y = 2*x -4
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss="hinge", penalty="l2", max_iter=5)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
# x_train/x_test were not defined in the original cell; a simple split of the toy data above
# (reshaped to 2D, as scikit-learn expects) is assumed here so the cell runs end to end.
x_train, x_test, y_train, y_test = train_test_split(x.reshape(-1, 1), y, test_size=0.5, random_state=0)
clf = LinearRegression(normalize=True)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(r2_score(y_test, y_pred))
```
```
import datajoint as dj
dj.config['database.host'] = 'datajoint.internationalbrainlab.org'
from ibl_pipeline import subject, acquisition, action, behavior, reference
from ibl_pipeline.analyses.behavior import PsychResults
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
myPath = r"C:\Users\Luigi\Documents\GitHub\ibl-changepoint\data" # Write here your data path
os.chdir(myPath)
# We analyze data only from mice that have performed at least this number of stable sessions
MinSessionNumber = 5;
# These are the example mice
example_mice_nicknames = ['CSHL_005','CSHL_007','IBL-T1','IBL-T4','ibl_witten_04','ibl_witten_05']
# Here we download all mice
all_mice = subject.Subject().proj('subject_nickname')
mice_list = list(all_mice)
mice_names = list()
for subj in mice_list:
    mice_names.append(subj['subject_nickname'])
# Uncomment to only download the example mice
# mice_names = example_mice_nicknames
sess_train = (acquisition.Session & 'task_protocol LIKE "%training%"')
sess_stable = (acquisition.Session & 'task_protocol LIKE "%biased%"')
stable_mice_names = list()
def get_mouse_data(df):
    position_deg = 35. # Stimuli appear at +/- 35 degrees
    # Create new dataframe
    datamat = pd.DataFrame()
    datamat['trial_num'] = df['trial_id']
    datamat['session_num'] = np.cumsum(df['trial_id'] == 1)
    datamat['stim_probability_left'] = df['trial_stim_prob_left']
    signed_contrast = df['trial_stim_contrast_right'] - df['trial_stim_contrast_left']
    datamat['contrast'] = np.abs(signed_contrast)
    datamat['position'] = np.sign(signed_contrast)*position_deg
    datamat['response_choice'] = df['trial_response_choice']
    datamat.loc[df['trial_response_choice'] == 'CCW','response_choice'] = 1
    datamat.loc[df['trial_response_choice'] == 'CW','response_choice'] = -1
    datamat.loc[df['trial_response_choice'] == 'No Go','response_choice'] = 0
    datamat['trial_correct'] = np.double(df['trial_feedback_type']==1)
    datamat['reaction_time'] = df['trial_response_time'] - df['trial_stim_on_time'] # double-check
    # Since some trials have zero contrast, need to compute the alleged position separately
    datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'position'] = \
        datamat.loc[(datamat['trial_correct'] == 1) & (signed_contrast == 0),'response_choice']*position_deg
    datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'position'] = \
        datamat.loc[(datamat['trial_correct'] == 0) & (signed_contrast == 0),'response_choice']*(-position_deg)
    return datamat
# Loop over all mice
for mouse_nickname in mice_names:
    mouse_subject = {'subject_nickname': mouse_nickname}
    # Have a look at the protocol for the first session (could probably be done in a smarter way)
    behavior_all = (acquisition.Session & (subject.Subject & mouse_subject)) \
        * subject.Subject.proj('subject_nickname') \
        * subject.SubjectLab.proj('lab_name')
    df0 = pd.DataFrame(behavior_all.fetch(order_by='session_start_time'))
    first_protocol = df0['task_protocol']
    # Only proceed with this mouse if it is a recent one (starts with a '*habituation*' protocol)
    if (len(first_protocol) > 0) and (first_protocol[0] is not None) and (first_protocol[0].lower().find('habituation') >= 0):
        # Get mouse data for biased sessions (thanks Anne Urai for the snippet)
        behavior_stable = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \
            * sess_stable.proj('session_uuid','task_protocol') * subject.Subject.proj('subject_nickname') \
            * subject.SubjectLab.proj('lab_name')
        df = pd.DataFrame(behavior_stable.fetch(order_by='subject_nickname, session_start_time, trial_id'))
        if len(df) > 0: # The mouse has performed in at least one stable session with biased blocks
            datamat = get_mouse_data(df)
            # Take mice that have performed a minimum number of sessions
            if np.max(datamat['session_num']) >= MinSessionNumber:
                # Should add 'N' to mice names that start with numbers?
                # Save dataframe to CSV file
                filename = mouse_nickname + '.csv'
                datamat.to_csv(filename,index=False)
                stable_mice_names.append(mouse_nickname)
                # Get mouse last sessions of training data
                behavior_train = (behavior.TrialSet.Trial & (subject.Subject & mouse_subject)) \
                    * sess_train.proj('session_uuid','task_protocol') * subject.Subject.proj('subject_nickname') \
                    * subject.SubjectLab.proj('lab_name')
                df_train = pd.DataFrame(behavior_train.fetch(order_by='subject_nickname, session_start_time, trial_id'))
                datamat_train = get_mouse_data(df_train)
                Nlast = np.max(datamat_train['session_num']) - 3
                datamat_final = datamat_train[datamat_train['session_num'] > Nlast]
                # Save final training dataframe to CSV file
                filename = mouse_nickname + '_endtrain.csv'
                datamat_final.to_csv(filename,index=False)
print(stable_mice_names)
```
<a href="https://colab.research.google.com/github/kdstheace/Project_FinancialAnalysis/blob/Daniel/Financial_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import tensorflow as tf
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils import to_categorical
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from google.colab import drive
drive.mount('/content/drive')
#1) Data preparation
raw_data = pd.read_csv('drive/MyDrive/Colab Notebooks/data/bankruptcy_data.csv')
X = raw_data.drop('Bankrupt?', axis=1).drop(' Liability-Assets Flag', axis=1).drop(' Net Income Flag', axis=1)\
.drop(' Revenue Per Share (Yuan ¥)', axis=1).drop(' Operating Profit Per Share (Yuan ¥)', axis=1)\
.drop(' Per Share Net profit before tax (Yuan ¥)', axis=1).drop(' Interest-bearing debt interest rate', axis=1)
y = np.array(raw_data['Bankrupt?'])
print(raw_data.shape)
print(X.shape)
print(y.shape)
scaler = StandardScaler()
scaled_X = scaler.fit_transform(X)
scaled_X = pd.DataFrame(scaled_X, columns=X.columns)
display(scaled_X)
categorized_y = to_categorical(y)
print(categorized_y.shape)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, categorized_y, test_size=0.2, random_state=1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
model = Sequential()
model.add(Dense(units=100, activation='relu', input_dim=89))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=25, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=2, activation='sigmoid'))
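# note: with categorical_crossentropy, a softmax output layer is the conventional choice; sigmoid still trains, but the two class scores are not constrained to sum to 1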
model.summary()
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test), verbose=1)
scores = model.evaluate(X_test, y_test)
print("\n%s: %.2f%%" % (model.metrics_names[0], scores[0]*100))
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
plt.rcParams['figure.figsize'] = (200, 50)
bp = scaled_X.boxplot()
pred = model.predict(X_test)
print(pred.shape)
for i in range(1364):
if np.argmax(pred[i]) != np.argmax(y_test[i]):
print(i)
labels=['nope', 'yes']
i=1277
print(labels[np.argmax(y_test[i])])
print(labels[np.argmax(pred[i])])
X.isnull().sum()
pd.set_option('display.max_row', 100)
pd.set_option('display.max_columns', 100)
```
|
github_jupyter
|
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import tensorflow as tf
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils import to_categorical
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from google.colab import drive
drive.mount('/content/drive')
#1) Data preparation
raw_data = pd.read_csv('drive/MyDrive/Colab Notebooks/data/bankruptcy_data.csv')
X = raw_data.drop('Bankrupt?', axis=1).drop(' Liability-Assets Flag', axis=1).drop(' Net Income Flag', axis=1)\
.drop(' Revenue Per Share (Yuan ¥)', axis=1).drop(' Operating Profit Per Share (Yuan ¥)', axis=1)\
.drop(' Per Share Net profit before tax (Yuan ¥)', axis=1).drop(' Interest-bearing debt interest rate', axis=1)
y = np.array(raw_data['Bankrupt?'])
print(raw_data.shape)
print(X.shape)
print(y.shape)
scaler = StandardScaler()
scaled_X = scaler.fit_transform(X)
scaled_X = pd.DataFrame(scaled_X, columns=X.columns)
display(scaled_X)
categorized_y = to_categorical(y)
print(categorized_y.shape)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, categorized_y, test_size=0.2, random_state=1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
model = Sequential()
model.add(Dense(units=100, activation='relu', input_dim=89))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=25, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=2, activation='sigmoid'))
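# note: with categorical_crossentropy, a softmax output layer is the conventional choice; sigmoid still trains, but the two class scores are not constrained to sum to 1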
model.summary()
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
hist = model.fit(X_train, y_train, epochs=50, validation_data=(X_test, y_test), verbose=1)
scores = model.evaluate(X_test, y_test)
print("\n%s: %.2f%%" % (model.metrics_names[0], scores[0]*100))
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
plt.rcParams['figure.figsize'] = (200, 50)
bp = scaled_X.boxplot()
pred = model.predict(X_test)
print(pred.shape)
for i in range(1364):
if np.argmax(pred[i]) != np.argmax(y_test[i]):
print(i)
labels=['nope', 'yes']
i=1277
print(labels[np.argmax(y_test[i])])
print(labels[np.argmax(pred[i])])
X.isnull().sum()
pd.set_option('display.max_row', 100)
pd.set_option('display.max_columns', 100)
| 0.509276 | 0.873107 |
```
# Python library imports: numpy, random, sklearn, pandas, etc
import warnings
warnings.filterwarnings('ignore')
import sys
import random
import numpy as np
from sklearn import linear_model, cross_validation, metrics, svm
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# function to read HDFS file into dataframe using PyDoop
import pydoop.hdfs as hdfs
def read_csv_from_hdfs( path ):
pieces = []
fhandle = hdfs.open(path)
print "validating file : %s" % fhandle
cols = ['key', 'value'];
pieces.append(pd.read_csv(fhandle, names=cols, dtype=None, delimiter="\t"))
fhandle.close()
return pd.concat(pieces, ignore_index=True)
def extract_data_as_frame(in_data ):
# dataset LONGITUDE LATITUDE T_DAILY_MEAN SUR_TEMP_DAILY_AVG SOIL_MOISTURE_10_DAILY
data_list = []
    for index in in_data:
dict1 = {}
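        # skip rows whose temperature fields carry implausibly low values (treated here as missing-data sentinels)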
x= float(index[3])
if x < -30:
continue
x= float(index[2])
if x < -30:
continue
dict1.update(lat=index[1],lon=index[0], day=float(index[2]), surface=float(index[3]), moisture=float(index[4]))
data_list.append(dict1)
data_as_frame = pd.DataFrame(data_list, columns=['lat', 'lon', 'day', 'surface', 'moisture'])
return data_as_frame
def extract_geo_data(in_data ):
geo_list = []
    for index in in_data:
dict1 = {}
dict1.update(lat=index[1],lon=index[0])
geo_list.append(dict1)
geo_key = pd.DataFrame(geo_list, columns=['lat', 'lon'])
return geo_key
def extract_temp_data(in_data ):
temp_list = []
    for index in in_data:
dict1 = {}
dict1.update(day=index[2],surface=index[3])
temp_list.append(dict1)
temp_values = pd.DataFrame(temp_list, columns=['day', 'surface'])
return temp_values
def extract_soil_mositure(in_data ):
moisture_list = []
    for index in in_data:
dict1 = {}
dict1.update(moisture=index[4] )
moisture_list.append(dict1)
moisture_values = pd.DataFrame(moisture_list, columns=['moisture'])
return moisture_values
result = read_csv_from_hdfs('/user/cloudera/sci_data_1_out/part-r-00000')
data_val = result.iloc[:,[1]]
# data_val2 will be a series so we convert it to useful dataframes
# dataset LONGITUDE LATITUDE T_DAILY_MEAN SUR_TEMP_DAILY_AVG SOIL_MOISTURE_10_DAILY
data_val2 = data_val.value.str.split('|')
data_as_frame =extract_data_as_frame(data_val2 )
len(data_as_frame)
bymoisture = data_as_frame.head(40).groupby(['moisture']).mean()
bymoisture[:2].plot(kind='bar')
```
|
github_jupyter
|
# Python library imports: numpy, random, sklearn, pandas, etc
import warnings
warnings.filterwarnings('ignore')
import sys
import random
import numpy as np
from sklearn import linear_model, cross_validation, metrics, svm
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# function to read HDFS file into dataframe using PyDoop
import pydoop.hdfs as hdfs
def read_csv_from_hdfs( path ):
pieces = []
fhandle = hdfs.open(path)
print "validating file : %s" % fhandle
cols = ['key', 'value'];
pieces.append(pd.read_csv(fhandle, names=cols, dtype=None, delimiter="\t"))
fhandle.close()
return pd.concat(pieces, ignore_index=True)
def extract_data_as_frame(in_data ):
# dataset LONGITUDE LATITUDE T_DAILY_MEAN SUR_TEMP_DAILY_AVG SOIL_MOISTURE_10_DAILY
data_list = []
    for index in in_data:
dict1 = {}
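        # skip rows whose temperature fields carry implausibly low values (treated here as missing-data sentinels)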
x= float(index[3])
if x < -30:
continue
x= float(index[2])
if x < -30:
continue
dict1.update(lat=index[1],lon=index[0], day=float(index[2]), surface=float(index[3]), moisture=float(index[4]))
data_list.append(dict1)
data_as_frame = pd.DataFrame(data_list, columns=['lat', 'lon', 'day', 'surface', 'moisture'])
return data_as_frame
def extract_geo_data(in_data ):
geo_list = []
    for index in in_data:
dict1 = {}
dict1.update(lat=index[1],lon=index[0])
geo_list.append(dict1)
geo_key = pd.DataFrame(geo_list, columns=['lat', 'lon'])
return geo_key
def extract_temp_data(in_data ):
temp_list = []
    for index in in_data:
dict1 = {}
dict1.update(day=index[2],surface=index[3])
temp_list.append(dict1)
temp_values = pd.DataFrame(temp_list, columns=['day', 'surface'])
return temp_values
def extract_soil_mositure(in_data ):
moisture_list = []
    for index in in_data:
dict1 = {}
dict1.update(moisture=index[4] )
moisture_list.append(dict1)
moisture_values = pd.DataFrame(moisture_list, columns=['moisture'])
return moisture_values
result = read_csv_from_hdfs('/user/cloudera/sci_data_1_out/part-r-00000')
data_val = result.iloc[:,[1]]
# data_val2 will be a series so we convert it to useful dataframes
# dataset LONGITUDE LATITUDE T_DAILY_MEAN SUR_TEMP_DAILY_AVG SOIL_MOISTURE_10_DAILY
data_val2 = data_val.value.str.split('|')
data_as_frame =extract_data_as_frame(data_val2 )
len(data_as_frame)
bymoisture = data_as_frame.head(40).groupby(['moisture']).mean()
bymoisture[:2].plot(kind='bar')
| 0.293911 | 0.428652 |
Determining where to impose a $M_{halo}$ cut, using either $M_{halo}$ or $M_{max}$ as the cut variable.
```
import numpy as np
# --- centralms ---
from centralMS import util as UT
from centralMS import catalog as Cat
import corner as DFM
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
%matplotlib inline
subhalo = Cat.CentralSubhalos(nsnap0=15)
shcat = subhalo.Read()
fig = plt.figure(figsize=(8,4))
sub = fig.add_subplot(121)
sub.scatter(shcat['m.max'], shcat['m.star'], c='k', s=1)
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
sub.set_ylim([6., 12.2])
sub = fig.add_subplot(122)
DFM.hist2d(shcat['m.max'], shcat['m.star'],
levels=[0.68, 0.95], range=[[10., 15.],[6., 12.2]],
ax=sub)
print(shcat['m.max'][shcat['m.star'] == 0.].min(), shcat['m.max'][shcat['m.star'] == 0.].max())
print(shcat['m.max'][shcat['m.star'] != 0.].min())
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['m.max'], shcat['halo.m'], c='C0', s=1)
sub.plot([0., 15.], [0., 15.], c='k', lw=2, ls='--')
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([8., 15.])
sub.set_ylabel('$M_{halo}$', fontsize=25)
sub.set_ylim([8., 15.])
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'], shcat['m.star'], c='C0', s=0.1)
sub.vlines(10.6, 0., 12., color='k', linestyle='--')
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
sub.set_ylim([0., 12.])
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'][shcat['halo.m'] > 10.6], shcat['m.star'][shcat['halo.m'] > 10.6], c='C0', s=0.1)
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['m.max'][shcat['halo.m'] > 10.6], shcat['m.star'][shcat['halo.m'] > 10.6], c='C0', s=0.1)
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
mh_bins = np.linspace(10., 15., 20)
ms_med = []
ms_sig = []
for i_m in range(len(mh_bins)-1):
inmhbin = ((shcat['halo.m'] >= mh_bins[i_m]) & (shcat['halo.m'] < mh_bins[i_m+1]))
ms_med.append(np.median(shcat['m.star'][inmhbin]))
ms_sig.append(np.std(shcat['m.star'][inmhbin]))
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'], shcat['m.star'], c='k', s=0.1)
sub.errorbar(0.5*(mh_bins[:-1] + mh_bins[1:]), ms_med, ms_sig)
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
```
|
github_jupyter
|
import numpy as np
# --- centralms ---
from centralMS import util as UT
from centralMS import catalog as Cat
import corner as DFM
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
%matplotlib inline
subhalo = Cat.CentralSubhalos(nsnap0=15)
shcat = subhalo.Read()
fig = plt.figure(figsize=(8,4))
sub = fig.add_subplot(121)
sub.scatter(shcat['m.max'], shcat['m.star'], c='k', s=1)
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
sub.set_ylim([6., 12.2])
sub = fig.add_subplot(122)
DFM.hist2d(shcat['m.max'], shcat['m.star'],
levels=[0.68, 0.95], range=[[10., 15.],[6., 12.2]],
ax=sub)
print(shcat['m.max'][shcat['m.star'] == 0.].min(), shcat['m.max'][shcat['m.star'] == 0.].max())
print(shcat['m.max'][shcat['m.star'] != 0.].min())
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['m.max'], shcat['halo.m'], c='C0', s=1)
sub.plot([0., 15.], [0., 15.], c='k', lw=2, ls='--')
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([8., 15.])
sub.set_ylabel('$M_{halo}$', fontsize=25)
sub.set_ylim([8., 15.])
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'], shcat['m.star'], c='C0', s=0.1)
sub.vlines(10.6, 0., 12., color='k', linestyle='--')
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
sub.set_ylim([0., 12.])
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'][shcat['halo.m'] > 10.6], shcat['m.star'][shcat['halo.m'] > 10.6], c='C0', s=0.1)
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['m.max'][shcat['halo.m'] > 10.6], shcat['m.star'][shcat['halo.m'] > 10.6], c='C0', s=0.1)
sub.set_xlabel('$M_{max}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
mh_bins = np.linspace(10., 15., 20)
ms_med = []
ms_sig = []
for i_m in range(len(mh_bins)-1):
inmhbin = ((shcat['halo.m'] >= mh_bins[i_m]) & (shcat['halo.m'] < mh_bins[i_m+1]))
ms_med.append(np.median(shcat['m.star'][inmhbin]))
ms_sig.append(np.std(shcat['m.star'][inmhbin]))
fig = plt.figure()
sub = fig.add_subplot(111)
sub.scatter(shcat['halo.m'], shcat['m.star'], c='k', s=0.1)
sub.errorbar(0.5*(mh_bins[:-1] + mh_bins[1:]), ms_med, ms_sig)
sub.set_xlabel('$M_{halo}$', fontsize=25)
sub.set_xlim([10., 15.])
sub.set_ylabel('$M_*$', fontsize=25)
| 0.271445 | 0.738315 |
# Lab 05 : Final code -- demo
```
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture05/lab05_final'
print(path_to_file)
# move to Google Drive directory
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.optim as optim
from random import randint
import time
import utils
```
### Download the data
```
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
```
### Make a one layer net class.
```
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
```
### Build the net
```
net=one_layer_net(784,10)
print(net)
utils.display_num_param(net)
```
### Choose the criterion, batchsize
```
criterion = nn.CrossEntropyLoss()
bs=200
```
### Evaluate on test set
```
def eval_on_test_set():
running_error=0
num_batches=0
for i in range(0,10000,bs):
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
inputs = minibatch_data.view(bs,784)
scores=net( inputs )
error = utils.get_error( scores , minibatch_label)
running_error += error.item()
num_batches+=1
total_error = running_error/num_batches
print( 'test error = ', total_error*100 ,'percent')
```
### Training loop
```
start = time.time()
lr = 0.05 # initial learning rate
for epoch in range(200):
# learning rate strategy : divide the learning rate by 1.5 every 10 epochs
if epoch%10==0 and epoch>10:
lr = lr / 1.5
# create a new optimizer at the beginning of each epoch: give the current learning rate.
optimizer=torch.optim.SGD( net.parameters() , lr=lr )
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
# forward and backward pass
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute some stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# once the epoch is finished we divide the "running quantities"
# by the number of batches
total_loss = running_loss/num_batches
total_error = running_error/num_batches
elapsed_time = time.time() - start
# every 10 epoch we display the stats
# and compute the error rate on the test set
if epoch % 10 == 0 :
print(' ')
print('epoch=',epoch, ' time=', elapsed_time,
' loss=', total_loss , ' error=', total_error*100 ,'percent lr=', lr)
eval_on_test_set()
```
### Choose image at random from the test set and see how good/bad are the predictions
```
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= torch.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
```
|
github_jupyter
|
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
# mount google drive
from google.colab import drive
drive.mount('/content/gdrive')
path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture05/lab05_final'
print(path_to_file)
# move to Google Drive directory
os.chdir(path_to_file)
!pwd
import torch
import torch.nn as nn
import torch.optim as optim
from random import randint
import time
import utils
from utils import check_mnist_dataset_exists
data_path=check_mnist_dataset_exists()
train_data=torch.load(data_path+'mnist/train_data.pt')
train_label=torch.load(data_path+'mnist/train_label.pt')
test_data=torch.load(data_path+'mnist/test_data.pt')
test_label=torch.load(data_path+'mnist/test_label.pt')
class one_layer_net(nn.Module):
def __init__(self, input_size, output_size):
super(one_layer_net , self).__init__()
self.linear_layer = nn.Linear( input_size, output_size , bias=False)
def forward(self, x):
scores = self.linear_layer(x)
return scores
net=one_layer_net(784,10)
print(net)
utils.display_num_param(net)
criterion = nn.CrossEntropyLoss()
bs=200
def eval_on_test_set():
running_error=0
num_batches=0
for i in range(0,10000,bs):
minibatch_data = test_data[i:i+bs]
minibatch_label= test_label[i:i+bs]
inputs = minibatch_data.view(bs,784)
scores=net( inputs )
error = utils.get_error( scores , minibatch_label)
running_error += error.item()
num_batches+=1
total_error = running_error/num_batches
print( 'test error = ', total_error*100 ,'percent')
start = time.time()
lr = 0.05 # initial learning rate
for epoch in range(200):
# learning rate strategy : divide the learning rate by 1.5 every 10 epochs
if epoch%10==0 and epoch>10:
lr = lr / 1.5
# create a new optimizer at the beginning of each epoch: give the current learning rate.
optimizer=torch.optim.SGD( net.parameters() , lr=lr )
running_loss=0
running_error=0
num_batches=0
shuffled_indices=torch.randperm(60000)
for count in range(0,60000,bs):
# forward and backward pass
optimizer.zero_grad()
indices=shuffled_indices[count:count+bs]
minibatch_data = train_data[indices]
minibatch_label= train_label[indices]
inputs = minibatch_data.view(bs,784)
inputs.requires_grad_()
scores=net( inputs )
loss = criterion( scores , minibatch_label)
loss.backward()
optimizer.step()
# compute some stats
running_loss += loss.detach().item()
error = utils.get_error( scores.detach() , minibatch_label)
running_error += error.item()
num_batches+=1
# once the epoch is finished we divide the "running quantities"
# by the number of batches
total_loss = running_loss/num_batches
total_error = running_error/num_batches
elapsed_time = time.time() - start
# every 10 epoch we display the stats
# and compute the error rate on the test set
if epoch % 10 == 0 :
print(' ')
print('epoch=',epoch, ' time=', elapsed_time,
' loss=', total_loss , ' error=', total_error*100 ,'percent lr=', lr)
eval_on_test_set()
# choose a picture at random
idx=randint(0, 10000-1)
im=test_data[idx]
# display the picture
utils.show(im)
# feed it to the net and display the confidence scores
scores = net( im.view(1,784))
probs= torch.softmax(scores, dim=1)
utils.show_prob_mnist(probs)
| 0.682785 | 0.751443 |
```
try:
import cirq
from cirq_iqm import Adonis, circuit_from_qasm
from cirq_iqm.optimizers import simplify_circuit
except ImportError:
print('Installing missing dependencies...')
!pip install --quiet cirq cirq_iqm
    import cirq
    from cirq_iqm import Adonis, circuit_from_qasm
from cirq_iqm.optimizers import simplify_circuit
print('Installation ready')
```
# The Adonis architecture
Qubit connectivity:
```
QB1
|
QB4 - QB3 - QB2
|
QB5
```
Construct an `IQMDevice` instance representing the Adonis architecture
```
adonis = Adonis()
print(adonis.NATIVE_GATES)
print(adonis.NATIVE_GATE_INSTANCES)
print(adonis.qubits)
```
# Creating a quantum circuit
Create a quantum circuit and insert native gates
```
a, b, c = adonis.qubits[:3]
circuit = cirq.Circuit(device=adonis)
circuit.append(cirq.X(a))
circuit.append(cirq.PhasedXPowGate(phase_exponent=0.3, exponent=0.5)(c))
circuit.append(cirq.CZ(a, c))
circuit.append(cirq.YPowGate(exponent=1.1)(c))
print(circuit)
```
Insert non-native gates, which are immediately decomposed into native ones
```
circuit.append(cirq.ZZPowGate(exponent=0.2, global_shift=-0.5)(a, c))
circuit.append(cirq.HPowGate(exponent=-0.4)(a))
print(circuit)
```
# Optimizing a quantum circuit
Use the `simplify_circuit` method to run a sequence of optimization passes on a circuit
```
circuit = cirq.Circuit([
cirq.H(a),
cirq.CNOT(a, c),
cirq.measure(a, c, key='result'),
], device=adonis)
print(circuit)
circuit = simplify_circuit(circuit)
print(circuit)
```
# Simulating a quantum circuit
Circuits that contain IQM-native gates can be simulated using the standard Cirq simulators
```
sim = cirq.Simulator()
samples = sim.run(circuit, repetitions=100)
print('Samples:')
print(samples.histogram(key='result'))
print('\nState before the measurement:')
result = sim.simulate(circuit[:-1])
print(result)
```
Note that the above output vector represents the state before the measurement in the optimized circuit, not the original
one, which would have the same phase for both terms. `simplify_circuit` has eliminated a `ZPowGate` which
has no effect on the measurement.
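To see this explicitly, one option (a minimal sketch reusing `a`, `c` and `sim` from above) is to simulate the unsimplified state preparation and compare the two output vectors:
```
# Rebuild the Bell-pair preparation without simplification, and inspect its
# state before measurement for comparison with the output above.
original = cirq.Circuit([cirq.H(a), cirq.CNOT(a, c)])
print(sim.simulate(original))
```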
---
# Creating a quantum circuit from an OpenQASM 2.0 program
```
qasm_program = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg meas[3];
rx(1.7) q[1];
h q[0];
cx q[1], q[2];
"""
circuit = circuit_from_qasm(qasm_program)
print(circuit)
```
Decompose the circuit for the Adonis architecture
```
decomposed = adonis.decompose_circuit(circuit)
print(decomposed)
```
Map the circuit qubits to device qubits manually
```
qubit_mapping = {cirq.NamedQubit(k): v for k, v in {'q_0': a, 'q_1': b, 'q_2': c}.items()}
mapped = decomposed.transform_qubits(qubit_mapping)
print(mapped)
```
or automatically
```
mapped = adonis.route_circuit(decomposed)
print(mapped)
```
See the `examples` directory for more examples.
|
github_jupyter
|
try:
import cirq
from cirq_iqm import Adonis, circuit_from_qasm
from cirq_iqm.optimizers import simplify_circuit
except ImportError:
print('Installing missing dependencies...')
!pip install --quiet cirq cirq_iqm
    import cirq
    from cirq_iqm import Adonis, circuit_from_qasm
from cirq_iqm.optimizers import simplify_circuit
print('Installation ready')
QB1
|
QB4 - QB3 - QB2
|
QB5
adonis = Adonis()
print(adonis.NATIVE_GATES)
print(adonis.NATIVE_GATE_INSTANCES)
print(adonis.qubits)
a, b, c = adonis.qubits[:3]
circuit = cirq.Circuit(device=adonis)
circuit.append(cirq.X(a))
circuit.append(cirq.PhasedXPowGate(phase_exponent=0.3, exponent=0.5)(c))
circuit.append(cirq.CZ(a, c))
circuit.append(cirq.YPowGate(exponent=1.1)(c))
print(circuit)
circuit.append(cirq.ZZPowGate(exponent=0.2, global_shift=-0.5)(a, c))
circuit.append(cirq.HPowGate(exponent=-0.4)(a))
print(circuit)
circuit = cirq.Circuit([
cirq.H(a),
cirq.CNOT(a, c),
cirq.measure(a, c, key='result'),
], device=adonis)
print(circuit)
circuit = simplify_circuit(circuit)
print(circuit)
sim = cirq.Simulator()
samples = sim.run(circuit, repetitions=100)
print('Samples:')
print(samples.histogram(key='result'))
print('\nState before the measurement:')
result = sim.simulate(circuit[:-1])
print(result)
qasm_program = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
creg meas[3];
rx(1.7) q[1];
h q[0];
cx q[1], q[2];
"""
circuit = circuit_from_qasm(qasm_program)
print(circuit)
decomposed = adonis.decompose_circuit(circuit)
print(decomposed)
qubit_mapping = {cirq.NamedQubit(k): v for k, v in {'q_0': a, 'q_1': b, 'q_2': c}.items()}
mapped = decomposed.transform_qubits(qubit_mapping)
print(mapped)
mapped = adonis.route_circuit(decomposed)
print(mapped)
| 0.38168 | 0.865622 |
<a href="https://colab.research.google.com/github/will-cotton4/DS-Unit-2-Sprint-4-Practicing-Understanding/blob/master/DS_Unit_2_Sprint_Challenge_4_Practicing_Understanding.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
_Lambda School Data Science Unit 2_
# Sprint Challenge: Practicing & Understanding Predictive Modeling
### Chicago Food Inspections
For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019.
[See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.
According to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls."
#### Your challenge: Predict whether inspections failed
The target is the `Fail` column.
- When the food establishment failed the inspection, the target is `1`.
- When the establishment passed, the target is `0`.
#### Run this cell to load the data:
```
import pandas as pd
train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
train['Facility Type'].value_counts()
train.columns
```
### Part 1: Preprocessing
You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding. (Pandas, category_encoders, sklearn.preprocessing, or any other library.)
_To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._
#### Preliminary Exploration
```
!pip install category-encoders
!pip install eli5
!pip install shap
train.columns
train.isnull().sum()
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
def wrangle(df):
df = df.copy()
# Remove values that have too many options/don't provide helpful info
df = df.drop(columns = ['DBA Name', 'AKA Name', 'License #', 'Address', 'Location', 'City', 'State'])
def clean_facility(string):
foods_drinks = ['Restaurant', 'Grocery Store', 'Bakery', 'Catering',
'Liquor', 'Golden Diner', 'Mobile Food Preparer',
'Mobile Food Dispenser', 'Tavern', 'TAVERN']
kids_stuff = ['School', 'Daycare (2 - 6 Years)', "Children's Services Facility",
'Daycare Above and Under 2 Years', 'Long Term Care', 'Daycare Combo 1586', 'Daycare (Under 2 Years)']
if type(string) is str:
if string in foods_drinks:
return 'food/drink'
elif string in kids_stuff:
return 'kids'
else:
return 'other'
df['Facility Type'] = df['Facility Type'].apply(clean_facility)
# Broken
# Bin violations by type
def clean_violation(entry):
if(type(entry) == str):
return entry.split('.')[0]
else:
return entry
df.Violations = df.Violations.apply(clean_violation)
df.Violations.fillna(0)
#Rename risk categories:
risk_dict = {'Risk 1 (High)': 1, 'Risk 2 (Medium)': 2, 'Risk 3 (Low)': 3}
df.Risk = df.Risk.replace(risk_dict)
# Remove missing values
df = df.dropna()
# Clean inspection type
def clean_inspection(string):
words = string.lower().split()
if 'complaint' in words:
return 'complaint'
elif 'canvass' in words:
return 'canvass'
elif 're-inspection' in words:
return 're-inspection'
elif 'poisoning' in words:
return 'poison'
else:
return 'other'
df['Inspection Type'] = df['Inspection Type'].apply(clean_inspection)
one_hot = pd.get_dummies(df['Facility Type'], prefix = 'Facility')
df = df.join(one_hot)
df = df.drop(columns = ['Facility Type'])
one_hot = pd.get_dummies(df['Inspection Type'], prefix = 'Inspection')
df = df.join(one_hot)
df = df.drop(columns = ['Inspection Type'])
df['Inspection Date'] = pd.to_datetime(df['Inspection Date'])
df['Inspection Month'] = df['Inspection Date'].apply(lambda x: x.month)
df['Inspection Year'] = df['Inspection Date'].apply(lambda x: x.year)
df.Violations = df.Violations.apply(int)
return df
train = wrangle(train)
test = wrangle(test)
```
**Violations is leaky. Some violations are automatic failures, and an establishment with no recorded violations trivially passes. More importantly, the violation record is only produced by the inspection itself, so a model that uses it is "predicting" an outcome from information that would not exist until after the inspection; it could never be available when forecasting future inspections in a real-world model.**
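A quick way to check this (a rough sketch that reuses the wrangled `train`/`test` frames above; the exact numbers depend on how `Violations` is processed) is to fit the same kind of classifier with and without the `Violations` column and compare test ROC AUC:
```
# Sketch of a leakage check: compare test ROC AUC with and without the leaky
# 'Violations' column, holding the rest of the setup fixed.
from xgboost import XGBClassifier
from sklearn.metrics import roc_auc_score

for label, cols in [('with Violations', ['Risk', 'Zip', 'Violations']),
                    ('without Violations', ['Risk', 'Zip'])]:
    clf = XGBClassifier(n_estimators=100, max_depth=3, n_jobs=-1, random_state=42)
    clf.fit(train[cols], train['Fail'])
    probs = clf.predict_proba(test[cols])[:, 1]
    print(label, round(roc_auc_score(test['Fail'], probs), 3))
```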
### Part 2: Modeling
**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) **Use cross-validation** to **do hyperparameter optimization**, and **estimate your ROC AUC** validation score.
Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._
```
train.columns
features = ['Risk', 'Zip', 'Facility_food/drink', 'Facility_kids',
'Facility_other', 'Inspection_canvass', 'Inspection_complaint',
'Inspection_other', 'Inspection_poison', 'Inspection_re-inspection']
X_train = train[features].dropna()
y_train = train['Fail'].dropna()
X_test = test[features].dropna()
y_test = test['Fail'].dropna()
X_train.isnull().sum()
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
param_distributions = {
'n_estimators': randint(100,500),
'max_depth': randint(2,4)
}
gridsearch = RandomizedSearchCV(
XGBClassifier(n_jobs=-1, random_state=42),
param_distributions=param_distributions,
n_iter=4,
cv=3,
scoring='roc_auc',
verbose=10,
return_train_score=True,
n_jobs=-1
)
gridsearch.fit(X_train, y_train)
from sklearn.metrics import roc_auc_score
print(gridsearch.best_score_)
best = gridsearch.best_estimator_
y_pred = best.predict_proba(X_test)[:,1]
print(roc_auc_score(y_test, y_pred))
```
### Part 3: Visualization
Make one visualization for model interpretation. (You may use any libraries.) Choose one of these types:
- Feature Importances
- Permutation Importances
- Partial Dependence Plot
- Shapley Values
_To earn a score of 3 for this part, make at least two of these visualization types._
```
# Feature importance plot:
import matplotlib.pyplot as plt
n = 15
figsize = (5,15)
importances = pd.Series(best.feature_importances_, X_train.columns)
top_n = importances.sort_values()[-n:]
plt.figure(figsize=figsize)
top_n.plot.barh(color='blue')
#Shapley plot
import shap
data_for_prediction = X_test.sample(1)
print(data_for_prediction)
shap.initjs()
explainer = shap.TreeExplainer(best)
shap_values = explainer.shap_values(data_for_prediction)
shap.force_plot(explainer.expected_value, shap_values, data_for_prediction)
```
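Of the other visualization types listed above, permutation importances could be added with `eli5` (installed earlier but not otherwise used). A minimal sketch, assuming the fitted `best` estimator and the `X_test`/`y_test` split from Part 2:
```
# Sketch: permutation importances for the tuned model. eli5 shuffles one
# feature column at a time and reports how much the model's score drops.
import eli5
from eli5.sklearn import PermutationImportance

perm = PermutationImportance(best, random_state=42).fit(X_test, y_test)
eli5.show_weights(perm, feature_names=X_test.columns.tolist())
```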
### Part 4: Gradient Descent
Answer both of these two questions:
- What does Gradient Descent seek to minimize?
- What is the "Learning Rate" and what is its function?
One sentence is sufficient for each.
_To earn a score of 3 for this part, go above and beyond. Show depth of understanding and mastery of intuition in your answers._
1. Gradient descent seeks to minimize a cost function for a given problem by locating the direction of steepest descent (along the negative gradient) and repeatedly taking a step in that direction.
2. The learning rate determines how much to scale the gradient when iterating through the GD algorithm. For example, a learning rate of 0.1 would indicate that we would travel in the direction of the negative gradient with a length of 0.1 times the original gradient length.
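A quick illustrative sketch of both answers in plain Python: minimizing $f(w) = (w-3)^2$, where each iteration steps along the negative gradient scaled by the learning rate.
```
# Gradient descent on f(w) = (w - 3)**2, whose gradient is 2*(w - 3).
# The learning rate scales each step taken along the negative gradient.
def grad(w):
    return 2.0 * (w - 3.0)

for lr in [0.01, 0.1, 0.5]:
    w = 0.0
    for _ in range(100):
        w = w - lr * grad(w)  # move opposite to the gradient
    print('lr =', lr, '-> w after 100 steps =', round(w, 4))
```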
|
github_jupyter
|
import pandas as pd
train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
train['Facility Type'].value_counts()
train.columns
!pip install category-encoders
!pip install eli5
!pip install shap
train.columns
train.isnull().sum()
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
def wrangle(df):
df = df.copy()
# Remove values that have too many options/don't provide helpful info
df = df.drop(columns = ['DBA Name', 'AKA Name', 'License #', 'Address', 'Location', 'City', 'State'])
def clean_facility(string):
foods_drinks = ['Restaurant', 'Grocery Store', 'Bakery', 'Catering',
'Liquor', 'Golden Diner', 'Mobile Food Preparer',
'Mobile Food Dispenser', 'Tavern', 'TAVERN']
kids_stuff = ['School', 'Daycare (2 - 6 Years)', "Children's Services Facility",
'Daycare Above and Under 2 Years', 'Long Term Care', 'Daycare Combo 1586', 'Daycare (Under 2 Years)']
if type(string) is str:
if string in foods_drinks:
return 'food/drink'
elif string in kids_stuff:
return 'kids'
else:
return 'other'
df['Facility Type'] = df['Facility Type'].apply(clean_facility)
# Broken
# Bin violations by type
def clean_violation(entry):
if(type(entry) == str):
return entry.split('.')[0]
else:
return entry
df.Violations = df.Violations.apply(clean_violation)
df.Violations.fillna(0)
#Rename risk categories:
risk_dict = {'Risk 1 (High)': 1, 'Risk 2 (Medium)': 2, 'Risk 3 (Low)': 3}
df.Risk = df.Risk.replace(risk_dict)
# Remove missing values
df = df.dropna()
# Clean inspection type
def clean_inspection(string):
words = string.lower().split()
if 'complaint' in words:
return 'complaint'
elif 'canvass' in words:
return 'canvass'
elif 're-inspection' in words:
return 're-inspection'
elif 'poisoning' in words:
return 'poison'
else:
return 'other'
df['Inspection Type'] = df['Inspection Type'].apply(clean_inspection)
one_hot = pd.get_dummies(df['Facility Type'], prefix = 'Facility')
df = df.join(one_hot)
df = df.drop(columns = ['Facility Type'])
one_hot = pd.get_dummies(df['Inspection Type'], prefix = 'Inspection')
df = df.join(one_hot)
df = df.drop(columns = ['Inspection Type'])
df['Inspection Date'] = pd.to_datetime(df['Inspection Date'])
df['Inspection Month'] = df['Inspection Date'].apply(lambda x: x.month)
df['Inspection Year'] = df['Inspection Date'].apply(lambda x: x.year)
df.Violations = df.Violations.apply(int)
return df
train = wrangle(train)
test = wrangle(test)
train.columns
features = ['Risk', 'Zip', 'Facility_food/drink', 'Facility_kids',
'Facility_other', 'Inspection_canvass', 'Inspection_complaint',
'Inspection_other', 'Inspection_poison', 'Inspection_re-inspection']
X_train = train[features].dropna()
y_train = train['Fail'].dropna()
X_test = test[features].dropna()
y_test = test['Fail'].dropna()
X_train.isnull().sum()
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
param_distributions = {
'n_estimators': randint(100,500),
'max_depth': randint(2,4)
}
gridsearch = RandomizedSearchCV(
XGBClassifier(n_jobs=-1, random_state=42),
param_distributions=param_distributions,
n_iter=4,
cv=3,
scoring='roc_auc',
verbose=10,
return_train_score=True,
n_jobs=-1
)
gridsearch.fit(X_train, y_train)
from sklearn.metrics import roc_auc_score
print(gridsearch.best_score_)
best = gridsearch.best_estimator_
y_pred = best.predict_proba(X_test)[:,1]
print(roc_auc_score(y_test, y_pred))
# Feature importance plot:
import matplotlib.pyplot as plt
n = 15
figsize = (5,15)
importances = pd.Series(best.feature_importances_, X_train.columns)
top_n = importances.sort_values()[-n:]
plt.figure(figsize=figsize)
top_n.plot.barh(color='blue')
#Shapley plot
import shap
data_for_prediction = X_test.sample(1)
print(data_for_prediction)
shap.initjs()
explainer = shap.TreeExplainer(best)
shap_values = explainer.shap_values(data_for_prediction)
shap.force_plot(explainer.expected_value, shap_values, data_for_prediction)
| 0.472927 | 0.917746 |
## CNN - tf.keras
```
import pandas as pd
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
stop_words = stopwords.words('english')
training_data = pd.read_csv('train.csv')
```
#### Combining the 3 columns (keyword + location + text), filling NaN values with an empty string ('')
```
training_data['text'] = training_data['keyword'].fillna('') + training_data['location'].fillna('') \
+ training_data['text'].fillna('')
training_data.head()
training_data = training_data.drop(columns=['id','keyword','location'],axis=1)
training_data.head()
```
#### The data cleaning step tokenizes the text, removes stopwords, and keeps only lowercased alphanumeric tokens
```
def clean_data(text):
tokens = text.split()
no_stopwords = [x for x in tokens if x not in stop_words]
no_nonalphanum = [x.lower() for x in no_stopwords if x.isalnum()]
return ' '.join(no_nonalphanum)
training_data['text'] = training_data['text'].apply(clean_data)
print(training_data.shape)
training_data.head()
test_data = pd.read_csv('test.csv')
test_id = test_data['id']
test_data['text'] = test_data['keyword'].fillna('') + test_data['location'].fillna('') \
+ test_data['text'].fillna('')
test_data = test_data.drop(columns=['id','keyword','location'],axis=1)
test_data['text'] = test_data['text'].apply(clean_data)
print(test_data.shape)
test_data.head()
combined_data = pd.DataFrame()
combined_data = combined_data.append(training_data,ignore_index=True,sort=False)
combined_data = combined_data.append(test_data,ignore_index=True,sort=False)
combined_data = combined_data.text
combined_data.shape
x_train,x_test,y_train,y_test = train_test_split(training_data.text,training_data.target)
from string import punctuation
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras import regularizers
```
#### Using the Keras tokenizer to convert the text into integer sequences
```
train_docs = training_data.text
tokenizer = Tokenizer()
tokenizer.fit_on_texts(combined_data)
encoded_docs = tokenizer.texts_to_sequences(x_train)
max_length = max([len(s.split()) for s in train_docs])
x_train = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
encoded_docs = tokenizer.texts_to_sequences(x_test)
x_test = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
vocab_size = len(tokenizer.word_index) + 1
```
#### CNN with a L2 regularization
```
model = Sequential()
model.add(Embedding(vocab_size, 100, input_length=max_length))
model.add(Conv1D(filters=32, kernel_size=8, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(32, activation='relu',kernel_regularizer=regularizers.l2(0.05)))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, verbose=2, validation_data=(x_test,y_test))
# evaluate
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test Accuracy: %f' % (acc*100))
prediction = model.predict_classes(x_test)
accuracy_score(y_test,prediction)
encoded_docs = tokenizer.texts_to_sequences(test_data.text)
test_data = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
prediction = model.predict_classes(test_data)
prediction = prediction.reshape(-1,1)
predictions = pd.DataFrame()
for i,j in zip(test_id,prediction):
new = pd.DataFrame({'id':i,'target':j})
predictions = predictions.append(new,ignore_index=True)
predictions.to_csv('submission.csv',index=False)
```
#### Accuracy :---
##### - Local Accuracy : 74.42 (over split training data)
##### - Online Accuracy : 74.94 (After fitting over all training data)
|
github_jupyter
|
import pandas as pd
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
stop_words = stopwords.words('english')
training_data = pd.read_csv('train.csv')
training_data['text'] = training_data['keyword'].fillna('') + training_data['location'].fillna('') \
+ training_data['text'].fillna('')
training_data.head()
training_data = training_data.drop(columns=['id','keyword','location'],axis=1)
training_data.head()
def clean_data(text):
tokens = text.split()
no_stopwords = [x for x in tokens if x not in stop_words]
no_nonalphanum = [x.lower() for x in no_stopwords if x.isalnum()]
return ' '.join(no_nonalphanum)
training_data['text'] = training_data['text'].apply(clean_data)
print(training_data.shape)
training_data.head()
test_data = pd.read_csv('test.csv')
test_id = test_data['id']
test_data['text'] = test_data['keyword'].fillna('') + test_data['location'].fillna('') \
+ test_data['text'].fillna('')
test_data = test_data.drop(columns=['id','keyword','location'],axis=1)
test_data['text'] = test_data['text'].apply(clean_data)
print(test_data.shape)
test_data.head()
combined_data = pd.DataFrame()
combined_data = combined_data.append(training_data,ignore_index=True,sort=False)
combined_data = combined_data.append(test_data,ignore_index=True,sort=False)
combined_data = combined_data.text
combined_data.shape
x_train,x_test,y_train,y_test = train_test_split(training_data.text,training_data.target)
from string import punctuation
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras import regularizers
train_docs = training_data.text
tokenizer = Tokenizer()
tokenizer.fit_on_texts(combined_data)
encoded_docs = tokenizer.texts_to_sequences(x_train)
max_length = max([len(s.split()) for s in train_docs])
x_train = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
encoded_docs = tokenizer.texts_to_sequences(x_test)
x_test = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
vocab_size = len(tokenizer.word_index) + 1
model = Sequential()
model.add(Embedding(vocab_size, 100, input_length=max_length))
model.add(Conv1D(filters=32, kernel_size=8, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(32, activation='relu',kernel_regularizer=regularizers.l2(0.05)))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, verbose=2, validation_data=(x_test,y_test))
# evaluate
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test Accuracy: %f' % (acc*100))
prediction = model.predict_classes(x_test)
accuracy_score(y_test,prediction)
encoded_docs = tokenizer.texts_to_sequences(test_data.text)
test_data = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
prediction = model.predict_classes(test_data)
prediction = prediction.reshape(-1,1)
predictions = pd.DataFrame()
for i,j in zip(test_id,prediction):
new = pd.DataFrame({'id':i,'target':j})
predictions = predictions.append(new,ignore_index=True)
predictions.to_csv('submission.csv',index=False)
| 0.615897 | 0.774796 |
```
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import minimize
```
### Definition of the model
```
# The SIR model differential equations.
def deriv(y, t, N, beta,gamma):
S,I,R = y
dSdt = -(beta*I/N)*S
dIdt = (beta*S/N)*I - gamma*I
dRdt = gamma*I
return dSdt, dIdt, dRdt
```
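For reference, `deriv` above implements the standard SIR system, in the same notation as the code ($N$ total population, $\beta$ contact rate, $\gamma$ recovery rate):

$$\frac{dS}{dt} = -\frac{\beta S I}{N}, \qquad \frac{dI}{dt} = \frac{\beta S I}{N} - \gamma I, \qquad \frac{dR}{dt} = \gamma I$$

with basic reproduction number $R_0 = \beta/\gamma$, which is why the fits further down vary $R_0$ while keeping $\gamma = 0.07$ fixed and setting $\beta = 0.07\,R_0$.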
### Integration of the differential equations
```
def time_evo(N,beta,gamma,I0=1,R0=0,t=np.arange(0,365)):
# Definition of the initial conditions
# I0 and R0 denotes the number of initial infected people (I0)
# and the number of people that recovered and are immunized (R0)
    # t is the time grid
S0=N-I0-R0 # number of people that can still contract the virus
# Initial conditions vector
y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N,beta,gamma))
S, I, R = np.transpose(ret)
return (t,S,I,R)
```
### Show the result
```
fin_result=time_evo(1000,0.5,0.1)
t=fin_result[0]
s_vec=fin_result[1]
i_vec=fin_result[2]
r_vec=fin_result[3]
plt.plot(t, s_vec, 'b', label='Susceptible')
plt.plot(t, i_vec, 'r', label='Infected')
plt.plot(t, r_vec, 'g', label='Recovered')
#plt.plot(t, m_vec, 'k', label='Deaths')
#plt.plot(t, i_vec+r_vec, color='orange',linestyle='--', label='Infected + Recovered')
plt.legend(loc=5)
#plt.yscale('log')
#plt.ylim(0.5,3000)
plt.xlim(0,100)
plt.xlabel('Number of days')
plt.ylabel('Number of people')
plt.grid(color='gray', linestyle='-', linewidth=0.5)
plt.savefig('output/plotsir.png',dpi=300)
plt.show()
#print(s_vec+i_vec+r_vec+m_vec)
```
# All-in-one
```
vector_regions = ['nord', 'centro', 'sud', 'isole']#,'nolombardia','lombardia']
time_window = 5
for r in range(len(vector_regions)):
fit_region = vector_regions[r]
if fit_region =='nord':
region = ['Lombardia','Veneto','Emilia-Romagna','Liguria','Piemonte','Valle d\'Aosta','P.A. Trento','P.A. Bolzano','Friuli Venezia Giulia']
n_regions = len(region)
elif fit_region =='centro':
region = ['Toscana','Marche','Umbria','Lazio','Abruzzo','Molise']
n_regions = len(region)
elif fit_region =='sud':
region = ['Puglia','Calabria','Basilicata','Campania']
n_regions = len(region)
elif fit_region =='isole':
region = ['Sicilia','Sardegna']
n_regions = len(region)
elif fit_region =='italia':
region = 'Italia'
n_regions = 1
elif fit_region =='nolombardia':
region = ['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto']
n_regions = len(region)
elif fit_region =='lombardia':
region = ['Lombardia']
n_regions = 1
print(fit_region)
popolation_regions = np.array([ 1304970, 559084, 533050, 1947131, 5801692, 4459477, 1215220,5879082, 1550640, 10060574, 1525271, 305617, 4356406, 4029053, 1639591, 4999891, 3729641, 541380, 882015, 125666, 4905854])
name_regions = np.array(['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Lombardia','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto'])
regions = np.vstack((name_regions,popolation_regions))
mask_reg = []
for i in range(n_regions):
mask_reg.append(regions[0,:] == region[i])
mask_reg = np.array(mask_reg)
data = pd.read_csv('https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv')
N = 0
xxx = []
yyy = []
zzz = []
for i in range(n_regions):
N += int(regions[1,mask_reg[i]])
mask_REG=data['denominazione_regione']==region[i]
xxx.append(data.loc[mask_REG,'totale_casi'])
yyy.append(data.loc[mask_REG,'deceduti'])
zzz.append(data.loc[mask_REG,'dimessi_guariti'])
ydata = np.array(np.sum(xxx,axis=0))
ydata_death = np.array(np.sum(yyy,axis=0))
ydata_rec = np.array(np.sum(zzz,axis=0))
ydata_inf = ydata-ydata_rec-ydata_death
xdata = pd.to_numeric(range(ydata.shape[0]))
today = len(xdata)
def minimizer(R0,t1=today-time_window,t2=today):
#true data
ydata_inf_2=np.array(ydata_inf[t1:t2])
xdata_2=np.arange(0,len(ydata_inf_2))
#model
fin_result=time_evo(N,0.07*R0,0.07,I0=ydata_inf_2[0])
i_vec=fin_result[2]
i_vec_2=i_vec[0:len(xdata_2)]
#average error
error=np.sum(np.abs(ydata_inf_2-i_vec_2)/ydata_inf_2)*100
return error
minimizer_vec=np.vectorize(minimizer)
xgrid = np.arange(0.1,1.3,0.01)
ygrid = minimizer_vec(xgrid)
r0_ideal = round(xgrid[np.argmin(ygrid)],2)
print('r0_ideal for the '+fit_region+': ',r0_ideal)
ydata_inf_2 = np.array(ydata_inf[today-time_window:today])
xdata_2 = np.arange(0,len(ydata_inf_2))
print('ydata_inf.shape '+fit_region+': ',ydata_inf.shape)
print('ydata_inf for the '+fit_region+': ',ydata_inf)
print('ydata_inf_2 for the '+fit_region+': ',ydata_inf_2)
fin_result = time_evo(N,0.07*r0_ideal,0.07,I0=ydata_inf_2[0])
t=fin_result[0]
s_vec=fin_result[1]
i_vec=fin_result[2]
r_vec=fin_result[3]
def minimizer_gen(t1,t2):
xgrid=np.arange(0.1,7.2,0.1)
ygrid=minimizer_vec(xgrid,t1=t1,t2=t2)
r0_ideal=round(xgrid[np.argmin(ygrid)],2)
return r0_ideal
r0_time=[]
for i in range(today-(time_window-1)):
min_val=minimizer_gen(i,i+time_window)
r0_time.append(min_val)
print(i,min_val)
if fit_region =='nord':
r0_time_nord=np.array(r0_time)
elif fit_region =='centro':
r0_time_centro=np.array(r0_time)
elif fit_region =='sud':
r0_time_sud=np.array(r0_time)
elif fit_region =='isole':
r0_time_isole=np.array(r0_time)
elif fit_region =='nolombardia':
r0_time_nolombardia=np.array(r0_time)
elif fit_region =='lombardia':
r0_time_lombardia=np.array(r0_time)
r0_time.clear()
df_r0=pd.DataFrame(pd.to_datetime(np.arange(len(r0_time_nord)),unit='D',origin='2020-02-28'))
df_r0['nord'] = r0_time_nord
df_r0['centro'] = r0_time_centro
df_r0['sud'] = r0_time_sud
df_r0['isole'] = r0_time_isole
#df_r0['nolombardia'] = r0_time_nolombardia
#df_r0['lombardia'] = r0_time_lombardia
df_r0.columns = ['Data','nord','centro','sud','isole']#,'nolombardia','lombardia']
df_r0.to_csv('output/r0_regions_work.csv',index=False)
```
|
github_jupyter
|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import minimize
# The SIR model differential equations.
def deriv(y, t, N, beta,gamma):
S,I,R = y
dSdt = -(beta*I/N)*S
dIdt = (beta*S/N)*I - gamma*I
dRdt = gamma*I
return dSdt, dIdt, dRdt
def time_evo(N,beta,gamma,I0=1,R0=0,t=np.arange(0,365)):
# Definition of the initial conditions
# I0 and R0 denotes the number of initial infected people (I0)
# and the number of people that recovered and are immunized (R0)
    # t is the time grid
S0=N-I0-R0 # number of people that can still contract the virus
# Initial conditions vector
y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N,beta,gamma))
S, I, R = np.transpose(ret)
return (t,S,I,R)
fin_result=time_evo(1000,0.5,0.1)
t=fin_result[0]
s_vec=fin_result[1]
i_vec=fin_result[2]
r_vec=fin_result[3]
plt.plot(t, s_vec, 'b', label='Susceptible')
plt.plot(t, i_vec, 'r', label='Infected')
plt.plot(t, r_vec, 'g', label='Recovered')
#plt.plot(t, m_vec, 'k', label='Deaths')
#plt.plot(t, i_vec+r_vec, color='orange',linestyle='--', label='Infected + Recovered')
plt.legend(loc=5)
#plt.yscale('log')
#plt.ylim(0.5,3000)
plt.xlim(0,100)
plt.xlabel('Number of days')
plt.ylabel('Number of people')
plt.grid(color='gray', linestyle='-', linewidth=0.5)
plt.savefig('output/plotsir.png',dpi=300)
plt.show()
#print(s_vec+i_vec+r_vec+m_vec)
vector_regions = ['nord', 'centro', 'sud', 'isole']#,'nolombardia','lombardia']
time_window = 5
for r in range(len(vector_regions)):
fit_region = vector_regions[r]
if fit_region =='nord':
region = ['Lombardia','Veneto','Emilia-Romagna','Liguria','Piemonte','Valle d\'Aosta','P.A. Trento','P.A. Bolzano','Friuli Venezia Giulia']
n_regions = len(region)
elif fit_region =='centro':
region = ['Toscana','Marche','Umbria','Lazio','Abruzzo','Molise']
n_regions = len(region)
elif fit_region =='sud':
region = ['Puglia','Calabria','Basilicata','Campania']
n_regions = len(region)
elif fit_region =='isole':
region = ['Sicilia','Sardegna']
n_regions = len(region)
elif fit_region =='italia':
region = 'Italia'
n_regions = 1
elif fit_region =='nolombardia':
region = ['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto']
n_regions = len(region)
elif fit_region =='lombardia':
region = ['Lombardia']
n_regions = 1
print(fit_region)
popolation_regions = np.array([ 1304970, 559084, 533050, 1947131, 5801692, 4459477, 1215220,5879082, 1550640, 10060574, 1525271, 305617, 4356406, 4029053, 1639591, 4999891, 3729641, 541380, 882015, 125666, 4905854])
name_regions = np.array(['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Lombardia','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto'])
regions = np.vstack((name_regions,popolation_regions))
mask_reg = []
for i in range(n_regions):
mask_reg.append(regions[0,:] == region[i])
mask_reg = np.array(mask_reg)
data = pd.read_csv('https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv')
N = 0
xxx = []
yyy = []
zzz = []
for i in range(n_regions):
N += int(regions[1,mask_reg[i]])
mask_REG=data['denominazione_regione']==region[i]
xxx.append(data.loc[mask_REG,'totale_casi'])
yyy.append(data.loc[mask_REG,'deceduti'])
zzz.append(data.loc[mask_REG,'dimessi_guariti'])
ydata = np.array(np.sum(xxx,axis=0))
ydata_death = np.array(np.sum(yyy,axis=0))
ydata_rec = np.array(np.sum(zzz,axis=0))
ydata_inf = ydata-ydata_rec-ydata_death
xdata = pd.to_numeric(range(ydata.shape[0]))
today = len(xdata)
def minimizer(R0,t1=today-time_window,t2=today):
#true data
ydata_inf_2=np.array(ydata_inf[t1:t2])
xdata_2=np.arange(0,len(ydata_inf_2))
#model
fin_result=time_evo(N,0.07*R0,0.07,I0=ydata_inf_2[0])
i_vec=fin_result[2]
i_vec_2=i_vec[0:len(xdata_2)]
#average error
error=np.sum(np.abs(ydata_inf_2-i_vec_2)/ydata_inf_2)*100
return error
minimizer_vec=np.vectorize(minimizer)
xgrid = np.arange(0.1,1.3,0.01)
ygrid = minimizer_vec(xgrid)
r0_ideal = round(xgrid[np.argmin(ygrid)],2)
print('r0_ideal for the '+fit_region+': ',r0_ideal)
ydata_inf_2 = np.array(ydata_inf[today-time_window:today])
xdata_2 = np.arange(0,len(ydata_inf_2))
print('ydata_inf.shape '+fit_region+': ',ydata_inf.shape)
print('ydata_inf for the '+fit_region+': ',ydata_inf)
print('ydata_inf_2 for the '+fit_region+': ',ydata_inf_2)
fin_result = time_evo(N,0.07*r0_ideal,0.07,I0=ydata_inf_2[0])
t=fin_result[0]
s_vec=fin_result[1]
i_vec=fin_result[2]
r_vec=fin_result[3]
def minimizer_gen(t1,t2):
xgrid=np.arange(0.1,7.2,0.1)
ygrid=minimizer_vec(xgrid,t1=t1,t2=t2)
r0_ideal=round(xgrid[np.argmin(ygrid)],2)
return r0_ideal
r0_time=[]
for i in range(today-(time_window-1)):
min_val=minimizer_gen(i,i+time_window)
r0_time.append(min_val)
print(i,min_val)
if fit_region =='nord':
r0_time_nord=np.array(r0_time)
elif fit_region =='centro':
r0_time_centro=np.array(r0_time)
elif fit_region =='sud':
r0_time_sud=np.array(r0_time)
elif fit_region =='isole':
r0_time_isole=np.array(r0_time)
elif fit_region =='nolombardia':
r0_time_nolombardia=np.array(r0_time)
elif fit_region =='lombardia':
r0_time_lombardia=np.array(r0_time)
r0_time.clear()
df_r0=pd.DataFrame(pd.to_datetime(np.arange(len(r0_time_nord)),unit='D',origin='2020-02-28'))
df_r0['nord'] = r0_time_nord
df_r0['centro'] = r0_time_centro
df_r0['sud'] = r0_time_sud
df_r0['isole'] = r0_time_isole
#df_r0['nolombardia'] = r0_time_nolombardia
#df_r0['lombardia'] = r0_time_lombardia
df_r0.columns = ['Data','nord','centro','sud','isole']#,'nolombardia','lombardia']
df_r0.to_csv('output/r0_regions_work.csv',index=False)
| 0.343342 | 0.834238 |
```
import Ouzo_Graph_Tools as ouzo_graphs
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate, stats
def extract_plates(path, sheet_list):
"""Will return a sublist of plates absorbance information in dataframe format
Must ensure that excel sheet has only the samples made in the csv plan as will cause errors downstream."""
plate_dfs = []
for sheet_name in sheet_list:
plate_df = pd.read_excel(path, sheet_name = sheet_name).T
plate_dfs.append(plate_df)
return plate_dfs
def merge_wavelength_dfs(df_list):
merge_list = []
for i, df in enumerate(df_list):
if i == 0:
df = df
else:
df = df.drop(['Wavelength'])
merge_list.append(df)
return pd.concat(merge_list)
def baseline_correction(df_samples, baseline_series):
"""Given the series iloc of a the blank, subtracts the value at every wavelength of blank at resp. wavelength.
Simple subtraction blanking."""
new_df_con = []
for key, row in df_samples.iterrows():
if key == 'Wavelength':
wavelengths = row
new_df_con.append(wavelengths)
else:
series = row
corrected = series.subtract(baseline_series)
new_df_con.append(corrected)
baseline_corrected_df = pd.concat(new_df_con, axis = 1).T
baseline_corrected_df.index = df_samples[0].index
return baseline_corrected_df
def add_abs_to_sample_info(sample_info_df, abs_df):
wavelengths = list(abs_df.loc['Wavelength'])
wavelengths_names = [str(wavelength)+'nm' for wavelength in wavelengths]
abs_df.columns = wavelengths_names
sample_info_df.reset_index(drop=True, inplace=True)
abs_df.reset_index(drop=True, inplace=True)
    combined_df = pd.concat([sample_info_df, abs_df], axis = 1)  # use the passed-in dataframe instead of the global sample_info
return combined_df
def remove_visual_outliers(x, y, z, z_score_threshold = 3):
"""This is not a to remove statistical outliers, only to remove values which present. Outliers will be
removed based on the data of z and subsequently from x and y given the same indexes of entries. Inputs must be nparrays"""
z_array = np.asarray(z)
z_scores = np.abs(stats.zscore(np.asarray(z)))
threshold = z_score_threshold
index_to_remove = np.where(z_scores > threshold)[0] # must be in ascending order
x = x.copy()
y = y.copy()
z = z.copy()
    for index in reversed(index_to_remove): # reversed to preserve the remaining indexes while deleting
del x[index]
del y[index]
del z[index]
xyz_array = [x,y,z]
return xyz_array
# what happens with overflow or undefined data???
# Load all things needed in this case
sample_info = pd.read_csv(r"C:\Users\Edwin\Desktop\Ouzo Runs\11_17_2020\experiment_info") # make it find the blank position from sample_info
plate_names = ['Sheet1','Sheet2', 'Sheet3']
plate_dfs = extract_plates(r"C:\Users\Edwin\Desktop\Ouzo Runs\11_17_2020\11_17_2020_Plate123.xlsx", plate_names) # can edit/remove wells accidently measured etc, but really should be done at excel level
merged_df = merge_wavelength_dfs(plate_dfs)
sample_info
# baseline and combine
baseline_series = merged_df.iloc[-1]
merged_baselined_df = baseline_correction(merged_df, baseline_series)
combined_df = add_abs_to_sample_info(sample_info, merged_df)
# extract data by dict method of df calling series
wavelength = '400.0nm'
x_name = combined_df['Component 4 wtf'][0]
y_name = combined_df['Component 3 wtf'][0]
x = [float(i) for i in combined_df['Component 4 wtf'][1:].tolist()][:-1] #ethanol,
y = [float(i) for i in combined_df['Component 3 wtf'][1:].tolist()][:-1] # pfh
z = [float(i) for i in combined_df[wavelength][1:].tolist()][:-1]
combined_restricted_xyz = [x,y,z]
modi = remove_visual_outliers(x,y,z,2) # this should only be used to find the new vmin and vmax but not to exclude
start = 60
stop = 90
ethanol = x[start:stop]
pfh = y[start:stop]
abs_ = z[start:stop]
pfh
fig, ax = plt.subplots(1)
ax.scatter(ethanol,abs_)
ax.set_xlabel('Ethanol wtf')
ax.set_ylabel('AU')
print(len(modi[2]))
plt.scatter(range(len(modi[2])), modi[2])
min_x = min(combined_restricted_xyz[0])
max_x = max(combined_restricted_xyz[0])
min_y = min(combined_restricted_xyz[1])
max_y = max(combined_restricted_xyz[1])
min_z = min(modi[2])
max_z = max(modi[2])
# print(max_z)
### First make the xx,yy coordinates that the interpolation will span
x_space = np.linspace(min_x,max_x,100)
y_space = np.linspace(min_y,max_y,100)
xx, yy = np.meshgrid(x_space,y_space)
### Next make tuple the x,y data so it can be fed into interpolation method to make the interpolation mesh
cartcoord = list(zip(combined_restricted_xyz[0],combined_restricted_xyz[1]))
interp = interpolate.LinearNDInterpolator(cartcoord, combined_restricted_xyz[2])
Z0 = interp(xx,yy)
cartcoord_v = list(zip(modi[0],modi[1]))
interp_v= interpolate.LinearNDInterpolator(cartcoord_v, modi[2])
Z0_v = interp_v(xx,yy) # use the outlier-free interpolator so the vmin/vmax computed below are not dominated by the extreme values
# Finally, create the plot.
# Note: Mappable for the interpolation is independent of the scatter colormap (which is created automatically), they are the same when you do not restrict either.
# Restriction is defined once you restrict the x/y space of the mesh to a space smaller than that of the scatter plot.
fig, ax = plt.subplots()
vmin = np.nanmin(Z0_v) # this will ensure all the interpolated values fit on the colorbar
vmax = np.nanmax(Z0_v)
# norm=colors.Normalize(vmin=min_z, vmax=max_z) # you can manually use the data range (excluding outliers), which is the most important part to plot
norm=colors.Normalize(vmin=min_z, vmax=max_z)
mappable = ax.pcolormesh(xx, yy, Z0, norm=norm)
# mappable.set_clim(vmin=vmin, vmax=vmax)
# mappable.set_clim(vmin,vmax)
ax.scatter(combined_restricted_xyz[0], combined_restricted_xyz[1],
c = combined_restricted_xyz[2], norm=norm, cmap = mpl.cm.viridis, edgecolors='k')
cbar = plt.colorbar(mappable)
cbar_txt = "AU at wavelength " + str(wavelength) + 'nm'
cbar.set_label(cbar_txt, labelpad = 10)
# ax.set_xlim(xmin =0, xmax = 0.0006) # simple ratios for easy viewing
ax.set_ylim([0.0005, 0.003])
ax.set_xlim([0.38,0.610])
# ax.set_yticks(np.arange(-0.0001, 0.00035, 0.0001))
# ax.set_yticks(np.arange(0, 0.0007, 0.00005))
# ax.set_xticks(np.arange(0, 0.00035, 0.001))
# axacx.yticks(np.arange(min, max, step))
ax.set_xlabel(x_name)
ax.set_ylabel(y_name)
# ax.set_xlim([-0.0001,0.0006])
# ax.text(0.4,0.002, "vmin = " + str(vmin) + '\nvmax = '+ str(vmax))
# ax.text(0.4,0.002,'*Negative AU values due to \n instrument resolution of 0.001 AU')
# ax.set_ylim([0,0.05])
zmin = min(z)
zmax = max(z)
# knowing this information, or even using the interpolation information, you could look at the frequency of certain values and, if it is low enough, decide to push them out to the colorbar extremes to improve the visuals
# for example, if you have 100 samples between 0.1 and 0.2 and 4 samples between 0.6 and 0.65, it would be fine to have vmin/vmax = 0.1 to 0.2
# you can use clim, but the big issue is that vmin and vmax are nan
import seaborn as sns
sns.boxplot(x=z)
```
```
from argotools.dataFormatter import Load
from argotools.visualize import InputVis
from argotools.visualize import OutputVis
from argotools.experiment import ARGO_lrmse
from argotools.forecastlib.argo_methods_ import *
from argotools.forecastlib.functions import *
from sklearn.linear_model import LassoCV
path_to_ili = '/Users/leonardo/Desktop/UNIVERSIDAD/SEMESTRE4/dataAnalytics/final_project/TARGET_DATA/ALL_DENGUE.csv'
path_to_GC = '/Users/leonardo/Desktop/UNIVERSIDAD/SEMESTRE4/dataAnalytics/final_project/FEATURES/filtered_words_nodups.csv'
country_codes = [ 'Singapore']
study_period = ['2012-01-01', '2017-03-13']
```
## Initializing object
Load is the main class from dataFormatter. It contains several functions that automate the loading, formatting and basic preprocessing of data prior to any more serious procedures. To generate an object, you just call its constructor:
```
data_object = Load(start_period=study_period[0], end_period=study_period[1], ids=country_codes)
```
## Adding features and target data
In its inner structure, the Load object keeps target and feature data in separate dictionaries. There are several functions to add data into the Load object, depending on your use case. The most commonly used are "add_target" and "add_features_customSource". Once you learn how to use these two, it is fairly easy to understand the other functions available.
We'll add target data from a formatted source (influenza cases from Flunet) and features data from a custom source (Google Correlate data).
NOTE: In this example we use GC data directly from Google Correlate. This data has not been properly filtered, and many words that are correlated with the term influenza may not be useful to fit a model. For real model-fitting, please make sure to properly preprocess your data.
```
for country in country_codes:
data_object.add_target(id_=country, path_to_file=path_to_ili)
data_object.add_features_customSource(id_=country, path_to_file=path_to_GC, source='standard', overwrite=False, verbose=False, autoanswer=None)
```
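As a quick check, the Load object's internal dictionaries can be inspected directly. The snippet below is only a minimal sketch: it assumes both containers hold pandas objects keyed by the location id, which the set_index call used further down suggests.
```
for country in country_codes:
    print(data_object.target[country].head())      # target data for this id
    print(data_object.features[country].head())    # feature matrix for this id
    print(data_object.target[country].index[:3])   # compare the first index values
    print(data_object.features[country].index[:3])
```
Printing the first few index values makes the mismatch discussed in the next section easy to spot.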
## Different indices
Note that the target and feature data have different indices (Load pops up a message if this happens). This is a frequent problem when using different data sources. It is strongly recommended to make all the indices identical to avoid any confusion from pandas in the following phases of the analysis. In this case, changing the indices is fairly easy because our data sources have no missing rows (they're both 365 rows) and the dates differ by only 1 day. For other problems this might not be the case, and it is a good idea to fix these differences prior to using Load.
In this example, we'll use the target's index as our standard and overwrite the features indices using pandas function "set_index".
```
# Fixing indices
for country in country_codes:
data_object.features[country].set_index(data_object.target[country].index, inplace=True)
study_period[0] = '2012-01-02' # We changed the dates to the exact ones because datavis does not accept approximate dates.
study_period[1] = '2017-03-13'
```
## Fitting a linear regression using argotools
After you've looked at your data and performed the necessary pre-processing steps, it is time to fit a model. We use the experiment library to do this.
```
mod = LassoCV(cv=10, fit_intercept=True, n_alphas=1000, max_iter=20000, tol=.001, normalize=True,\
positive=False)
model_dict = {
'ARGO_filtered': [lasso_family, preproc_rmv_values, 5, None, False, mod],
'ARGO': [lasso_family, preproc_rmv_values, None, None, False, mod],
'AR': [lasso_family, preproc_rmv_values, None, None, False, mod]
}
argo_tester = ARGO_lrmse(data=data_object, model_dict=model_dict, output_name='NORM_TEST', training_window= 'static', \
training_window_size=104, horizon=1, feature_preprocessing='zscore',\
ar_model=52, load_folder = None, ar_column_filtering=True, out_of_sample_rmse=False)
argo_tester.run(period_start='2015-01-05', period_end='2016-12-26', verbose=False, cont=False) #'2016-12-26'
```
The experiment library fits a multivariate linear model using the ARGO methodology. In time-series prediction, data becomes available over time (every week, in this case), so it is useful to update and recalibrate your prediction model every time a new prediction week comes up. This library handles the recalibration process for every model, in every location. After finishing the fitting/prediction process, it writes the model predictions to a csv file in the folder structure it created. The library also has the advantage that it keeps track of the model coefficients throughout this recalibration process, giving you the possibility of analyzing how the features' impact changes over time.
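For instance, the coefficient history written by the experiment can be inspected directly with pandas. This is only a sketch: it assumes the coefficients file has one row per prediction date and one numeric column per feature, which may differ slightly from the exact layout ARGO_lrmse produces.
```
import pandas as pd
import matplotlib.pyplot as plt

coeffs = pd.read_csv('/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/ARGO_coefficients.csv',
                     index_col=0)
# pick the features with the largest average absolute coefficient
top = coeffs.abs().mean().sort_values(ascending=False).head(5).index
coeffs[top].plot(figsize=(10, 4))
plt.ylabel('coefficient value')
plt.title('Evolution of the 5 largest ARGO coefficients')
plt.show()
```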
## Visualizing the output
Let's take a look at the data that we have created through the ARGO_lrmse class.
```
results_df = pd.read_csv('/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/preds.csv', index_col=0)
print(results_df)
```
The csv file contains the predictions for each model and for each week. There are several things we can do with these results. We'll use OutputVis to benchmark and visualize them.
## Computing metrics from the results
We'll start first by generating the metrics. We initialize the class object by passing in the results folder name ('NORM_TEST' in this case; had we not given the experiment a particular name, it would have used a generic one like "ARGO_experiment") and the location identifiers.
To perform the metric computation, we use the "group_compute_metrics" function, which loads every location's results and computes the metrics specified in the "which_metrics" variable. There are several metrics already available within the class, and it is fairly easy to write your own metric to work with this function.
To compute your metrics you only need the following:
1.- The intervals over which to compute the metrics. The intervals are input for each ID as a dictionary: each key is an ID and its value is a list of tuples indicating the intervals: [('YYYY-MM-DD', 'YYYY-MM-DD')...]
2.- The names of the intervals, in the same dictionary format.
3.- The metrics you want to use (they should be available within the class).
```
results_visualizer = OutputVis('NORM_TEST', ids=country_codes)
# To compute metrics, we need to set the intervals where to compute the metrics, here we do it yearly and as whole period
start_interval = [ '2015-01-04', '2016-01-03', '2015-01-04'] #'2013-01-05'
end_interval = [ '2015-12-28', '2016-12-25', '2016-12-25']
period_labels = [ 'Y2015', 'Y2016', 'ALL_YEARS']
i = list(zip(start_interval, end_interval))
intervals = dict( zip(country_codes,[i]*len(country_codes)) )
interval_labels = dict( zip(country_codes, [period_labels]*len(country_codes)))
results_visualizer.group_compute_metrics(intervals, interval_labels, which_metrics=['PEARSON', 'RMSE', 'NRMSE'], write_to_overview=True)
metrics_example = pd.read_csv('/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/metrics.csv')
print('This are the metrics for Argentina: \n',metrics_example)
```
After computing the metrics, we'll do a series of visualizations for the models. First we set up a set of style dictionaries: we give each model a color and a transparency so that all the plots share the same style. After that, we call a series of functions that do the work for us.
```
color_dict = {
'AR':'blue',
'ARGO':'r',
'ARGO_filtered':'blueviolet',
'ILI':'black'
}
alpha_dict = {
'AR': .8,
'ARGO':.8,
'ARGO_filtered':.8,
'ILI':.8
}
results_visualizer.plot_SEC(series_filepath='/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/preds.csv',\
coeff_filepath='/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/ARGO_coefficients.csv', \
target_name='ILI', models=['ARGO', 'AR'],\
color_dict=color_dict, start_period=None, end_period=None, alpha_dict=alpha_dict,\
output_filename='/Users/leonardo/Desktop/flu-code/argotools-pkg/NORM_TEST/Singapore/SEC_plot', ext='png', mode='save', n_coeff=20, \
font_path='/System/Library/Fonts/PingFang.ttc', vmin=-50, vmax=50)
#results_visualizer.plot_coefficients(coefficients_filepath='/Users/leonardo/Desktop/flu-code/argotools-pkg/ARGO_SINGAPORE/Singapore/ARGO_coefficients.csv',\
# mode='save', output_filename='/Users/leonardo/Desktop/flu-code/argotools-pkg/ARGO_SINGAPORE/Singapore/coeff_plot')
mods = ['AR', 'ARGO']
results_visualizer.group_barplot_metric(ids=country_codes, metric='NRMSE', period='ALL_YEARS',\
models=mods, color_dict=color_dict,\
alpha_dict=alpha_dict, metric_filename='metrics.csv',\
bar_separation_multiplier=1.5, mode='save', output_filename='NRMSE_ALLYEARS', ext='png')
results_visualizer.season_analysis(country_codes, [ 'Y2015', 'Y2016'], mods, folder_main=None, metrics=['PEARSON', 'NRMSE'], filename='metrics_condensed.csv', output_filename='season_analysis',\
color_dict=None, alpha_dict=None, mode='save', ext='png')
for country_code in country_codes:
results_visualizer.plot_series(id_=country_code,series_names=['AR','ARGO','ILI'], color_dict=color_dict, alpha_dict=alpha_dict,\
add_weekly_winner=True, winner_models=['AR', 'ARGO'], mode='save')
```
"group_barplot_metric" produces a horizontal barplot that lets you compare the performance of the models you fit based on a given metric (In this example, we use the 'NRMSE' metric, which is an RMSE scaled by the target's euclidean norm).
'season_analysis' provides two visualizations: the first provides a violin plot and box plot of the metrics. The distribution show by the violin plot gives us an idea of where is the most data concentrated in the range spanned by the box plot. The second visualization is a heatmap that contains the numer of times each model gets ranked in the first, second, ... nth place for each interval of time you ask the function look at (For example, if you look at the yearly performance (2013, 2014, 2015, 2016) for these three models (ARGO), you'll have 4 first places, 4 second places and 4 third places). The models which have the "best" performance would more often have the first and second places, thus having a stronger shade of red on the upper squares.
'plot_series' is a function that works individually for each location of study (Here we are just showing 1). the function just makes a quick plot of the model and the predictions based on the color scheme and transparency scheme we provided through our dictionaries. The model also gives some extra information below the timeseries plot. The rectangular heatmap tells you which model (identified by the colorbar) had the least regular error. For example, we can see that, in Mexico's 2015 flu unusual outbreak, the autoreggressive model dominated by having less error for almost the whole outbreak.



We have performed basic EDA, pre-processing, prototyping and benchmarking by only writing some lines of code. Moreover, our data has an organized structure and is easily compatible with other libraries through the CSV files. Hopefully you'll find some value in this library.
# NearMiss
This procedure aims to select majority-class samples that are somewhat similar to the minority class, using one of three alternative versions:
1) Select the majority observations whose average distance to the closest minority observations is smallest
2) Select the majority observations whose average distance to the farthest minority observations is smallest
3) Select the majority observations that are furthest from their nearest minority neighbours
===
This procedure will select as many observations from the majority class as there are observations from the minority class in the dataset.
===
**Final Data size**: 2 x minority class
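The snippet below is a rough illustration of the version 1 rule on toy arrays (it is not imbalanced-learn's implementation): for each majority sample we compute the average distance to its k nearest minority samples, and we keep the majority samples with the smallest average distance until both classes have the same size.
```
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X_maj = rng.normal(0, 1, size=(100, 2))   # toy majority class
X_min = rng.normal(2, 1, size=(10, 2))    # toy minority class

k = 3
nn = NearestNeighbors(n_neighbors=k).fit(X_min)
dist, _ = nn.kneighbors(X_maj)            # distances from each majority point to its k nearest minority points
avg_dist = dist.mean(axis=1)

# keep as many majority samples as there are minority samples
keep = np.argsort(avg_dist)[:len(X_min)]
X_maj_kept = X_maj[keep]
print(X_maj_kept.shape)                   # (10, 2)
```
The NearMiss class used below handles this (and the other two versions) for us.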
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NearMiss
```
## Create data
We will create data where the classes have different degrees of separateness.
https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html
```
def make_data(sep):
# returns arrays
X, y = make_classification(n_samples=1000,
n_features=2,
n_redundant=0,
n_clusters_per_class=1,
weights=[0.99],
class_sep=sep,# how separate the classes are
random_state=1)
    # transform arrays into pandas df and series
X = pd.DataFrame(X, columns =['varA', 'varB'])
y = pd.Series(y)
return X, y
```
## Undersample with NearMiss
[NearMiss](https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.NearMiss.html)
### Well separated classes
```
# create data
X, y = make_data(sep=2)
# set up Near Miss, first method
# that is, version = 1
nm1 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=1,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm1.fit_resample(X, y)
# size of original data
X.shape, y.shape
# size of undersampled data
# majority class is undersampled till it matches the minority class
X_resampled.shape, y_resampled.shape
y.value_counts()
# plot original data
sns.scatterplot(
data=X, x="varA", y="varB", hue=y
)
plt.title('Original dataset')
plt.show()
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
Note how the observations closest to the minority class were retained in the dataset.
**Now let's try the second method**
```
# version = 2
nm2 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=2,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm2.fit_resample(X, y)
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
The method retains those majority samples that are closest to the furthest observations of the minority class.
```
# again majority class is undersampled till
# the same number of minority observations
X_resampled.shape
# version = 3
nm3 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=3,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm3.fit_resample(X, y)
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
### Partially separated classes
Let's repeat the same exercise in data where the classes are not so clearly separated.
```
# create data
X, y = make_data(sep=0)
# set up edited nearest neighbour transformer
nm1 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=1,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm1.fit_resample(X, y)
# original data
X.shape, y.shape
# undersampled data
X_resampled.shape, y_resampled.shape
```
As the classes are not so clearly distinguished, more samples were removed from the dataset.
```
# plot original data
sns.scatterplot(
data=X, x="varA", y="varB", hue=y
)
plt.title('Original dataset')
plt.show()
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
# version 2
nm2 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=2,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm2.fit_resample(X, y)
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
# version 3
nm3 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=3,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_resampled, y_resampled = nm3.fit_resample(X, y)
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
## NearMiss
### Real data - Performance comparison
Does it work well with real datasets?
Well, it will depend on the dataset, so we need to try it and compare the model built on the whole dataset with the one built on the undersampled dataset.
```
# load data
# only a few observations to speed up the computation
data = pd.read_csv('../kdd2004.csv').sample(10000)
data.head()
# imbalanced target
data.target.value_counts() / len(data)
# separate dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['target'], axis=1), # drop the target
data['target'], # just the target
test_size=0.3,
random_state=0)
# NearMiss version 1
nm1 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=1,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_nm1, y_nm1 = nm1.fit_resample(X_train, y_train)
# NearMiss version 2
nm2 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=2,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_nm2, y_nm2 = nm2.fit_resample(X_train, y_train)
# NearMiss version 3
nm3 = NearMiss(
sampling_strategy='auto', # undersamples only the majority class
version=3,
n_neighbors=3,
n_jobs=4) # I have 4 cores in my laptop
X_nm3, y_nm3 = nm3.fit_resample(X_train, y_train)
# compare shapes
X_train.shape, X_nm1.shape, X_nm2.shape, X_nm3.shape
```
## Machine learning performance comparison
Let's compare model performance with and without undersampling.
```
# function to train random forests and evaluate the performance
def run_randomForests(X_train, X_test, y_train, y_test):
rf = RandomForestClassifier(n_estimators=200, random_state=39, max_depth=4)
rf.fit(X_train, y_train)
print('Train set')
pred = rf.predict_proba(X_train)
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set')
pred = rf.predict_proba(X_test)
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
# evaluate performance of algorithm built
# using imbalanced dataset
run_randomForests(X_train,
X_test,
y_train,
y_test)
# evaluate performance of algorithm built
# using the NearMiss-1 undersampled dataset
run_randomForests(X_nm1,
X_test,
y_nm1,
y_test)
# evaluate performance of algorithm built
# using the NearMiss-2 undersampled dataset
run_randomForests(X_nm2,
X_test,
y_nm2,
y_test)
# evaluate performance of algorithm built
# using the NearMiss-3 undersampled dataset
run_randomForests(X_nm3,
X_test,
y_nm3,
y_test)
```
Performance does not improve in this case when using this undersampling procedure.
**HOMEWORK**
Try NearMiss on other datasets available in the imbalanced-learn package.
```
import ROOT
import ostap.fixes.fixes
from ostap.core.core import cpp, Ostap
from ostap.core.core import pwd, cwd, ROOTCWD
from ostap.core.core import rootID, funcID, funID, fID, histoID, hID, dsID
from ostap.core.core import VE
from ostap.histos.histos import h1_axis, h2_axes, h3_axes
from ostap.histos.graphs import makeGraph, hToGraph, hToGraph2, hToGraph3, lw_graph
import ostap.trees.trees
import ostap.trees.cuts
import ostap.histos.param
import ostap.histos.compare
import ostap.io.root_file
import ostap.math.models
import ostap.fitting.roofit
import ostap.fitting.models as Models
canv = ROOT.TCanvas("canv","canv",900,450)
rfile = ROOT.TFile("rad/new.root","READ")
tfile = ROOT.TFile("two/new.root","READ")
ds = rfile["tree"]
dt = tfile["tree"]
from math import sqrt
my_events = []
my_events2 = []
for ev in ds:
lCSC = sqrt( ev.xCSC**2 + ev.yCSC**2 )
zTPC = ev.zpos+2.19+ROOT.gRandom.Gaus(0,0.2576)
Treco = ev.Tp+ROOT.gRandom.Gaus(0,0.05)
evt = {"T":Treco, "l":lCSC, "Z":zTPC, "Atr":ev.THETAe, "Ttr":ev.Tp, "Ztr":ev.zpos}
my_events.append( evt )
print("EVENTS SELECTED (rad.tail): " + str(len(my_events)))
for ev in dt:
lCSC = sqrt( ev.xCSC**2 + ev.yCSC**2 )
zTPC = ev.zpos+2.19+ROOT.gRandom.Gaus(0,0.2576)
Treco = ev.Tp+ROOT.gRandom.Gaus(0,0.05)
evt = {"T":Treco, "l":lCSC, "Z":zTPC, "Atr":ev.THETAe, "Ttr":ev.Tp, "Ztr":ev.zpos}
my_events2.append( evt )
print("EVENTS SELECTED (two body): " + str(len(my_events2)))
from statistics import mean, median, stdev, mode
h1 = ROOT.TH1F("h1",";#theta, mrad;events",10000,50,250)
h2 = ROOT.TH1F("h2",";#theta, mrad;events",10000,50,250)
hT = ROOT.TH1F("hT",";#theta, mrad;events",10000,50,250)
h2.SetLineColor(2)
hT.SetLineColor(1)
hT.SetFillColor(1)
hT.SetFillStyle(3005)
evts = 0.
thetas = []
theta2 = []
for ev in my_events:
if ev["Ttr"]>4.985 and ev["Ttr"]<5.015:
h1.Fill(1000.*ev["Atr"])
if ev["Ttr"]>4.985 and ev["Ttr"]<5.015:
h2.Fill(1000.*ev["Atr"])
thetas.append( 1000.*ev["Atr"] )
evts+=1.
evts2=0.
for ev in my_events2:
if ev["Ttr"]>4.985 and ev["Ttr"]<5.015:
hT.Fill(1000.*ev["Atr"])
theta2.append( 1000.*ev["Atr"] )
evts2+=1.
h2.GetXaxis().SetRangeUser(134,138)
h2.Draw()
hT.Draw("same")
Line =ROOT.TLine( mean(thetas), 10, mean(thetas),1000)
Line.SetLineWidth(3)
Line.SetLineColor(2)
Line2 =ROOT.TLine( median(thetas), 10, median(thetas),1000)
Line2.SetLineWidth(3)
Line2t =ROOT.TLine( median(theta2), 10, median(theta2),1000)
Line2t.SetLineWidth(3)
Line2t.SetLineStyle(7)
Line3 =ROOT.TLine( mode(thetas), 10, mode(thetas),1000)
Line3.SetLineWidth(3)
Line3.SetLineColor(4)
Line.Draw("same")
Line2.Draw("same")
Line2t.Draw("same")
Line3.Draw("same")
ROOT.gPad.SetLogy()
canv.Draw()
print("h1: mean=" + str(h1.mean()) + "\t rms=" + str(h1.rms()) + "\t" + str(h1.rms().value()/sqrt(18493.)))
print("h2: mean=" + str(h2.mean()) + "\t rms=" + str(h2.rms()) + "\t" + str(h2.mean().prec()))
print("list mean " + str(mean(thetas)) + " +- " + str(stdev(thetas)/sqrt(evts)))
print("list sigma " + str(stdev(thetas)) )
print("list mean " + str(mean(thetas)) )
print("list meadian " + str(median(thetas)) + " " + str(median(theta2)) + " " + str(0.04/135.146) )
print("list mode " + str(mode(thetas)) )
from statistics import mean, median, stdev, mode
h1 = ROOT.TH1F("h1",";#theta, mrad;events",10000,50,250)
h2 = ROOT.TH1F("h2",";#theta, mrad;events",10000,50,250)
hT = ROOT.TH1F("hT",";#theta, mrad;events",10000,50,250)
h2.SetLineColor(2)
hT.SetLineColor(1)
hT.SetFillColor(1)
hT.SetFillStyle(3005)
evts = 0.
thetas = []
theta2 = []
for ev in my_events:
if ev["T"]>4.985 and ev["T"]<5.015:
h1.Fill(1000.*ev["Atr"])
if ev["T"]>4.985 and ev["T"]<5.015:
h2.Fill(1000.*ev["Atr"])
thetas.append( 1000.*ev["Atr"] )
evts+=1.
evts2=0.
for ev in my_events2:
if ev["T"]>4.985 and ev["T"]<5.015:
hT.Fill(1000.*ev["Atr"])
theta2.append( 1000.*ev["Atr"] )
evts2+=1.
h2.GetXaxis().SetRangeUser(134,138)
h2.Draw()
hT.Draw("same")
Line =ROOT.TLine( mean(thetas), 10, mean(thetas),1000)
Line.SetLineWidth(3)
Line.SetLineColor(2)
Line2 =ROOT.TLine( median(thetas), 10, median(thetas),1000)
Line2.SetLineWidth(3)
Line2t =ROOT.TLine( median(theta2), 10, median(theta2),1000)
Line2t.SetLineWidth(3)
Line2t.SetLineStyle(7)
Line3 =ROOT.TLine( mode(thetas), 10, mode(thetas),1000)
Line3.SetLineWidth(3)
Line3.SetLineColor(4)
Line.Draw("same")
Line2.Draw("same")
Line2t.Draw("same")
Line3.Draw("same")
ROOT.gPad.SetLogy()
canv.Draw()
print("h1: mean=" + str(h1.mean()) + "\t rms=" + str(h1.rms()) + "\t" + str(h1.rms().value()/sqrt(18493.)))
print("h2: mean=" + str(h2.mean()) + "\t rms=" + str(h2.rms()) + "\t" + str(h2.mean().prec()))
print("list mean " + str(mean(thetas)) + " +- " + str(stdev(thetas)/sqrt(evts)))
print("list sigma " + str(stdev(thetas)) )
print("list mean " + str(mean(thetas)) )
print("list meadian " + str(median(thetas)) + " " + str(median(theta2)) + " " + str(0.04/135.146) )
print("list mode " + str(mode(thetas)) )
hR_Ztr = ROOT.TH1F("hR_Ztr",";#DeltaZ_{TRUE}, mm;R_{REC}, mm",38,851.6-390.0,851.6-10.0)
hR = ROOT.TH1F("hR",";R_{REC},mm;events",30000,0,300)
hA = ROOT.TH1F("hA",";#theta, mrad;events",200,100,300)
Nevt = 0
dT = 0.015
theta_true_list = []
for bin in range(1,38):
hR.Reset()
Rs = []
Zs = []
for ev in my_events:
if ev["T"]>5.-dT and ev["T"]<5.+dT:
if ev["Z"]>10.*bin and ev["Z"]<10.*(bin+1):
hR.Fill(ev["l"])
Rs.append(ev["l"])
Zs.append(851.6-ev["Z"])
hA.Fill(1000.*ev["Atr"])
theta_true_list.append( 1000.*ev["Atr"] )
Nevt+=1
hR_Ztr[39-bin]=VE( median(Rs),(0.001*(3.52346+median(Zs)*0.0134859))**2)
#print("Bin " + str(bin) + " is done" )
hR_Ztr.Draw("e1")
f_pol1 = ROOT.TF1("f_pol1","pol1(0)",851.6-390.0,851.6-10.0)
hR_Ztr.Fit(f_pol1)
ROOT.gPad.SetLogy(False)
canv.Draw()
tgA = VE(f_pol1.GetParameter(1),f_pol1.GetParError(1)**2)
#print(tgA)
import ostap.math.math_ve as math_ve
th_true = median(theta_true_list)
th_reco = 1000.*math_ve.atan(tgA)
print(str(Nevt)+" events")
print("TRUE mean " + str(th_true) + " mrad.")
print("REC. mean " + str(th_reco) + " mrad.\t" + str(th_reco.prec()))
hR_Ztr = ROOT.TH1F("hR_Ztr",";#DeltaZ_{TRUE}, mm;R_{REC}, mm",38,851.6-390.0,851.6-10.0)
hR = ROOT.TH1F("hR",";R_{REC},mm;events",30000,0,300)
hA = ROOT.TH1F("hA",";#theta, mrad;events",200,100,300)
Nevt = 0
dT = 0.015
theta_true_list = []
vZ=[]
vR=[]
eZ=[]
eR=[]
for bin in range(1,38):
hR.Reset()
Rs = []
Zs = []
NN = 0
for ev in my_events:
if ev["Ttr"]>5.-dT and ev["Ttr"]<5.+dT:
if ev["Z"]>10.*bin and ev["Ztr"]<10.*(bin+1):
hR.Fill(ev["l"])
Rs.append(ev["l"])
Zs.append(851.6-ev["Ztr"])
hA.Fill(1000.*ev["Atr"])
theta_true_list.append( 1000.*ev["Atr"] )
NN+=1
Nevt+=1
hR_Ztr[39-bin]=VE( median(Rs),(0.001*(3.52346+median(Zs)*0.0134859))**2)
vR.append(median(Rs))
eR.append((0.001*(3.52346+median(Zs)*0.0134859))**2)
vZ.append(mean(Zs))
eZ.append(stdev(Zs)/sqrt(NN))
#print("Bin " + str(bin) + " is done" )
hR_Ztr.Draw("e1")
f_pol1 = ROOT.TF1("f_pol1","pol1(0)",851.6-390.0,851.6-10.0)
hR_Ztr.Fit(f_pol1)
ROOT.gPad.SetLogy(False)
canv.Draw()
tgA = VE(f_pol1.GetParameter(1),f_pol1.GetParError(1)**2)
#print(tgA)
import ostap.math.math_ve as math_ve
th_true = median(theta_true_list)
th_reco = 1000.*math_ve.atan(tgA)
print(str(Nevt)+" events")
print("TRUE mean " + str(th_true) + " mrad.")
print("REC. mean " + str(th_reco) + " mrad.\t" + str(th_reco.prec()))
gr = makeGraph(vZ,vR,eZ,eR)
gr.Draw("AP")
gr.Fit(f_pol1)
tgG = VE(f_pol1.GetParameter(1),f_pol1.GetParError(1)**2)
th_gr = 1000.*math_ve.atan(tgG)
print("GRAPH mean" + str(th_gr) + " mrad.\t" + str(th_gr.prec()))
canv.Draw()
th_gr-th_true
135.173-135.112
```
gr.Draw("AP")
gr.Fit(f_pol1)
tgG = VE(f_pol1.GetParameter(1),f_pol1.GetParError(1)**2)
th_gr = 1000.*math_ve.atan(tgG)
print("GRAPH mean" + str(th_gr) + " mrad.\t" + str(th_gr.prec()))
canv.Draw()
th_gr-th_true
135.173-135.112/
| 0.188399 | 0.283515 |
# Least squares problems
We sometimes wish to solve problems of the form
$$
\boldsymbol{A} \boldsymbol{x} = \boldsymbol{b}
$$
where $\boldsymbol{A}$ is an $m \times n$ matrix with $m > n$. Clearly $\boldsymbol{A}$ is not square, and in general no exact solution to the problem exists. This is typical of an over-determined problem - we have more equations than unknowns. A classical example is fitting a $k$th-order polynomial to $p > k + 1$ data points - the degree of the polynomial is not high enough to construct an interpolating polynomial.
In this notebook we assume that $\boldsymbol{A}$ has full rank, i.e. the columns of $\boldsymbol{A}$ are linearly independent. We will look at the case when $\boldsymbol{A}$ is not full rank later.
Before computing least-squares problems, we start with examples of polynomial interpolation.
Note: This notebook uses [interactive widgets](https://ipywidgets.readthedocs.io/) to interactively explore various effects. The widget sliders will not be interactive when viewed through nbviewer. The cell below installs interactive widgets for running the notebook in a live Jupyter session.
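One possible install command (an assumption, since the original install cell is not shown here; adjust to your environment):
```
!pip install ipywidgets
```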
## Polynomial interpolation
Polynomial interpolation involves fitting an $n$th-order polynomial to $n + 1$ data points. The polynomial interpolates each point.
### Interpolating the sine function
We first consider the interpolation of 20 equally spaced points that lie on the sine graph. To do this, we use NumPy to generate 20 points $\{ x_{i} \}$ on the interval $[-\pi, \pi]$, and evaluate $\sin(x)$ at each point such that $\boldsymbol{y} = \{ \sin(x_{i})\}$:
```
import numpy as np
N = 20
x_p = np.linspace(-np.pi, np.pi, N)
y_p = np.sin(x_p)
```
We use the variable `N` to hold the number of points so we can change it if we want to experiment.
We can plot the points:
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Points on a sine graph')
plt.plot(x_p, y_p,'ro');
```
With 20 data points, we can interpolate the points with a polynomial $f(x)$ of degree 19:
$$
f = c_{19} x^{19} + c_{18} x^{18} + \ldots + c_{1} x + c_{0}.
$$
We can find the polynomial coefficients $c_{i}$ by solving $\boldsymbol{A} \boldsymbol{c} = \boldsymbol{y}$, where $\boldsymbol{A}$ is the Vandermonde matrix:
$$
\boldsymbol{A} = \begin{bmatrix}
x_{1}^{19} & x_{1}^{18} & \ldots & x_{1}^{2} & x_{1} & 1
\\
x_{2}^{19} & x_{2}^{18} & \ldots & x_{2}^{2} & x_{2} & 1
\\
\vdots & \vdots & \vdots & \ldots & \vdots
\\
x_{20}^{19} & x_{20}^{18} & \ldots & x_{20}^{2} & x_{20} & 1
\end{bmatrix}
$$
the vector $\boldsymbol{c}$ contains the polynomial coefficients
$$
\boldsymbol{c} = \begin{bmatrix}
c_{19} & c_{18} & \ldots & c_{0}
\end{bmatrix}^{T}
$$
and the vector $\boldsymbol{y}$ contains the points $y_{i}$ that we wish to fit, which in this example are the points on the sine graph.
*Note: the ordering in each row of the Vandermonde matrix above is reversed with respect to what you will find in most books. We do this because earlier versions of the NumPy built-in function for generating the Vandermonde matrix use the above ordering. Later versions provide an option to generate the more conventional ordering.*
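As a small aside (not used elsewhere in this notebook), recent NumPy versions expose that option through the `increasing` keyword:
```
# Conventional ascending-power ordering (available in NumPy 1.9 and later)
A_conventional = np.vander(x_p, N, increasing=True)
```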
Using the NumPy built-in function to generate the Vandermonde matrix:
```
A = np.vander(x_p, N)
```
We can solve the system to find the coefficients:
```
c = np.linalg.solve(A, y_p)
```
NumPy has a function `poly1d` to turn the coefficients into a polynomial object, and it can display a representation of the polynomial:
```
p = np.poly1d(c)
print(p)
```
To plot the fitted polynomial, we evaluate the polynomial at 200 points:
```
# Create an array of 200 equally spaced points on [-pi, pi]
x_fit = np.linspace(-np.pi, np.pi, 200)
# Evaluate the polynomial at the points
y_fit = p(x_fit)
# Plot the interpolating polynomial and the sample points
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.title('Points on a sine graph interpolate by a polynomial')
plot = plt.plot(x_p, y_p, 'ro', x_fit, y_fit,'b-');
```
We can see from the graph that the polynomial closely resembles the sine function.
### Interpolating a noisy sine curve
We now repeat the fitting exercise, but this time we add a small amount of noise to the points on the sine curve that we wish to interpolate. We will create a function to generate the noisy points so that we can easily change the amplitude of the noise.
```
def y_p_noise(noise):
return y_p + np.random.uniform(-noise/2.0, noise/2.0, len(y_p))
```
We can plot the points with noise of amplitude $0.02$:
```
plt.xlabel('x')
plt.ylabel('$y$')
plt.ylim(-1.1, 1.1)
plt.title('Points on a noisy sine')
y_noise = y_p_noise(0.02)
plt.plot(x_p, y_noise, 'ro');
```
To the eye, the noise cannot be detected.
We can solve the system to find the coefficients:
```
c_noise = np.linalg.solve(A, y_noise)
p_noise = np.poly1d(c_noise)
y_fit = p_noise(x_fit)
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.title('Points on a sine graph with noise interpolated by a polynomial')
plt.plot(x_p, y_p, 'ro', x_fit, y_fit,'b-');
```
The points are clearly interpolated, but the result is now terrible near the boundaries of the interval, with large spikes. The spikes are an instance of Runge's phenomenon. A similar effect with a Fourier basis at discontinuities is known as the Gibbs phenomenon.
This is a common problem with polynomial fitting. With the exact sine points, we were lucky. With well-chosen, non-uniform interpolation points it is possible to improve the interpolation.
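As a quick illustration (a sketch that is not part of the original example), repeating the noisy fit with Chebyshev-spaced points, which cluster near the ends of the interval, largely suppresses the spikes:
```
# Chebyshev nodes scaled to [-pi, pi] (illustrative sketch)
k = np.arange(N)
x_cheb = np.pi * np.cos((2 * k + 1) * np.pi / (2 * N))
y_cheb = np.sin(x_cheb) + np.random.uniform(-0.01, 0.01, N)
# Interpolate through the Chebyshev points and plot
p_cheb = np.poly1d(np.linalg.solve(np.vander(x_cheb, N), y_cheb))
plt.plot(x_cheb, y_cheb, 'ro', x_fit, p_cheb(x_fit), 'b-');
```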
To explore the effect of noise, we can create an interactive plot with a slider to vary the noise amplitude.
```
from ipywidgets import widgets
from ipywidgets import interact
@interact(noise=(0.0, 0.25, 0.005))
def plot_interp_sine(noise=0.005):
y_noise = y_p_noise(noise)
c_noise = np.linalg.solve(A, y_noise)
p_noise = np.poly1d(c_noise)
y_noise_fit = p_noise(x_fit)
plt.xlabel('x')
plt.ylabel('$y$')
plt.title('Points on a noisy sine (noise amplitude: {})'.format(noise))
plt.plot(x_p, y_noise, 'ro', x_fit, y_noise_fit,'b-');
```
### Conditioning of the Vandermonde matrix
We have already seen by example that fitting with the Vandermonde matrix can behave badly. If the conditioning of the matrix $\boldsymbol{A}$ is poor, then the conditioning of the normal matrix $\boldsymbol{A}^{T}\boldsymbol{A}$, which we will meet below, will be much worse.
The Vandermonde matrix is notoriously ill-conditioned. Computing the condition number:
```
print("Condition number of the Vandermonde matrix: {}".format(np.linalg.cond(A, 2)))
```
We see that the condition number is very large. Such a system should not be solved with methods such as LU decomposition (despite what is done in the above examples!).
## Least-squares fitting
We will now look at fitting a polynomial of degree $k < n$ to the $n + 1$ points on the sine graph. The degree of the polynomial is not high enough to interpolate all the points, so we will compute a best fit in the least-squares sense.
We have seen in lectures that computing the least-squares solution involves solving
$$
\boldsymbol{A}^{T}\boldsymbol{A} \boldsymbol{c} = \boldsymbol{A}^{T} \boldsymbol{y}
$$
If we want to fit a $5$th-order polynomial to 20 data points, $\boldsymbol{A}$ is the $20 \times 6$ matrix:
$$
\boldsymbol{A} = \begin{bmatrix}
x_{1}^{5} & x_{1}^{4} & \ldots & x_{1}^{2} & x_{1} & 1
\\
x_{2}^{5} & x_{2}^{4} & \ldots & x_{2}^{2} & x_{2} & 1
\\
\vdots & \vdots & \vdots & \ldots & \vdots
\\
\vdots & \vdots & \vdots & \ldots & \vdots
\\
x_{20}^{5} & x_{20}^{4} & \ldots & x_{20}^{2} & x_{20} & 1
\end{bmatrix}
$$
and $\boldsymbol{c}$ contains the $6$ polynomial coefficients
$$
\boldsymbol{c}
= \begin{bmatrix}
c_{5} & c_{4} & c_{3} & c_{2} & c_{1} & c_{0}
\end{bmatrix}^{T}
$$
and $\boldsymbol{y}$ contains the 20 points we want to fit.
### Fitting points on the sine graph
Let's try fitting a lower-order polynomial to the 20 data points without noise. We start with a polynomial of degree 5, which has 6 coefficients. We first create the Vandermonde matrix with 6 columns:
```
A = np.vander(x_p, 6)
```
and then solve $$\boldsymbol{A}^{T}\boldsymbol{A} \boldsymbol{c} = \boldsymbol{A}^{T} \boldsymbol{y}$$ and create a NumPy polynomial from the coefficients:
```
ATA = (A.T).dot(A)
c_ls = np.linalg.solve(ATA, (A.T).dot(y_p))
p_ls = np.poly1d(c_ls)
print(p_ls)
```
Plotting the polynomial:
```
# Evaluate polynomial at some points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a $6$th-order polynomial')
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
```
To explore the effect of the polynomial degree, we will create an interactive plot with a slider for the degree.
```
@interact(order=(0, 19))
def plot(order):
# Create Vandermonde matrix
A = np.vander(x_p, order + 1)
ATA = (A.T).dot(A)
c_ls = np.linalg.solve(ATA, (A.T).dot(y_p))
p_ls = np.poly1d(c_ls)
# Evaluate polynomial at some points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.2, 1.2)
plt.title('Least-squares fit of 20 points on the sine graph with a ${}$th-order polynomial'.format(order))
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
```
The fit appears to be very good. We now experiment with some other polynomial degrees. From now on we will use the NumPy function `polyfit` to shorten the code. Moreover, `polyfit` uses a different solution algorithm from the one above, namely a singular value decomposition, to solve the same problem with less susceptibility to round-off error. We will write a short function to make it easy to vary the polynomial degree:
```
def plot(order=10):
# Compute the coefficients of a nth order polynomial that fits the data points (x_p, y_p)
c_ls = np.polyfit(x_p, y_p, order)
# Create a polynomial object from the coefficients
p_ls = np.poly1d(c_ls)
# Evaluate the polynomial at the plotting points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a polynomial of degree {}.'.format(order))
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
```
Starting with degree 3:
```
plot(3)
```
The fit is clearly not as good as for the $5$th-order polynomial, but it nonetheless looks quite good. Now for a quadratic polynomial:
```
plot(2)
```
Clearly the quadratic fit is very poor.
Creating an interactive plot with a slider for the polynomial degree:
```
interact(plot, order=(0, 19));
```
### Sine points with noise
Let's now look at a least-squares fit to the sine data with noise. We start by generalising the plot function to include noise:
```
# Compute least squares fit to sine points with noise
def plot(order=10, noise=0.0):
# Generate points on sine graph with nosie
y_noise = y_p_noise(noise)
# Compute the coefficients of a nth order polynomial that fits the data points (x_p, y_p)
c_ls = np.polyfit(x_p, y_noise, order)
# Create a polynomial object from the coefficients
p_ls = np.poly1d(c_ls)
# Evaluate the polynomial at the plotting points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a polynomial of degree {}.'.format(order))
plt.plot(x_p, y_noise, 'ro', x_fit, y_ls,'b-');
```
We start by fitting a polynomial of degree 12:
```
plot(12, 0.02)
```
The fit looks very good, and note that there are no discernible spikes at the ends of the interval.
We now make an interactive plot to explore the interaction between noise and polynomial degree.
```
interact(plot, order=(0, 19), noise=(0.0, 0.4, 0.005));
```
### Conditioning of the normal matrix
We have seen already that the conditioning of the Vandermonde matrix $\boldsymbol{A}$ is poor. If we consider $\boldsymbol{A}^{T}\boldsymbol{A}$, we see that the conditioning is much worse again:
```
A = np.vander(x_p, 20)
print("Condition number of A (Vandermonde matrix, 20): {}".format(np.linalg.cond(A)))
print("Condition number of (A.T)A (Vandermonde matrix, 20): {}".format(np.linalg.cond((A.T).dot(A))))
```
The poor condition number indicates why it is not a good idea to form and solve $\boldsymbol{A}^{T}\boldsymbol{A}$ directly. In practice, robust algorithms do not follow this approach.
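In NumPy, `np.linalg.lstsq` solves the least-squares problem via an SVD-based routine without ever forming $\boldsymbol{A}^{T}\boldsymbol{A}$ explicitly. As a quick sketch (not part of the original notebook), it reproduces the degree-5 fit from above:
```
# Least-squares fit of a degree-5 polynomial without forming the normal matrix
A6 = np.vander(x_p, 6)
c_lstsq, residuals, rank, sv = np.linalg.lstsq(A6, y_p, rcond=None)
print(np.poly1d(c_lstsq))
```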
|
github_jupyter
|
import numpy as np
N = 20
x_p = np.linspace(-np.pi, np.pi, N)
y_p = np.sin(x_p)
%matplotlib inline
import matplotlib.pyplot as plt
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Points on a sine graph')
plt.plot(x_p, y_p,'ro');
A = np.vander(x_p, N)
c = np.linalg.solve(A, y_p)
p = np.poly1d(c)
print(p)
# Create an array of 200 equally spaced points on [-pi, pi]
x_fit = np.linspace(-np.pi, np.pi, 200)
# Evaluate the polynomial at the points
y_fit = p(x_fit)
# Plot the interpolating polynomial and the sample points
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.title('Points on a sine graph interpolate by a polynomial')
plot = plt.plot(x_p, y_p, 'ro', x_fit, y_fit,'b-');
def y_p_noise(noise):
return y_p + np.random.uniform(-noise/2.0, noise/2.0, len(y_p))
plt.xlabel('x')
plt.ylabel('$y$')
plt.ylim(-1.1, 1.1)
plt.title('Points on a noisy sine')
y_noise = y_p_noise(0.02)
plt.plot(x_p, y_noise, 'ro');
c_noise = np.linalg.solve(A, y_noise)
p_noise = np.poly1d(c_noise)
y_fit = p_noise(x_fit)
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.title('Points on a sine graph with noise interpolated by a polynomial')
plt.plot(x_p, y_p, 'ro', x_fit, y_fit,'b-');
from ipywidgets import widgets
from ipywidgets import interact
@interact(noise=(0.0, 0.25, 0.005))
def plot_interp_sine(noise=0.005):
y_noise = y_p_noise(noise)
c_noise = np.linalg.solve(A, y_noise)
p_noise = np.poly1d(c_noise)
y_noise_fit = p_noise(x_fit)
plt.xlabel('x')
plt.ylabel('$y$')
plt.title('Points on a noisy sine (noise amplitude: {})'.format(noise))
plt.plot(x_p, y_noise, 'ro', x_fit, y_noise_fit,'b-');
print("Condition number of the Vandermonde matrix: {}".format(np.linalg.cond(A, 2)))
A = np.vander(x_p, 6)
ATA = (A.T).dot(A)
c_ls = np.linalg.solve(ATA, (A.T).dot(y_p))
p_ls = np.poly1d(c_ls)
print(p_ls)
# Evaluate polynomial at some points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a $6$th-order polynomial')
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
@interact(order=(0, 19))
def plot(order):
# Create Vandermonde matrix
A = np.vander(x_p, order + 1)
ATA = (A.T).dot(A)
c_ls = np.linalg.solve(ATA, (A.T).dot(y_p))
p_ls = np.poly1d(c_ls)
# Evaluate polynomial at some points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.2, 1.2)
plt.title('Least-squares fit of 20 points on the sine graph with a ${}$th-order polynomial'.format(order))
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
def plot(order=10):
# Compute the coefficients of a nth order polynomial that fits the data points (x_p, y_p)
c_ls = np.polyfit(x_p, y_p, order)
# Create a polynomial object from the coefficients
p_ls = np.poly1d(c_ls)
# Evaluate the polynomial at the plotting points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a polynomial of degree {}.'.format(order))
plt.plot(x_p, y_p, 'ro', x_fit, y_ls,'b-');
plot(3)
plot(2)
interact(plot, order=(0, 19));
# Compute least squares fit to sine points with noise
def plot(order=10, noise=0.0):
# Generate points on sine graph with nosie
y_noise = y_p_noise(noise)
# Compute the coefficients of a nth order polynomial that fits the data points (x_p, y_p)
c_ls = np.polyfit(x_p, y_noise, order)
# Create a polynomial object from the coefficients
p_ls = np.poly1d(c_ls)
# Evaluate the polynomial at the plotting points
y_ls = p_ls(x_fit)
# Plot
plt.xlabel('$x$')
plt.ylabel('$f$')
plt.ylim(-1.1, 1.1)
plt.title('Least-squares fit of 20 points on the sine graph with a polynomial of degree {}.'.format(order))
plt.plot(x_p, y_noise, 'ro', x_fit, y_ls,'b-');
plot(12, 0.02)
interact(plot, order=(0, 19), noise=(0.0, 0.4, 0.005));
A = np.vander(x_p, 20)
print("Condition number of A (Vandermonde matrix, 20): {}".format(np.linalg.cond(A)))
print("Condition number of (A.T)A (Vandermonde matrix, 20): {}".format(np.linalg.cond((A.T).dot(A))))
| 0.805747 | 0.996016 |
# MNIST Image Classification with TensorFlow
This notebook demonstrates how to implement different image models on MNIST using the [tf.keras API](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras).
## Learning Objectives
1. Understand how to build a Dense Neural Network (DNN) for image classification
2. Understand how to use dropout (DNN) for image classification
3. Understand how to use Convolutional Neural Networks (CNN)
```
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
from datetime import datetime
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import (
Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)
```
## Write Input Functions
As usual, we need to specify input functions for training and evaluating. We'll scale each pixel value so it's a decimal value between 0 and 1 as a way of normalizing the data.
```
import tensorflow as tf
def scale(image, label):
"""Scales images from a 0-255 int range to a 0-1 float range"""
image = tf.cast(image, tf.float32)
image /= 255
image = tf.expand_dims(image, -1)
return image, label
def load_dataset(
data, training=True, buffer_size=5000, batch_size=100, nclasses=10):
"""Loads MNIST dataset into a tf.data.Dataset"""
(x_train, y_train), (x_test, y_test) = data
x = x_train if training else x_test
y = y_train if training else y_test
# One-hot encode the classes
y = tf.keras.utils.to_categorical(y, nclasses)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(scale).batch(batch_size)
if training:
dataset = dataset.shuffle(buffer_size).repeat()
return dataset
```
Next, let's code the models! The [tf.keras API](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras) accepts an array of [layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers) into a [model object](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so we can create a dictionary of layers based on the different model types we want to use. The cell below has two functions: `get_layers` and `build_model`. We will build the structure of our model in `get_layers`.
**TODO 1**: Define the Keras layers for a linear model
**TODO 2**: Define the Keras layers for a DNN model with dropouts
**TODO 3**: Define the Keras layers for a CNN model
Hint: These models progressively build on each other. Look at the imported `tensorflow.keras.layers` modules and the default values for the variables defined in `get_layers` for guidance.
```
# Image Variables
WIDTH = 28
HEIGHT = 28
def get_layers(
model_type,
nclasses=10,
hidden_layer_1_neurons=400,
hidden_layer_2_neurons=100,
dropout_rate=0.25,
num_filters_1=64,
kernel_size_1=3,
pooling_size_1=2,
num_filters_2=32,
kernel_size_2=3,
pooling_size_2=2):
"""Constructs layers for a keras model based on a dict of model types."""
model_layers = {
'linear': [
# TODO 1
],
'dnn': [
# TODO 2
],
'cnn': [
# TODO 3
]
}
return model_layers[model_type]
def build_model(layers):
"""Compiles keras model for image classification."""
model = Sequential(layers)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
```
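If you want to check your work on the TODOs, one possible set of layer definitions is sketched below. This is only an illustration using the default hyperparameter values from `get_layers`, not the official solution:
```
# Sketch of one possible answer to TODOs 1-3 (uses the defaults from get_layers)
linear_layers = [
    Flatten(input_shape=(WIDTH, HEIGHT, 1)),
    Dense(10),
    Softmax()
]
dnn_layers = [
    Flatten(input_shape=(WIDTH, HEIGHT, 1)),
    Dense(400, activation='relu'),
    Dense(100, activation='relu'),
    Dropout(0.25),
    Dense(10),
    Softmax()
]
cnn_layers = [
    Conv2D(64, kernel_size=3, activation='relu',
           input_shape=(WIDTH, HEIGHT, 1)),
    MaxPooling2D(2),
    Conv2D(32, kernel_size=3, activation='relu'),
    MaxPooling2D(2),
    Flatten(),
    Dense(400, activation='relu'),
    Dense(100, activation='relu'),
    Dropout(0.25),
    Dense(10),
    Softmax()
]
```
Each list ends with a `Dense(10)` layer followed by a `Softmax`, which matches the `categorical_crossentropy` loss used in `build_model`.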
## Training
With everything set up, let's run the training code.
```
MODEL_TYPE = ["linear", "dnn", "cnn"]
model_results = {}
EPOCHS = 10
STEPS_PER_EPOCH = 100
mnist = tf.keras.datasets.mnist.load_data()
train_data = load_dataset(mnist)
validation_data = load_dataset(mnist, training=False)
for model in MODEL_TYPE:
print('Start {} Training'.format(model))
model_layers = get_layers(model)
image_model = build_model(model_layers)
history = image_model.fit(
train_data,
validation_data=validation_data,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH)
model_results.update({model: history})
for model in MODEL_TYPE:
print('accuracy of {}: {}'.format(model, model_results[model].history['accuracy'][-1]))
```
## Predict and Visualize Results
```
test_ds = load_dataset(
mnist,
training=False,
batch_size=200,
)
image, label_ohe = next(iter(test_ds))
label = np.argmax(label_ohe, axis=1)
linear_res = np.argmax(model_results['linear'].model.predict(image), axis=1)
dnn_res = np.argmax(model_results['dnn'].model.predict(image), axis=1)
cnn_res = np.argmax(model_results['cnn'].model.predict(image), axis=1)
for i, im in enumerate(image):
# visualize images which were mispredicted.
if label[i] != linear_res[i] or label[i] != dnn_res[i] or label[i] != cnn_res[i]:
MSG = 'label:{}, linear:{}, dnn:{}, cnn:{}'
print(MSG.format(label[i], linear_res[i], dnn_res[i], cnn_res[i]))
plt.imshow(im)
plt.show()
```
Copyright 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
from datetime import datetime
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import (
Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)
import tensorflow as tf
def scale(image, label):
"""Scales images from a 0-255 int range to a 0-1 float range"""
image = tf.cast(image, tf.float32)
image /= 255
image = tf.expand_dims(image, -1)
return image, label
def load_dataset(
data, training=True, buffer_size=5000, batch_size=100, nclasses=10):
"""Loads MNIST dataset into a tf.data.Dataset"""
(x_train, y_train), (x_test, y_test) = data
x = x_train if training else x_test
y = y_train if training else y_test
# One-hot encode the classes
y = tf.keras.utils.to_categorical(y, nclasses)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(scale).batch(batch_size)
if training:
dataset = dataset.shuffle(buffer_size).repeat()
return dataset
# Image Variables
WIDTH = 28
HEIGHT = 28
def get_layers(
model_type,
nclasses=10,
hidden_layer_1_neurons=400,
hidden_layer_2_neurons=100,
dropout_rate=0.25,
num_filters_1=64,
kernel_size_1=3,
pooling_size_1=2,
num_filters_2=32,
kernel_size_2=3,
pooling_size_2=2):
"""Constructs layers for a keras model based on a dict of model types."""
model_layers = {
'linear': [
# TODO 1
],
'dnn': [
# TODO 2
],
'cnn': [
# TODO 3
]
}
return model_layers[model_type]
def build_model(layers):
"""Compiles keras model for image classification."""
model = Sequential(layers)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
MODEL_TYPE = ["linear", "dnn", "cnn"]
model_results = {}
EPOCHS = 10
STEPS_PER_EPOCH = 100
mnist = tf.keras.datasets.mnist.load_data()
train_data = load_dataset(mnist)
validation_data = load_dataset(mnist, training=False)
for model in MODEL_TYPE:
print('Start {} Training'.format(model))
model_layers = get_layers(model)
image_model = build_model(model_layers)
history = image_model.fit(
train_data,
validation_data=validation_data,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH)
model_results.update({model: history})
for model in MODEL_TYPE:
print('accuracy of {}: {}'.format(model, model_results[model].history['accuracy'][-1]))
test_ds = load_dataset(
mnist,
training=False,
batch_size=200,
)
image, label_ohe = next(iter(test_ds))
label = np.argmax(label_ohe, axis=1)
linear_res = np.argmax(model_results['linear'].model.predict(image), axis=1)
dnn_res = np.argmax(model_results['dnn'].model.predict(image), axis=1)
cnn_res = np.argmax(model_results['cnn'].model.predict(image), axis=1)
for i, im in enumerate(image):
# visualize images which were mispredicted.
if label[i] != linear_res[i] or label[i] != dnn_res[i] or label[i] != cnn_res[i]:
MSG = 'label:{}, linear:{}, dnn:{}, cnn:{}'
print(MSG.format(label[i], linear_res[i], dnn_res[i], cnn_res[i]))
plt.imshow(im)
plt.show()
| 0.623606 | 0.981058 |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
```
Training and Testing Data
=====================================
To evaluate how well our supervised models generalize, we can split our data into a training and a test set:
<img src="figures/train_test_split_matrix.svg" width="100%">
```
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
```
Thinking about how machine learning is normally performed, the idea of a train/test split makes sense. Real world systems train on the data they have, and as other data comes in (from customers, sensors, or other sources) the classifier that was trained must predict on fundamentally *new* data. We can simulate this during training using a train/test split - the test data is a simulation of "future data" which will come into the system during production.
Specifically for iris, the 150 labels are sorted, which means that if we split the data in order (without shuffling), we will end up with fundamentally altered class distributions. For instance, if we performed a common 2/3 training data and 1/3 test data split, our training dataset would only consist of flower classes 0 and 1 (Setosa and Versicolor), and our test set would only contain samples with class label 2 (Virginica flowers).
Under the assumption that all samples are independent of each other (in contrast to time series data), we want to **randomly shuffle the dataset before we split it**, as illustrated above.
```
y
```
Now we need to split the data into training and testing. Luckily, this is a common pattern in machine learning and scikit-learn has a pre-built function to split data into training and testing sets for you. Here, we use 50% of the data as training, and 50% testing. 80% and 20% is another common split, but there are no hard and fast rules. The most important thing is to fairly evaluate your system on data it *has not* seen during training!
```
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
test_size=0.5,
random_state=123)
print("Labels for training data:")
print(train_y)
print("Labels for test data:")
print(test_y)
```
---
**Tip: Stratified Split**
Especially for relatively small datasets, it's better to stratify the split. Stratification means that we maintain the original class proportion of the dataset in the test and training sets. For example, after we randomly split the dataset as shown in the previous code example, we have the following class proportions in percent:
```
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
```
So, in order to stratify the split, we can pass the label array as an additional option to the `train_test_split` function:
```
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
test_size=0.5,
random_state=123,
stratify=y)
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
```
---
By evaluating our classifier performance on data that has been seen during training, we could get false confidence in the predictive power of our model. In the worst case, it may simply memorize the training samples but completely fail to classify new, similar samples -- we really don't want to put such a system into production!
Instead of using the same dataset for training and testing (this is called "resubstitution evaluation"), it is much, much better to use a train/test split in order to estimate how well your trained model performs on new data.
```
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier().fit(train_X, train_y)
pred_y = classifier.predict(test_X)
print("Fraction Correct [Accuracy]:")
print(np.sum(pred_y == test_y) / float(len(test_y)))
```
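Equivalently, every scikit-learn classifier provides a `score` method that computes the accuracy on a held-out set directly:
```
print("Accuracy via classifier.score:", classifier.score(test_X, test_y))
```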
We can also visualize the correct predictions ...
```
print('Samples correctly classified:')
correct_idx = np.where(pred_y == test_y)[0]
print(correct_idx)
```
... as well as the failed predictions
```
print('Samples incorrectly classified:')
incorrect_idx = np.where(pred_y != test_y)[0]
print(incorrect_idx)
# Plot two dimensions
for n in np.unique(test_y):
idx = np.where(test_y == n)[0]
plt.scatter(test_X[idx, 1], test_X[idx, 2], label="Class %s" % str(iris.target_names[n]))
plt.scatter(test_X[incorrect_idx, 1], test_X[incorrect_idx, 2], color="darkred")
plt.xlabel('sepal width [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc=3)
plt.title("Iris Classification results")
plt.show()
```
We can see that the errors occur in the area where green (class 1) and gray (class 2) overlap. This gives us insight about what features to add - any feature which helps separate class 1 and class 2 should improve classifier performance.
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
Print the true labels of 3 wrong predictions and modify the scatterplot code, which we used above, to visualize and distinguish these three samples with different markers in the 2D scatterplot. Can you explain why our classifier made these wrong predictions?
</li>
</ul>
</div>
```
# %load solutions/04_wrong-predictions.py
```
|
github_jupyter
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
y
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
test_size=0.5,
random_state=123)
print("Labels for training data:")
print(train_y)
print("Labels for test data:")
print(test_y)
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
train_X, test_X, train_y, test_y = train_test_split(X, y,
train_size=0.5,
test_size=0.5,
random_state=123,
stratify=y)
print('All:', np.bincount(y) / float(len(y)) * 100.0)
print('Training:', np.bincount(train_y) / float(len(train_y)) * 100.0)
print('Test:', np.bincount(test_y) / float(len(test_y)) * 100.0)
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier().fit(train_X, train_y)
pred_y = classifier.predict(test_X)
print("Fraction Correct [Accuracy]:")
print(np.sum(pred_y == test_y) / float(len(test_y)))
print('Samples correctly classified:')
correct_idx = np.where(pred_y == test_y)[0]
print(correct_idx)
print('Samples incorrectly classified:')
incorrect_idx = np.where(pred_y != test_y)[0]
print(incorrect_idx)
# Plot two dimensions
for n in np.unique(test_y):
idx = np.where(test_y == n)[0]
plt.scatter(test_X[idx, 1], test_X[idx, 2], label="Class %s" % str(iris.target_names[n]))
plt.scatter(test_X[incorrect_idx, 1], test_X[incorrect_idx, 2], color="darkred")
plt.xlabel('sepal width [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc=3)
plt.title("Iris Classification results")
plt.show()
# %load solutions/04_wrong-predictions.py
| 0.654784 | 0.990006 |
# Putting It All Together: A Realistic Example
In this section we're going to work through a realistic example of a deep learning workflow. We'll be working with a smallish dataset featuring different kinds of flowers from Kaggle. We're going to apply data augmentation to synthetically expand the size of our dataset. And we'll attempt transfer learning using networks pretrained on the ImageNet dataset, which includes some flower species already.
```
# All of this should look familiar from previous notebooks:
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image, ImageOps
from keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator, image as keras_image
from keras.utils import to_categorical
# Our function to load an image from a path and fix it up, but
# modified slightly to accomodate MobileNetV2.
def load_maintain_aspect_ratio(input_image_path, target_size):
image = Image.open(input_image_path)
width, height = image.size
w_pad = 0
h_pad = 0
bonus_h_pad = 0
bonus_w_pad = 0
if width > height:
pix_diff = (width - height)
h_pad = pix_diff // 2
bonus_h_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side.
elif height > width:
pix_diff = (height - width)
w_pad = pix_diff // 2
bonus_w_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side.
# else: image is already square. Both pads stay 0
image = ImageOps.expand(image, (w_pad, h_pad, w_pad+bonus_w_pad, h_pad+bonus_h_pad))
image = image.resize(target_size)
# Get the image data as a numpy array.
image_data = np.array(image.getdata()).reshape(image.size[0], image.size[1], 3)
# The preprocess function from MobileNetV2
# It expects a numpy array with RGB values between 0-255
return preprocess_input(image_data)
# Our recurring plot function.
def plot_training_history(history, model):
figure = plt.figure()
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.tight_layout()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.tight_layout()
figure.tight_layout()
plt.show()
# Some constants:
# 224x224 is MobileNetV2's default, so lets stick with it
image_size = 224
batch_size = 32
validation_split = 0.2
# Start small, for the sake of learning speed.
num_epochs = 5
# The dataset is too large to reasonably redistribute as part of this repository, so you will
# have to download it separately from: https://www.kaggle.com/alxmamaev/flowers-recognition/
# The download as a flowers folder, this variable should point to the
# loccation of that folder. Inside that folder there should be 5 folders
# each named for the type of flower.
flower_dataset_directory = 'flowers_dataset/flowers/'
# The image classes
classes = {
'daisy': 0,
'dandelion': 1,
'rose': 2,
'sunflower': 3,
'tulip': 4
}
# Process all of the images into an array
images = []
labels = []
for subdir in classes.keys():
current_location = os.path.join(flower_dataset_directory, subdir)
print(f'Processing {subdir}')
sub_dir_count = 0
for file in os.listdir(current_location):
try:
image = load_maintain_aspect_ratio(os.path.join(current_location, file), (image_size, image_size))
images.append(image)
labels.append(classes[subdir])
sub_dir_count += 1
except:
print(f'Failed to load image: {subdir}/{file}. Ignored it.')
print(f'Found {sub_dir_count} images of type {subdir}')
# Just double check.
assert len(images) == len(labels)
# This is a little bit crude, but we'll just randomly select each image/label pair
# to be in the validation set based on our validation split. We could take greater
# care here to ensure that the right amount are represented from each class
# but it will probably be okay...
x_train = []
y_train = []
x_validation = []
y_validation = []
for image, label in zip(images, labels):
if np.random.random() > validation_split:
x_train.append(image)
y_train.append(label)
else:
x_validation.append(image)
y_validation.append(label)
# Properly format the images into a np array
x_train = np.array(x_train)
x_validation = np.array(x_validation)
# Make the labels one-hot encoded:
y_train = to_categorical(y_train, len(classes))
y_validation = to_categorical(y_validation, len(classes))
print(f'Loaded {len(images)}')
print(f'Training size: {len(x_train)}, validation size: {len(x_validation)}')
# Lots of possible augmentation to the training data
# hopefully allowing us to avoid overfitting
train_generator = ImageDataGenerator(
rotation_range=360,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=10,
zoom_range=0.5,
fill_mode='constant',
cval=0.0
)
# Don't transform the validation images.
validation_generator = ImageDataGenerator()
# Fit them both, best practice
train_generator.fit(x_train)
validation_generator.fit(x_validation)
# Lets do some sanity checking:
print(x_train.shape)
print(x_validation.shape)
print(y_train.shape)
print(y_validation.shape)
# View a couple, validations are never augmented
for _ in range(3):
plt.imshow(next(validation_generator.flow(x_validation))[0])
plt.show()
# But training data is
for _ in range(3):
plt.imshow(next(train_generator.flow(x_train))[0])
plt.show()
# Loading our pretrained mobilenet
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
# Make a very simple new classifier
old_top = base_model.output
old_top = GlobalAveragePooling2D()(old_top)
new_top = Dense(len(classes), activation='softmax')(old_top)
model = Model(inputs=base_model.input, outputs=new_top)
# We have a small amount of data, but the data is pretty similar
# to imagenet, which does train on many flower images, so we can
# expect the existing weights to be pretty good. Freeze all.
for layer in base_model.layers:
layer.trainable = False
# Go for it!
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=num_epochs,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
# Things are going great! Lets unfreeze some of our model's layers and see if "forgetting"
# some of the stuff our network learned about dogs, buildings, and waterbottles can
# improve our results further...
# This number was chosen specifically for MobileNetV2, it is the
# start of the 15th block.
for layer in model.layers[134:]:
layer.trainable = True
# Recompile to ensure the layers get set to trainable
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=num_epochs,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
```
Right on, things still look okay. The dip in validation accuracy is definitely concerning, though. After this, maybe we could get an even bigger dataset and buy some cloud compute time to train the model for longer... But if we continue to see the validation accuracy decline (overfitting), then we NEED to try something to set it right. More data and more augmentation of the data would both be good ideas. We could also try an alternate model or adjust our classification layers, but that should probably be **in addition** to more data and more augmentation.
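For example, one small adjustment to the classification layers (a sketch only, not something run in this notebook) would be to rebuild the head with dropout before retraining:
```
# Sketch only: rebuild the classification head with dropout to fight overfitting.
# The 0.3 rate is a guess and would need tuning on the validation set.
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.3)(x)
new_top = Dense(len(classes), activation='softmax')(x)
model_with_dropout = Model(inputs=base_model.input, outputs=new_top)
model_with_dropout.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
```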
```
# I ran this one overnight, just for fun, and to see if we started to overfit
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=30,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
```
I love this result — we do see validation continue to diverge from training, implying we are overfitting even with the augmentation tactics. But we also see that validation is really unstable. Sometimes we're getting lucky, but some of the adjustments are hurting us. We probably need more data to improve much more.
|
github_jupyter
|
# All of this should look familiar from previous notebooks:
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image, ImageOps
from keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator, image as keras_image
from keras.utils import to_categorical
# Our function to load an image from a path and fix it up, but
# modified slightly to accomodate MobileNetV2.
def load_maintain_aspect_ratio(input_image_path, target_size):
image = Image.open(input_image_path)
width, height = image.size
w_pad = 0
h_pad = 0
bonus_h_pad = 0
bonus_w_pad = 0
if width > height:
pix_diff = (width - height)
h_pad = pix_diff // 2
bonus_h_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side.
elif height > width:
pix_diff = (height - width)
w_pad = pix_diff // 2
bonus_w_pad = pix_diff % 2 # If the difference was odd, add one pixel on one side.
# else: image is already square. Both pads stay 0
image = ImageOps.expand(image, (w_pad, h_pad, w_pad+bonus_w_pad, h_pad+bonus_h_pad))
image = image.resize(target_size)
# Get the image data as a numpy array.
image_data = np.array(image.getdata()).reshape(image.size[0], image.size[1], 3)
# The preprocess function from MobileNetV2
# It expects a numpy array with RGB values between 0-255
return preprocess_input(image_data)
# Our recurring plot function.
def plot_training_history(history, model):
figure = plt.figure()
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.tight_layout()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.tight_layout()
figure.tight_layout()
plt.show()
# Some constants:
# 224x224 is MobileNetV2's default, so lets stick with it
image_size = 224
batch_size = 32
validation_split = 0.2
# Start small, for the sake of learning speed.
num_epochs = 5
# The dataset is too large to reasonably redistribute as part of this repository, so you will
# have to download it separately from: https://www.kaggle.com/alxmamaev/flowers-recognition/
# The download as a flowers folder, this variable should point to the
# loccation of that folder. Inside that folder there should be 5 folders
# each named for the type of flower.
flower_dataset_directory = 'flowers_dataset/flowers/'
# The image classes
classes = {
'daisy': 0,
'dandelion': 1,
'rose': 2,
'sunflower': 3,
'tulip': 4
}
# Process all of the images into an array
images = []
labels = []
for subdir in classes.keys():
current_location = os.path.join(flower_dataset_directory, subdir)
print(f'Processing {subdir}')
sub_dir_count = 0
for file in os.listdir(current_location):
try:
image = load_maintain_aspect_ratio(os.path.join(current_location, file), (image_size, image_size))
images.append(image)
labels.append(classes[subdir])
sub_dir_count += 1
except:
print(f'Failed to load image: {subdir}/{file}. Ignored it.')
print(f'Found {sub_dir_count} images of type {subdir}')
# Just double check.
assert len(images) == len(labels)
# This is a little bit crude, but we'll just randomly select each image/label pair
# to be in the validation set based on our validation split. We could take greater
# care here to ensure that the right amount are represented from each class
# but it will probably be okay...
x_train = []
y_train = []
x_validation = []
y_validation = []
for image, label in zip(images, labels):
if np.random.random() > validation_split:
x_train.append(image)
y_train.append(label)
else:
x_validation.append(image)
y_validation.append(label)
# Properly format the images into a np array
x_train = np.array(x_train)
x_validation = np.array(x_validation)
# Make the labels one-hot encoded:
y_train = to_categorical(y_train, len(classes))
y_validation = to_categorical(y_validation, len(classes))
print(f'Loaded {len(images)}')
print(f'Training size: {len(x_train)}, validation size: {len(x_validation)}')
# Lots of possible augmentation to the training data
# hopefully allowing us to avoid overfitting
train_generator = ImageDataGenerator(
rotation_range=360,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=10,
zoom_range=0.5,
fill_mode='constant',
cval=0.0
)
# Don't transform the validation images.
validation_generator = ImageDataGenerator()
# Fit them both, best practice
train_generator.fit(x_train)
validation_generator.fit(x_validation)
# Lets do some sanity checking:
print(x_train.shape)
print(x_validation.shape)
print(y_train.shape)
print(y_validation.shape)
# View a couple, validations are never augmented
for _ in range(3):
plt.imshow(next(validation_generator.flow(x_validation))[0])
plt.show()
# But training data is
for _ in range(3):
plt.imshow(next(train_generator.flow(x_train))[0])
plt.show()
# Loading our pretrained mobilenet
base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
# Make a very simple new classifier
old_top = base_model.output
old_top = GlobalAveragePooling2D()(old_top)
new_top = Dense(len(classes), activation='softmax')(old_top)
model = Model(inputs=base_model.input, outputs=new_top)
# We have a small amount of data, but the data is pretty similar
# to imagenet, which does train on many flower images, so we can
# expect the existing weights to be pretty good. Freeze all.
for layer in base_model.layers:
layer.trainable = False
# Go for it!
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=num_epochs,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
# Things are going great! Lets unfreeze some of our model's layers and see if "forgetting"
# some of the stuff our network learned about dogs, buildings, and waterbottles can
# improve our results further...
# This number was chosen specifically for MobileNetV2, it is the
# start of the 15th block.
for layer in model.layers[134:]:
layer.trainable = True
# Recompile to ensure the layers get set to trainable
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=num_epochs,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
# I ran this one overnight, just for fun, and to see if we started to overfit
history = model.fit_generator(train_generator.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train) // batch_size,
epochs=30,
validation_data=validation_generator.flow(x_validation, y_validation),
validation_steps=len(x_validation) // batch_size
)
plot_training_history(history, model)
| 0.755907 | 0.962603 |
# Temporal-Difference Methods
In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
---
### Part 0: Explore CliffWalkingEnv
We begin by importing the necessary packages.
```
import sys
import gym
import numpy as np
import random
import math
from collections import defaultdict, deque
import matplotlib.pyplot as plt
%matplotlib inline
import check_test
from plot_utils import plot_values
```
Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.
```
env = gym.make('CliffWalking-v0')
```
The agent moves through a $4\times 12$ gridworld, with states numbered as follows:
```
[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
[36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]
```
At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.
The agent has 4 potential actions:
```
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
```
Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.
```
print(env.action_space)
print(env.observation_space)
```
In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function.
_**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._
```
# define the optimal state-value function
V_opt = np.zeros((4,12))
V_opt[0][0:13] = -np.arange(3, 15)[::-1]
V_opt[1][0:13] = -np.arange(3, 15)[::-1] + 1
V_opt[2][0:13] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13
plot_values(V_opt)
```
### Part 1: TD Control: Sarsa
In this section, you will write your own implementation of the Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
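For reference, the update implemented in `update_Q_sarsa` below is
$$
Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big( R_{t+1} + \gamma Q(S_{t+1}, A_{t+1}) - Q(S_t, A_t) \big),
$$
where $Q(S_{t+1}, A_{t+1})$ is taken to be zero when $S_{t+1}$ is terminal.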
```
def update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state=None, next_action=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
# get value of state, action pair at next time step
Qsa_next = Q[next_state][next_action] if next_state is not None else 0
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def epsilon_greedy(Q, state, nA, eps):
"""Selects epsilon-greedy action for supplied state.
Params
======
Q (dictionary): action-value function
state (int): current state
nA (int): number actions in the environment
eps (float): epsilon
"""
    if random.random() > eps:  # select the greedy action with probability 1 - epsilon
        return np.argmax(Q[state])
    else:                       # otherwise, select an action uniformly at random
        return random.choice(np.arange(nA))
def sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
nA = env.action_space.n # number of actions
Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
while True:
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
if not done:
next_action = epsilon_greedy(Q, next_state, nA, eps) # epsilon-greedy action
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \
state, action, reward, next_state, next_action)
state = next_state # S <- S'
action = next_action # A <- A'
if done:
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \
state, action, reward)
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 3000, .01)
# print the estimated optimal policy
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
```
### Part 2: TD Control: Q-learning
In this section, you will write your own implementation of the Q-learning control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
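As a reminder, the Q-learning (Sarsamax) update that `update_Q_sarsamax` below implements is

$$Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big( R_{t+1} + \gamma \max_{a} Q(S_{t+1}, a) - Q(S_t, A_t) \big),$$

which bootstraps from the greedy action in the next state rather than from the action the agent actually takes.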
```
def update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
Qsa_next = np.max(Q[next_state]) if next_state is not None else 0 # value of next state
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def q_learning(env, num_episodes, alpha, gamma=1.0, plot_every=100):
"""Q-Learning - TD Control
Params
======
num_episodes (int): number of episodes to run the algorithm
alpha (float): learning rate
gamma (float): discount factor
plot_every (int): number of episodes to use when calculating average score
"""
nA = env.action_space.n # number of actions
Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
while True:
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
Q[state][action] = update_Q_sarsamax(alpha, gamma, Q, \
state, action, reward, next_state)
state = next_state # S <- S'
if done:
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 5000, .01)
# print the estimated optimal policy
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
```
### Part 3: TD Control: Expected Sarsa
In this section, you will write your own implementation of the Expected Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
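As a reminder, the Expected Sarsa update that `update_Q_expsarsa` below implements replaces the sampled next action with an expectation over the current $\epsilon$-greedy policy $\pi$:

$$Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \Big( R_{t+1} + \gamma \sum_{a} \pi(a \mid S_{t+1})\, Q(S_{t+1}, a) - Q(S_t, A_t) \Big).$$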
```
def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
policy_s = np.ones(nA) * eps / nA # current policy (for next state S')
policy_s[np.argmax(Q[next_state])] = 1 - eps + (eps / nA) # greedy action
Qsa_next = np.dot(Q[next_state], policy_s) # get value of state at next time step
target = reward + (gamma * Qsa_next) # construct target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def expected_sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
"""Expected SARSA - TD Control
Params
======
num_episodes (int): number of episodes to run the algorithm
alpha (float): step-size parameters for the update step
gamma (float): discount factor
plot_every (int): number of episodes to use when calculating average score
"""
nA = env.action_space.n # number of actions
Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
score = 0 # initialize score
state = env.reset() # start episode
eps = 0.005 # set value of epsilon
while True:
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
# update Q
Q[state][action] = update_Q_expsarsa(alpha, gamma, nA, eps, Q, \
state, action, reward, next_state)
state = next_state # S <- S'
if done:
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expected_sarsa(env, 5000, 0.01)
# print the estimated optimal policy
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
```
# 1. PubMed Download
### We run one query per specialty to obtain the IDs of the PubMed documents
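The contents of `total_queries.txt` are not shown in this notebook; judging from the parsing code below, each line is expected to hold a MeSH tree number, a specialty name, and a PubMed query, separated by `#`. A hypothetical example line (purely illustrative, not taken from the real file):

```
# Hypothetical line of total_queries.txt, format inferred from read_file_queries():
#   tree_number#specialty#PubMed query
example_line = 'C14#Cardiovascular Diseases#"cardiovascular diseases"[MeSH Major Topic]'
tree_num, specialty, query = example_line.split("#")
```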
```
import os
from Bio import Entrez
from urllib.request import urlopen
import urllib.error
path_file_queries = 'total_queries.txt'
path_files_xmls = 'specialties_subespecialties_xml'
path_casesreports_xml = 'specialties_subespecialties_case_report_xml'
url_entrez = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&retmode=xml&id='
def open_url(url, file):
    try:
        # append this batch's XML response to the output file
        with open(file, "a+") as xml:
            xml.write(urlopen(url).read().decode('utf-8'))
    except urllib.error.HTTPError:
        print("**** URL too long")
def search_pubmed(query):
Entrez.email = '[email protected]'
handle = Entrez.esearch(db='pubmed',
sort='relevance',
retmax='1000000',
retmode='xml',
term=query)
results = Entrez.read(handle)
return results
def download_xmls(list_pmid, file_output):
if len(list_pmid) < 400:
url = ','.join(list_pmid)
url_search = url_entrez + url
print('[{}/{}]'.format(len(list_pmid), len(list_pmid)))
open_url(url_search, file_output)
else:
        # work around the URL length limit by splitting the request into chunks
        # and appending each response to an auxiliary file
new_list_pmid = [list_pmid[i:i+400] for i in range(0, len(list_pmid), 400)]
for pos, list_files in enumerate(new_list_pmid):
url = ','.join(list_files)
url_search = url_entrez + url
print('[{}/{}]'.format(pos, len(new_list_pmid)))
open_url(url_search, file_output.replace(".xml", ".mal"))
        # rebuild a single well-formed XML file from the concatenated responses
with open(file_output.replace(".xml", ".mal"),"r") as f:
content = f.read()
content = content.replace('</PubmedArticleSet><?xml version="1.0" ?>\n<!DOCTYPE PubmedArticleSet PUBLIC "-//NLM//DTD PubMedArticle, 1st January 2019//EN" "https://dtd.nlm.nih.gov/ncbi/pubmed/out/pubmed_190101.dtd">\n<PubmedArticleSet>', '')
with open(file_output, 'w') as f:
f.write(content)
def read_file_queries():
with open(path_file_queries, 'r') as fquery:
for index, line in enumerate(fquery):
line = line.strip()
            # build a clean file name for the output document
tree_num = line.split("#")[0]
specialty = line.split("#")[1]
specialty = specialty.lower()
specialty = specialty.replace(", ", " ")
specialty = specialty.replace(" ", "_")
specialty = specialty.replace("-", "_")
specialty = tree_num + '_' + specialty
query = line.split("#")[2]
            # skip specialties whose XML file was already generated
if not os.path.isfile(os.path.join(path_files_xmls, specialty) + '.xml'):
                # regular query (excluding case reports)
normal_query = query + ' and not Case Reports[PT]'
                # query restricted to case reports
case_report_query = query + ' and Case Reports[PT]'
results_total = search_pubmed(query)
results_files = search_pubmed(normal_query)
results_cr = search_pubmed(case_report_query)
list_pmid_total = results_total['IdList']
list_pmid_files = results_files['IdList']
list_pmid_cr = results_cr['IdList']
                # sanity check: numFilesWithoutCaseReports + numFilesCaseReports == numFilesTotal
print("{} - {}, nº doc total: {}, files: {}, cases reports: {}. -> {}".format(index, specialty,
len(list_pmid_total),
len(list_pmid_files),
len(list_pmid_cr),
len(list_pmid_files) + len(list_pmid_cr) == len(list_pmid_total)
))
if len(list_pmid_total) > 10000:
                    # DOWNLOAD NORMAL PUBLICATIONS
                    print("Download normal publication type.")
                    file_output = os.path.join(path_files_xmls, specialty) + '.xml'
                    download_xmls(list_pmid_files, file_output)
                    # DOWNLOAD CASE REPORTS
                    print("Download case reports publication type.")
                    file_output = os.path.join(path_casesreports_xml, specialty) + '.xml'
                    download_xmls(list_pmid_cr, file_output)
read_file_queries()
```
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'retina'
%reload_ext lab_black
```
## Number of Tetrodes Active >= 5
```
import logging
import string
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from src.figure_utilities import (
PAGE_HEIGHT,
ONE_COLUMN,
TWO_COLUMN,
save_figure,
set_figure_defaults,
)
from src.parameters import (
STATE_COLORS,
TRANSITION_TO_CATEGORY,
STATE_ORDER,
PROBABILITY_THRESHOLD,
)
set_figure_defaults()
from src.analysis import load_all_replay_info
replay_info = load_all_replay_info(
n_unique_spiking=5,
data_type="clusterless",
dim="1D",
probability_threshold=PROBABILITY_THRESHOLD,
speed_threshold=4,
exclude_interneuron_spikes=False,
)
from src.visualization import plot_upset_classification
classified_replay_info = replay_info.loc[
replay_info.is_classified & (replay_info.duration_classified > 0.015)
]
ax_dict, upset = plot_upset_classification(
classified_replay_info, intersection_frac_threshold=0.01,
)
save_figure(os.path.join("Figure5-supplemental2", "figure5_upset_n_tetrodes"))
n_states = classified_replay_info.loc[:, STATE_ORDER[:-1]].sum(axis=1)
print(
f"Number of single dynamic: {(n_states == 1).sum()} / {len(classified_replay_info)} or {(n_states == 1).mean() * 100:0.0f}%\n"
f"Number of multiple dynamics: {(n_states > 1).sum()} / {len(classified_replay_info)} or {(n_states > 1).mean() * 100:0.0f}%\n"
f"Number of >2 dynamics: {(n_states > 2).sum()} / {len(classified_replay_info)} or {(n_states > 2).mean() * 100:0.0f}%\n"
)
num = (classified_replay_info["Hover-Continuous-Mix"] & (n_states == 1)).sum()
denom = len(classified_replay_info)
print(
f"Number of Only Stationary-Continuous-Mix: {num} / {denom} or {num / denom * 100:0.0f}%\n"
)
is_scm = classified_replay_info["Hover-Continuous-Mix"] & (n_states == 1)
scm_duration = classified_replay_info.loc[is_scm].duration.median() * 1000
scm_distance_from_animal = classified_replay_info.loc[
is_scm
].replay_distance_from_actual_position.median()
print(f"Only Stationary-Continuous-Mix duration: {scm_duration:0.0f} ms")
print(
f"Only Stationary-Continuous-Mix distance from animal: {scm_distance_from_animal:0.0f} cm"
)
is_continuous = classified_replay_info["Continuous"]
continuous_duration = classified_replay_info.loc[is_continuous].duration.median() * 1000
continuous_distance_from_animal = classified_replay_info.loc[
is_continuous
].replay_distance_from_actual_position.median()
print(f"continuous duration: {continuous_duration:0.0f} ms")
print(f"continuous distance from animal: {continuous_distance_from_animal:0.0f} cm")
num = (classified_replay_info["Hover"] & (n_states == 1)).sum()
denom = len(classified_replay_info)
print(f"Number of Only Stationary: {num} / {denom} or {num / denom * 100:0.0f}%\n")
num = (classified_replay_info["Fragmented"] & (n_states == 1)).sum()
denom = len(classified_replay_info)
print(f"Number of Only Fragmented: {num} / {denom} or {num / denom * 100:0.0f}%\n")
has_short_duration_jump = (
(
classified_replay_info["Fragmented"]
| classified_replay_info["Fragmented-Continuous-Mix"]
)
& (
classified_replay_info["Hover"]
| classified_replay_info["Hover-Continuous-Mix"]
| classified_replay_info["Continuous"]
)
& (
(classified_replay_info["Fragmented_duration"] < 0.010)
| (classified_replay_info["Fragmented-Continuous-Mix_duration"] < 0.010)
)
)
num = has_short_duration_jump.sum()
denom = len(classified_replay_info)
print(f"Number of short duration jump: {num} / {denom} or {num / denom * 100:0.0f}%\n")
has_spatially_coherent_and_incoherent = (
classified_replay_info["Fragmented"]
| classified_replay_info["Fragmented-Continuous-Mix"]
) & (
classified_replay_info["Hover"]
| classified_replay_info["Hover-Continuous-Mix"]
| classified_replay_info["Continuous"]
)
num = has_spatially_coherent_and_incoherent.sum()
denom = len(classified_replay_info)
print(
f"Number of spatially coherent and incoherent: {num} / {denom} or {num / denom * 100:0.0f}%\n"
)
has_no_spatially_coherent_and_incoherent = (
classified_replay_info["Fragmented"]
| classified_replay_info["Fragmented-Continuous-Mix"]
) & ~(
classified_replay_info["Hover"]
| classified_replay_info["Hover-Continuous-Mix"]
| classified_replay_info["Continuous"]
)
num = has_no_spatially_coherent_and_incoherent.sum()
denom = len(classified_replay_info)
print(
f"Number of not spatially coherent and incoherent: {num} / {denom} or {num / denom * 100:0.0f}%\n"
)
import re
import ast
def convert_object_to_array(fixed_string):
pattern = r"""# Match (mandatory) whitespace between...
(?<=\]) # ] and
\s+
(?= \[) # [, or
|
(?<=[^\[\]\s])
\s+
(?= [^\[\]\s]) # two non-bracket non-whitespace characters
"""
# Replace such whitespace with a comma
fixed_string = re.sub(pattern, ",", fixed_string, flags=re.VERBOSE)
return np.array(ast.literal_eval(fixed_string))
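# Illustrative examples (not from the original notebook):
#   convert_object_to_array("[0.1 0.2  0.3]")          -> array([0.1, 0.2, 0.3])
#   convert_object_to_array("[[1.2 3.4]\n [5.6 7.8]]") -> array([[1.2, 3.4], [5.6, 7.8]])
# i.e. it turns the string repr of a NumPy array (as apparently stored in the
# replay_info columns) back into an array by re-inserting the missing commas.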
def get_norm_linear_position(replay_info):
non_local_stationary = replay_info.loc[
replay_info.Hover_replay_distance_from_actual_position > 30
]
norm_linear_position = []
for ripple_id, df in non_local_stationary.iterrows():
try:
temp = (
convert_object_to_array(df.Hover_replay_linear_position)
/ df.left_well_position
)
for pos in temp:
norm_linear_position.append(pos)
except TypeError:
norm_linear_position.append(
df.Hover_replay_linear_position / df.left_well_position
)
return np.asarray(norm_linear_position)
from src.visualization import (
plot_replay_distance_from_actual_position,
plot_category_duration,
plot_linear_position_markers,
plot_population_rate,
_plot_category,
)
import glob
saturation, fliersize = 0.7, 1
fig, axes = plt.subplots(
nrows=2, ncols=2, figsize=(TWO_COLUMN, PAGE_HEIGHT / 2), constrained_layout=True
)
# Duration of Dynamic
plot_category_duration(
classified_replay_info,
kind="box",
ax=axes[0, 0],
fliersize=fliersize,
saturation=saturation,
)
axes[0, 0].set_title("Duration")
axes[0, 0].set_xlim((0, 400))
sns.despine(ax=axes[0, 0], offset=5)
# Distance from Animal
plot_replay_distance_from_actual_position(
classified_replay_info,
kind="box",
ax=axes[0, 1],
fliersize=fliersize,
saturation=saturation,
)
axes[0, 1].set_title("Distance from Animal")
sns.despine(ax=axes[0, 1], offset=5)
axes[0, 1].set_xlim((0, 250))
axes[0, 1].set_yticks([])
axes[0, 1].spines["left"].set_visible(False)
# Non-Local Stationary Position
norm_non_local_hover = get_norm_linear_position(classified_replay_info)
sns.distplot(
norm_non_local_hover,
kde_kws=dict(
bw=0.020,
clip=(0, 1),
shade=True,
facecolor=STATE_COLORS["Hover"],
legend=False,
),
rug_kws=dict(color="black", alpha=0.5),
kde=True,
rug=True,
hist=False,
color=STATE_COLORS["Hover"],
ax=axes[1, 0],
)
axes[1, 0].set_xlabel("Normalized Position")
axes[1, 0].set_ylabel("Probability Density")
plot_linear_position_markers(
classified_replay_info,
is_normalized=True,
jitter=0.00,
zorder=101,
alpha=1,
ax=axes[1, 0],
linestyle="-",
fontsize=14,
)
sns.despine(ax=axes[1, 0], offset=5)
axes[1, 0].set_xlim((0, 1))
axes[1, 0].set_title("Non-Local Stationary Position")
n_non_local = norm_non_local_hover.size
axes[1, 0].text(0.75, 3.5, f"N = {n_non_local}", zorder=100, fontsize=9)
# Population firing rate
_plot_category(
classified_replay_info,
"population_rate",
kind="box",
ax=axes[1, 1],
fliersize=fliersize,
saturation=saturation,
)
axes[1, 1].set_xlim((0, 400))
axes[1, 1].set_xlabel("Rate [spikes / s]")
axes[1, 1].set_title("Multiunit Population Rate")
sns.despine(ax=axes[1, 1], offset=5)
axes[1, 1].set_yticks([])
axes[1, 1].spines["left"].set_visible(False)
# save_figure(os.path.join("Figure5", "figure5_dynamics_summary"))
from scipy.stats import ranksums
ranksums(
classified_replay_info.Hover_population_rate,
classified_replay_info.Continuous_population_rate,
)
np.nanmedian(classified_replay_info.Hover_population_rate), np.nanmedian(
classified_replay_info.Continuous_population_rate
)
num = ((classified_replay_info.Hover_replay_distance_from_actual_position > 30)).sum()
denom = len(classified_replay_info)
print(f"Number of Non-Local Stationary: {num} / {denom} or {num / denom * 100:0.0f}%\n")
num = (
(classified_replay_info.Hover_replay_distance_from_actual_position > 30)
& (n_states == 1)
).sum()
denom = ((classified_replay_info.Hover_replay_distance_from_actual_position > 30)).sum()
print(f"Number of Non-Local Stationary: {num} / {denom} or {num / denom * 100:0.0f}%\n")
f"{classified_replay_info.Hover_replay_distance_from_actual_position.max():0.0f}"
```
# A History of NLP
The history of NLP can be broken down in many different ways. We will be taking a look at the long-term history and the *approach* that researchers have taken through the decades. There are three *eras* of NLP that we can consider, which correlate with the trends in Machine Learning as a whole.
## Symbolic AI
The first era of NLP and AI was dominated by **symbolic** AI. This era covered the period from the mid 1950s to the late 1980s. It consists of a collection of methods that view AI as being solvable using *symbolic* representations of problems. For example, researchers would manually label and categorize every scenario that an AI might encounter, and write a set of logic/rule-based instructions on how to deal with each scenario.
Applied to NLP, we would write a fixed set of rules. So for a sentiment classification task, our rules may look something like:
```
IF 'happy' IN SENTENCE
SENTIMENT IS POSITIVE
IF 'sad' IN SENTENCE
SENTIMENT IS NEGATIVE
```
These sets of rules would of course be much more complex; as a simple extension of this toy example, a researcher might add `IF 'happy' IN SENTENCE AND 'not' BEFORE 'happy'; SENTIMENT IS NEGATIVE`.
The **benefit** of this approach is interpretability: all of the rules are written by humans and are human-readable, so we can understand what is happening.
However, there are many **drawbacks** to this approach. Even a simple symbolic representation of language is incredibly complex, so the researcher designing such a system must be an expert in many different areas of language. And even if they did understand everything there is to know about language (which, as far as I am aware, is not possible), there will always be strange nuances (context, for example) that are so complex that no reasonable person could ever expect to express them in a logical, symbolic representation. Encoding every possible scenario is simply not feasible, and there are always edge cases: an AI may have excellent conversational skills with most English speakers, but if an English speaker from some isolated Welsh village, with a unique set of slang words, attempts to converse with it, the AI will fail, because a symbolic representation of language is simply not flexible enough to function with even one unknown *'symbol'*.
Despite these drawbacks, we still use symbolic methods in present-day NLP. When we tokenize words we are creating *symbolic representations* of those words, although these methods are significantly less manual, and they form one part of a solution rather than the whole of it.
[Symbolic artificial intelligence, Wikipedia](https://en.wikipedia.org/wiki/Symbolic_artificial_intelligence)
[Physical symbol system, Wikipedia](https://en.wikipedia.org/wiki/Physical_symbol_system)
## Statistical AI
Statistical AI dominated from the 1980s until ~2010. During this time we relied very heavily on statistical machine learning methods such as logistic regression, Naive Bayes classification and so on. These methods became dominant with the increase in computational power, which allowed models to be tested and tuned more quickly, and on larger (but still limited) amounts of data.
The **benefits** of these statistical approaches were primarily a greater ability to generalize and to deal with outliers (to an extent), while also requiring less domain-specific understanding from researchers. The **drawbacks** were the limits of those benefits: yes, the models could generalize and handle outliers better, but they were still significantly restricted in this regard. These models struggled to adapt to other, even only slightly different, use-cases, and so they were siloed into very specific applications.
Today we still use many of these methods. Although many of their original applications have been superseded by the greater performance of neural nets, we will still find them used in some places. They are often used as part of a larger model/process, and much of the knowledge and many of the methods discovered during this statistical age led directly into the current one.
[Statistical learning theory, Wikipedia](https://en.wikipedia.org/wiki/Statistical_learning_theory)
## Neural AI
Neural AI exploded from 2010 onwards with the 'rediscovery' of the neural network. By 2010, computational power and the high availability of data had created the perfect conditions for neural nets to take centre stage. Neural networks require significant amounts of computational power and are incredibly data-hungry; pre-2010 there was simply not enough compute power, and very few 'big data' databases existed in the world. Researchers found that multilayer neural networks (deep learning) provided massively improved performance compared to the previous cutting-edge statistical models, and that the same models could be applied to a huge range of use-cases with ease.
The **benefits** of the new neural age of AI are incredibly diverse. We now have models that are reasonably adaptable, that can deal with outliers, and that are incredibly accurate, and applying them to real problems becomes easier every day. The **drawbacks** of the neural approach are perhaps harder to identify due to a lack of hindsight, but we can say that despite being more adaptable than symbolic or statistical methods, it is still fundamentally brittle - many of the models that we see playing games and beating human scores will break if the screen is rotated by a few degrees, and if you ask GPT-3 *"How many eyes does my foot have?"*, it will happily answer *"Your foot has two eyes."* [source](https://lacker.io/ai/2020/07/06/giving-gpt-3-a-turing-test.html).
Another drawback which gains a lot of attention at the moment is **interpretability**: neural models have become so complex, and evolve in such a way, that the people who build them often cannot explain certain behaviors of the models. This raises concerns about how far we can trust these models with important tasks, like self-driving, screening resumés for hiring (and not being racist or [sexist](https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-idUSKCN1MK08G)), or making financial decisions in the stock markets (see [2010 flash crash](https://en.wikipedia.org/wiki/2010_flash_crash) and [2017 Ethereum flash crash](https://www.cnbc.com/2017/06/22/ethereum-price-crash-10-cents-gdax-exchange-after-multimillion-dollar-trade.html)).
```
# !wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.train
# !wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testa
def parse(file):
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART-' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[1])
return left, right
left_train, right_train = parse('eng.train')
left_test, right_test = parse('eng.testa')
import re
import numpy as np
import tensorflow as tf
from tqdm import tqdm
def process_string(string):
    string = re.sub(r'[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
np.unique(right_train,return_counts=True)
word2idx = {'PAD': 0,'NUM':1,'UNK':2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
def parse_XY(texts, labels):
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0],batch.shape[1],maxlen),dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i,k]]):
temp[i,k,-1-no] = char2idx[c]
return temp
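# Note: generate_char_seq returns an int array of shape
# (batch, seq_len, longest word length). For each word, character ids are
# written starting from the last column and moving left, so every word ends up
# reversed and left-padded with PAD (0) to the longest word in the batch.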
X_seq, Y_seq = to_train_seq(train_X, train_Y)
X_char_seq = generate_char_seq(X_seq)
X_seq.shape
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
X_seq_test.shape
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
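# Architecture sketch (as read from the class below): character embeddings are
# encoded with stacked bidirectional LSTMs, the final character states are
# concatenated onto the word embeddings, the result is passed through
# bidirectional LSTMs whose cells are wrapped with Luong attention, and a
# linear-chain CRF layer scores and decodes the tag sequence.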
class Model:
def __init__(
self,
dim_word,
dim_char,
dropout,
learning_rate,
hidden_size_char,
hidden_size_word,
num_layers,
):
def cells(size, reuse = False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(
size,
initializer = tf.orthogonal_initializer(),
reuse = reuse,
),
output_keep_prob = dropout,
)
def luong(embedded, size):
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units = hidden_size_word, memory = embedded
)
return tf.contrib.seq2seq.AttentionWrapper(
cell = cells(hidden_size_word),
attention_mechanism = attention_mechanism,
attention_layer_size = hidden_size_word,
)
self.word_ids = tf.placeholder(tf.int32, shape = [None, None])
self.char_ids = tf.placeholder(tf.int32, shape = [None, None, None])
self.labels = tf.placeholder(tf.int32, shape = [None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(
tf.truncated_normal(
[len(word2idx), dim_word], stddev = 1.0 / np.sqrt(dim_word)
)
)
self.char_embeddings = tf.Variable(
tf.truncated_normal(
[len(char2idx), dim_char], stddev = 1.0 / np.sqrt(dim_char)
)
)
word_embedded = tf.nn.embedding_lookup(
self.word_embeddings, self.word_ids
)
char_embedded = tf.nn.embedding_lookup(
self.char_embeddings, self.char_ids
)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(
char_embedded, shape = [s[0] * s[1], s[-2], dim_char]
)
for n in range(num_layers):
(out_fw, out_bw), (
state_fw,
state_bw,
) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = cells(hidden_size_char),
cell_bw = cells(hidden_size_char),
inputs = char_embedded,
dtype = tf.float32,
scope = 'bidirectional_rnn_char_%d' % (n),
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(
char_embedded[:, -1], shape = [s[0], s[1], 2 * hidden_size_char]
)
word_embedded = tf.concat([word_embedded, output], axis = -1)
for n in range(num_layers):
(out_fw, out_bw), (
state_fw,
state_bw,
) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = luong(word_embedded, hidden_size_word),
cell_bw = luong(word_embedded, hidden_size_word),
inputs = word_embedded,
dtype = tf.float32,
scope = 'bidirectional_rnn_word_%d' % (n),
)
word_embedded = tf.concat((out_fw, out_bw), 2)
logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(
learning_rate = learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name = 'logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.reset_default_graph()
sess = tf.InteractiveSession()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 128
model = Model(dim_word,dim_char,dropout,learning_rate,
hidden_size_char,hidden_size_word,num_layers)
sess.run(tf.global_variables_initializer())
import time
for e in range(3):
lasttime = time.time()
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
pbar = tqdm(
range(0, len(train_X), batch_size), desc = 'train minibatch loop'
)
for i in pbar:
batch_x = train_X[i : min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i : min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
assert not np.isnan(cost)
train_loss += cost
train_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'test minibatch loop'
)
for i in pbar:
batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i : min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
assert not np.isnan(cost)
test_loss += cost
test_acc += acc
pbar.set_postfix(cost = cost, accuracy = acc)
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('time taken:', time.time() - lasttime)
print(
'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc)
)
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
real_Y, predict_Y = [], []
pbar = tqdm(
range(0, len(test_X), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i : min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
predicted = pred2label(sess.run(model.tags_seq,
feed_dict = {
model.word_ids: batch_x,
model.char_ids: batch_char,
},
))
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
from sklearn.metrics import classification_report
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
```
# Machine Learning Engineer Nanodegree
## Unsupervised Learning
## Project: Creating Customer Segments
Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
## Getting Started
In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.
Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
```
## Data Exploration
In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.
Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what each category represents in terms of products you could purchase.
```
# Display a description of the dataset
display(data.describe())
```
### Implementation: Selecting Samples
To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.
```
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [11, 100 ,200]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
```
### Question 1
Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
*What kind of establishment (customer) could each of the three samples you've chosen represent?*
**Hint:** Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant.
**Answer:**
- Index 11 - Restaurant. The purchase cost of fresh products is above average while those of the other product categories are below average. A restaurant uses fresh produce to prepare meals.
- Index 100 - Market or supermarket (mid or large size). Except for fresh products, whose purchase cost is close to the average, the purchase costs of all other categories are above average, especially grocery, detergents paper and delicatessen. A market usually sells all kinds of items.
- Index 200 - Retailer. The purchase costs of milk, grocery, frozen and detergents paper are above average while the others are well below average. People usually go to retailers such as convenience stores for fast food, milk and daily supplies.
### Implementation: Feature Relevance
One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.
In the code block below, you will need to implement the following:
- Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function.
- Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets.
- Use the removed feature as your target label. Set a `test_size` of `0.25` and set a `random_state`.
- Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.
- Report the prediction score of the testing set using the regressor's `score` function.
```
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeRegressor
def feature_relevance(selected_feature):
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
new_data = data.copy()
new_data.drop([selected_feature], axis = 1, inplace = True)
# TODO: Split the data into training and testing sets using the given feature as the target
X_train, X_test, y_train, y_test = train_test_split(new_data, data[selected_feature], test_size=0.25, random_state=0)
# TODO: Create a decision tree regressor and fit it to the training set
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X_train, y_train)
# TODO: Report the score of the prediction using the testing set
score = regressor.score(X_test, y_test)
return score
for feature in data.columns:
score = feature_relevance(feature)
print('The score for {} is {}.'.format(feature, score))
```
### Question 2
*Which feature did you attempt to predict? What was the reported prediction score? Is this feature necessary for identifying customers' spending habits?*
**Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data.
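For reference, the regressor's `score` method reports the coefficient of determination

$$R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2},$$

where $\hat{y}_i$ are the model's predictions and $\bar{y}$ is the mean of the observed targets; $R^2$ becomes negative when the model predicts worse than simply predicting that mean.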
**Answer:**
I tried a few features.
- Detergents_Paper. The score is 0.73. This feature can be predicted from the other features, so it isn't necessary for identifying customers' spending habits.
- Delicatessen. The score is -11.66. This feature cannot be predicted from the other features, so it is necessary for identifying customers' spending habits.
- Milk. The score is 0.36. This feature can barely be predicted from the other features, so it is necessary for identifying customers' spending habits.
### Visualize Feature Distributions
To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix.
```
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
### Question 3
*Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?*
**Hint:** Is the data normally distributed? Where do most of the data points lie?
**Answer:**
There are pairs of features which exhibit some degree of correlation. For example, Detergents_Paper has a positive correlation with Milk and Grocery respectively. This result agrees with the high R^2 score.
The data is right-skewed; most of the data points lie on the left (low-value) side.
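The pairwise correlations can also be checked numerically rather than visually; a minimal sketch (not part of the original template), assuming `data` is the DataFrame loaded above:
```
# Pearson correlation between every pair of product categories;
# large positive entries (e.g. Detergents_Paper vs. Grocery) back up the scatter-matrix reading
print(data.corr())
```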
## Data Preprocessing
In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.
### Implementation: Feature Scaling
If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
In the code block below, you will need to implement the following:
- Assign a copy of the data to `log_data` after applying logarithmic scaling. Use the `np.log` function for this.
- Assign a copy of the sample data to `log_samples` after applying logarithmic scaling. Again, use `np.log`.
```
# TODO: Scale the data using the natural logarithm
log_data = data.apply(np.log)
# TODO: Scale the sample data using the natural logarithm
log_samples = samples.apply(np.log)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
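As a side note, the Box-Cox transform mentioned above could be used instead of the natural logarithm; a minimal sketch with SciPy (not used in the rest of this project):
```
from scipy.stats import boxcox

# Hypothetical alternative to np.log: fit a Box-Cox power transform for each feature.
# boxcox returns the transformed values and the fitted lambda when no lambda is supplied.
boxcox_data = data.copy()
for feature in data.columns:
    boxcox_data[feature], lmbda = boxcox(data[feature])
    print('Box-Cox lambda for {}: {:.3f}'.format(feature, lmbda))
```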
### Observation
After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).
Run the code below to see how the sample data has changed after having the natural logarithm applied to it.
```
# Display the log-transformed sample data
display(log_samples)
```
### Implementation: Outlier Detection
Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identifying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
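In other words, a value $x$ of a given feature is flagged as an outlier when

$$x < Q_1 - 1.5\,(Q_3 - Q_1) \quad \text{or} \quad x > Q_3 + 1.5\,(Q_3 - Q_1),$$

where $Q_1$ and $Q_3$ are the 25th and 75th percentiles of that feature.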
In the code block below, you will need to implement the following:
- Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this.
- Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`.
- Assign the calculation of an outlier step for the given feature to `step`.
- Optionally remove data points from the dataset by adding indices to the `outliers` list.
**NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
Once you have performed this implementation, the dataset will be stored in the variable `good_data`.
```
# For each feature find the data points with extreme high or low values
# Returns a dict to hold (feature, outlier_index_list)
def find_outliers():
feature_outliers = {}
for feature in log_data.keys():
# TODO: Calculate Q1 (25th percentile of the data) for the given feature
Q1 = np.percentile(log_data[feature], 25)
# TODO: Calculate Q3 (75th percentile of the data) for the given feature
Q3 = np.percentile(log_data[feature], 75)
# TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
step = 1.5 * (Q3 - Q1)
# Display the outliers
print "Data points considered outliers for the feature '{}':".format(feature)
        outlier = ~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))
display(log_data[outlier])
outlier_idx = log_data[outlier].index.tolist()
feature_outliers[feature] = outlier_idx
return feature_outliers
# OPTIONAL: Select the indices for data points you wish to remove
# If an index appears as an outlier in more than one feature, add it to outliers
outliers_set = set()
seen = []
feature_outliers = find_outliers()
for k1, v1 in feature_outliers.iteritems():
for k2, v2 in feature_outliers.iteritems():
if k1 != k2 and not (k2 in seen):
same = set(v1).intersection(set(v2))
if len(same) > 0:
print('{} are in {} and {}'.format(same, k1, k2))
outliers_set.update(same)
seen.append(k1)
outliers = list(outliers_set)
print('Outlier indexes {} '.format(outliers))
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
```
### Question 4
*Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset? If any data points were added to the `outliers` list to be removed, explain why.*
**Answer:**
There are 5 data points that appear in more than one feature's outlier list. These data points should be removed because they don't represent the data well and tend to make the analysis inaccurate.
- 65 - outlier in Frozen and Fresh
- 66 - outlier in Delicatessen and Fresh
- 75 - outlier in Grocery and Detergents_Paper
- 128 - outlier in Delicatessen and Fresh
- 154 - outlier in Grocery, Milk, Delicatessen
## Feature Transformation
In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.
### Implementation: PCA
Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.
In the code block below, you will need to implement the following:
- Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`.
- Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
pca = PCA(n_components=6)
pca.fit(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = vs.pca_results(good_data, pca)
```
### Question 5
*How much variance in the data is explained* ***in total*** *by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.*
**Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the individual feature weights.
```
print('First two {}'.format(0.4430 + 0.2638))
print('First four {}'.format(0.4430 + 0.2638 + 0.1231 + 0.1012))
```
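Rather than hard-coding the ratios read off the plot, the cumulative explained variance could be computed directly from the fitted PCA object; a small sketch, assuming `pca` is the six-component fit above:
```
import numpy as np

# Cumulative explained variance for dimensions 1 through 6
print(np.cumsum(pca.explained_variance_ratio_))
```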
**Answer:**
The total variance explained by the first and second PCs is 0.7068, and by the first four PCs 0.9311.
- 1st PC: The customer may be a retailer. The spending has large positive weights on Detergents_Paper, Grocery and Milk, a small positive weight on Delicatessen, and negative weights on Fresh and Frozen.
- 2nd PC: The customer may be a restaurant. The spending has large positive weights on Fresh, Frozen and Delicatessen, and small positive weights on the rest.
- 3rd PC: The customer may be a simple cafeteria that sells delicatessen and frozen food. The spending has large positive weights on Frozen and Delicatessen, a small positive weight on Milk, a large negative weight on Fresh, and small negative weights on Detergents_Paper and Grocery.
- 4th PC: The customer could be a company whose employees mostly consume frozen food. The spending has a large positive weight on Frozen, small positive weights on Detergents_Paper, Grocery and Milk, a large negative weight on Delicatessen, and a small negative weight on Fresh.
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.
```
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
```
### Implementation: Dimensionality Reduction
When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a significant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.
In the code block below, you will need to implement the following:
- Assign the results of fitting PCA in two dimensions with `good_data` to `pca`.
- Apply a PCA transformation of `good_data` using `pca.transform`, and assign the results to `reduced_data`.
- Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2)
pca.fit(good_data)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
```
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions.
```
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
```
## Visualizing a Biplot
A biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features.
Run the code cell below to produce a biplot of the reduced-dimension data.
```
# Create a biplot
vs.biplot(good_data, reduced_data, pca)
```
### Observation
Once we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. For instance, a point in the lower right corner of the figure will likely correspond to a customer that spends a lot on `'Milk'`, `'Grocery'` and `'Detergents_Paper'`, but not so much on the other product categories.
From the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier?
## Clustering
In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.
### Question 6
*What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?*
**Answer:**
Referred to [This Answer](https://www.quora.com/What-is-the-difference-between-K-means-and-the-mixture-model-of-Gaussian)
#### K-means
- The assignment of a cluster is hard, meaning each data point belongs to exactly one cluster.
- Advantages
 * Fast running time.
 * Better for high-dimensional data.
 * Easy to interpret and implement.
- Disadvantages
 * Assumes clusters are spherical, so it does not work well with complex geometric (mostly non-linear) shapes.
 * Hard assignment might lead to misgrouping.
#### Gaussian Mixture Model
- The assignment of a cluster is soft, meaning it uses the probability that a sample belongs to each cluster.
- Advantages
 * Does not assume clusters to be of any particular geometry; works well with non-linear geometric distributions as well.
 * Does not bias the clusters toward a specific (circular) structure, as K-Means does.
- Disadvantages
 * Uses all the components it has access to, so initializing the clusters is difficult when the dimensionality of the data is high.
* Difficult to interpret.
#### Choice
Initially I chose the Gaussian Mixture Model because the dimensionality isn't high, the shape of the clusters is unknown, and hard assignment may not be appropriate since the customer data showed a mixture of different categories.
But after comparing the results of the two models, I found that K-Means has a better silhouette score. After comparing against the hidden variables as well, K-Means still looked better, so I'd rather start with the simpler model.
### Implementation: Creating Clusters
Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.
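For a single point $i$, the silhouette coefficient compares the mean distance to the other points in its own cluster, $a(i)$, with the mean distance to the points in the nearest other cluster, $b(i)$:

$$s(i) = \frac{b(i) - a(i)}{\max\{a(i),\, b(i)\}}.$$

The score reported by `silhouette_score` is the mean of $s(i)$ over all points.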
In the code block below, you will need to implement the following:
- Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`.
- Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`.
- Find the cluster centers using the algorithm's respective attribute and assign them to `centers`.
- Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`.
- Import `sklearn.metrics.silhouette_score` and calculate the silhouette score of `reduced_data` against `preds`.
- Assign the silhouette score to `score` and print the result.
```
# TODO: Apply your clustering algorithm of choice to the reduced data
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
def kmeans_clustering(n_classes):
clusterer = KMeans(n_clusters=n_classes, random_state=0)
clusterer.fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.cluster_centers_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
score = silhouette_score(reduced_data, preds, metric='euclidean', random_state=0)
return (score, reduced_data, preds, centers, pca_samples, sample_preds)
for i in range(2,7):
score = kmeans_clustering(i)[0]
print('n_classes={}, score={}'.format(i, score))
```
### Question 7
*Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?*
**Answer:**
- n_classes=2, score=0.426281015469
- n_classes=3, score=0.39689092645
- n_classes=4, score=0.332009582682
- n_classes=5, score=0.350990778931
- n_classes=6, score=0.366608987343
From the above data, the best silhouette score occurs when there are two clusters.
### Cluster Visualization
Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
```
# Display the results of the clustering from implementation
_, reduced_data, preds, centers, pca_samples, _ = kmeans_clustering(2)
vs.cluster_results(reduced_data, preds, centers, pca_samples)
```
## Extra
I am curious about the result of GMM. The result is not as good as K-Means.
```
from sklearn.mixture import GMM
from sklearn.metrics import silhouette_score
def gmm_clustering(n_classes):
clusterer = GMM(n_components=n_classes, covariance_type='full', random_state=0)
clusterer.fit(reduced_data)
preds = clusterer.predict(reduced_data)
centers = clusterer.means_
sample_preds = clusterer.predict(pca_samples)
score = silhouette_score(reduced_data, preds, metric='euclidean', random_state=0)
return (score, reduced_data, preds, centers, pca_samples, sample_preds)
for i in range(2,7):
score = gmm_clustering(i)[0]
print('n_classes={}, score={}'.format(i, score))
_, reduced_data, preds, centers, pca_samples, _ = gmm_clustering(2)
vs.cluster_results(reduced_data, preds, centers, pca_samples)
```
Since the result of K-Means is better, I use the K-Means result in the following exercises.
```
_, reduced_data, preds, centers, pca_samples, sample_preds = kmeans_clustering(2)
```
### Implementation: Data Recovery
Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
In the code block below, you will need to implement the following:
- Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.
- Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.
```
# TODO: Inverse transform the centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
```
### Question 8
Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. *What set of establishments could each of the customer segments represent?*
**Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`.
**Answer:**
- Segment 0: Restaurant. The spending in all categories is lower than average, meaning the customer isn't big. The customer spends mostly on Fresh, Milk, Grocery and Frozen.
- Segment 1: Retailer. The customer spends more than average on Milk, Grocery and Detergents_Paper, meaning the customer is large. It also spends a good amount on Fresh and Delicatessen.
### Question 9
*For each sample point, which customer segment from* ***Question 8*** *best represents it? Are the predictions for each sample point consistent with this?*
Run the code block below to find which cluster each sample point is predicted to be.
```
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
```
**Answer:**
- Sample point 1
Index 11 - Restaurant. The prediction is the same as the assumption.
- Sample point 2
Index 100 - Retailer. The assumption was a market or supermarket, which is one kind of retailer.
- Sample point 3
Index 200 - Retailer. The prediction is the same as the assumption.
Although the last two data points are both retailers, they sell products that focus on different categories.
## Conclusion
In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships.
### Question 10
Companies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. *How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?*
**Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most?
**Answer:**
The change will affect customers differently. More frequent delivery is important for customers 1) who spend a lot on Fresh, Milk and Delicatessen, and 2) who are small and don't keep much stock.
Of the two segments, segment 0 will react more positively than segment 1.
In the PCA analysis, dimension 2 puts more weight on Fresh, Frozen and Delicatessen. We should definitely include this dimension in the analysis. A hypothetical sketch of how the test groups could be drawn per segment follows.
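The sketch below is illustrative only (the group assignment is not part of the project code) and reuses the `preds` cluster labels from above:
```
import numpy as np

# 'preds' holds the K-Means segment label for each customer in reduced_data
rng = np.random.RandomState(0)
for segment in np.unique(preds):
    members = rng.permutation(np.where(preds == segment)[0])
    half = len(members) // 2
    group_a, group_b = members[:half], members[half:]
    # group_a keeps the 5-day service, group_b switches to 3 days; compare reactions per segment
    print('Segment {}: {} customers in group A, {} in group B'.format(segment, len(group_a), len(group_b)))
```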
### Question 11
Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service.
*How can the wholesale distributor label the new customers using only their estimated product spending and the* ***customer segment*** *data?*
**Hint:** A supervised learner could be used to train on the original customers. What would be the target variable?
**Answer:**
The steps for labeling the new customers are as follows (a code sketch follows the list).
1. Data preprocessing: apply the natural logarithm to the new customers' data.
2. Feature transformation: use the fitted PCA to transform the preprocessed data.
3. Classification: use one of the classification methods (Decision Trees, SVM, Logistic Regression, ...) to predict the label. The target variable is the cluster label: treat the preprocessed and transformed existing customers' data as the training features and the assigned cluster as the training label. The test features are the new customers' data after the first two steps.
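A minimal sketch of this pipeline, assuming `new_customers` is a hypothetical DataFrame holding the ten estimated spending rows with the same six columns as `data`, and reusing the two-dimensional `pca` fit and the K-Means labels `preds` from above (Logistic Regression is just one of the possible classifiers listed):
```
from sklearn.linear_model import LogisticRegression

# Steps 1 and 2: same preprocessing and PCA projection as the existing customers
new_log = new_customers.apply(np.log)
new_reduced = pca.transform(new_log)

# Step 3: train on the existing (reduced) customers with the cluster label as target
clf = LogisticRegression()
clf.fit(reduced_data, preds)
print(clf.predict(new_reduced))
```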
### Visualizing Underlying Distributions
At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.
Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.
```
# Display the clustering results based on 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
```
### Question 12
*How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? Would you consider these classifications as consistent with your previous definition of the customer segments?*
**Answer:**
- The number of clusters is two, which matches what I chose.
- The distributions of the data points before and after reintroducing the 'Channel' feature are somewhat different. Some data points overlap between the two clusters in the underlying distribution, while there is almost no overlap in my analysis. But there are still many data points located away from the overlapping area that could be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes'.
- The result is mostly consistent with my previous definition of the customer segments. For example, sample points 2 and 3 have the same label. But sample point 1 is misclassified: it is located in the heart of cluster 1 but labelled as cluster 2 in the underlying distribution.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
# "Hong Kong Elevation map with rayshader (with R)"
> Inspired by https://www.reddit.com/r/dataisbeautiful/comments/bjp8bg/the_united_states_of_elevation_oc/. This is my little weekend project, Hong Kong elevation tile with `rayshader`, powered by `fastpages` with Jupyter notebook! I haven't used R in years, so I spent a lot more time than expected to finish this.
- toc: true
- badges: true
- comments: true
- categories: [R]
- hide: false
This post mainly reproduces the following blog with different data: https://www.tylermw.com/a-step-by-step-guide-to-making-3d-maps-with-satellite-imagery-in-r/. My impression is that R does graphics so much better than Python (`ggplot`, and now `rayshader` for 3D plots!).
## Data
Two datasets were used for these images.
Landsat 8 for RGB
* LC08_L1TP_122044_20200218_20200225_01_T1.TIF
SRTM 30M resolution elevation map
* n21e113.hgt
* n21e114.hgt
* n22e113.hgt
* n22e114.hgt
The USGS explorer is a very nice tool for searching for data.
I actually couldn't find a single Landsat image covering the whole of Hong Kong (part of the western side is missing). Further enhancement is needed to stitch different images together.
## Setup
1. conda with R Kernel
2. Jupyter Notebook
3. fastpages
4. rayshader
> Use conda install even for R packages; I spent hours getting the environment working, going back and forth between Windows and Linux
```
## Library
library(rayshader)
library(sp)
library(raster)
library(scales)
library(dplyr)
elevation1 = raster::raster("../data/rayshader/HongKong/N21E113.hgt")
elevation2 = raster::raster("../data/rayshader/HongKong/N21E114.hgt")
elevation3 = raster::raster("../data/rayshader/HongKong/N22E113.hgt")
elevation4 = raster::raster("../data/rayshader/HongKong/N22E114.hgt")
```
Let's plot the elevation map. The whole image is green-ish because most of the area is ocean, which sits at sea level. The orange color indicates higher elevation.
```
hk_elevation = raster::merge(elevation1,elevation2, elevation3, elevation4)
height_shade(raster_to_matrix(hk_elevation)) %>%
plot_map();
```

Next, we are going to process the RGB image from Landsat-8. The raw JPEG looks like this.

Raw satellite images require some preprocessing before they look like what we expect.
```
hk_r = raster::raster("../data/rayshader/HongKong/LC08_L1TP_122044_20200218_20200225_01_T1_B4.TIF")
hk_g = raster::raster("../data/rayshader/HongKong/LC08_L1TP_122044_20200218_20200225_01_T1_B3.TIF")
hk_b = raster::raster("../data/rayshader/HongKong/LC08_L1TP_122044_20200218_20200225_01_T1_B2.TIF")
hk_rbg_corrected = sqrt(raster::stack(hk_r, hk_g, hk_b))
raster::plotRGB(hk_rbg_corrected);
```

The image is quite hazy and doesn't look like the JPEG we saw earlier. We need to improve the contrast.
```
# Since the RGB image and elevation map does not use the same coordinate system, we need to do some projections.
hk_elevation_utm = raster::projectRaster(hk_elevation, crs = crs(hk_r), method = "bilinear")
crs(hk_elevation_utm)
bottom_left = c(y=113.888, x=22.1365)
top_right = c(y=114.330, x=22.5493)
extent_latlong = sp::SpatialPoints(rbind(bottom_left, top_right), proj4string=sp::CRS("+proj=longlat +ellps=WGS84 +datum=WGS84"))
extent_utm = sp::spTransform(extent_latlong, raster::crs(hk_elevation_utm))
e = raster::extent(extent_utm)
e
hk_rgb_cropped = raster::crop(hk_rbg_corrected, e)
elevation_cropped = raster::crop(hk_elevation_utm, e)
names(hk_rgb_cropped) = c("r","g","b")
hk_r_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$r)
hk_g_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$g)
hk_b_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$b)
hkel_matrix = rayshader::raster_to_matrix(elevation_cropped)
hk_rgb_array = array(0,dim=c(nrow(hk_r_cropped),ncol(hk_r_cropped),3))
hk_rgb_array[,,1] = hk_r_cropped/255 #Red layer
hk_rgb_array[,,2] = hk_g_cropped/255 #Green layer
hk_rgb_array[,,3] = hk_b_cropped/255 #Blue layer
hk_rgb_array = aperm(hk_rgb_array, c(2,1,3))
plot_map(hk_rgb_array)
```

The whole image is bright because we have some dark pixels in the corner. It's similar to taking pictures in a dark room: any light source becomes a bright spot.
We can improve this by stretching the intensity. It's really no different from how you fine-tune your images on Instagram.
```
hk_rgb_cropped = raster::crop(hk_rbg_corrected, e)
elevation_cropped = raster::crop(hk_elevation_utm, e)
# Stretch the images
hk_rgb_cropped <-
raster::stretch(hk_rgb_cropped,
minq = .01,
maxq = .999,
)
names(hk_rgb_cropped) = c("r","g","b")
hk_r_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$r)
hk_g_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$g)
hk_b_cropped = rayshader::raster_to_matrix(hk_rgb_cropped$b)
hkel_matrix = rayshader::raster_to_matrix(elevation_cropped)
hk_rgb_array = array(0,dim=c(nrow(hk_r_cropped),ncol(hk_r_cropped),3))
hk_rgb_array[,,1] = hk_r_cropped/255 #Red layer
hk_rgb_array[,,2] = hk_g_cropped/255 #Green layer
hk_rgb_array[,,3] = hk_b_cropped/255 #Blue layer
hk_rgb_array = aperm(hk_rgb_array, c(2,1,3))
hk_rgb_contrast = scales::rescale(hk_rgb_array,to=c(0,1))
plot_map(hk_rgb_contrast)
```

Now we get a much better image.
```
plot_3d(hk_rgb_contrast, hkel_matrix, windowsize = c(1100,900), zscale = 15, shadowdepth = -50,
zoom=0.5, phi=45,theta=-15,fov=70, background = "#F2E1D0", shadowcolor = "#523E2B")
render_scalebar(limits=c(0, 5, 10),label_unit = "km",position = "W", y=50,
scale_length = c(0.33,1))
render_compass(position = "N")
render_snapshot(title_text = "Hong Kong | Imagery: Landsat 8 | DEM: 30m SRTM",
title_bar_color = "#000000", title_color = "white", title_bar_alpha = 1,
clear=TRUE, )
```

## Task 1
A vector is a special case of a matrix of size 1×N or N×1. Review the material on vectors, paying special attention to the product A·B.
Compute, if possible without programming: $(5E)^{-1}$, where E is the 5×5 identity matrix.
```
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
E = np.identity(5)
E
A = 5*E
A
# The determinant of a diagonal matrix is the product of the entries on its main diagonal
D = 5**5
D
A11 = 5**4
A11
```
A11 = Ann = 625
```
A_1 = np.identity(5)*(A11/D)
A_1
```
Check: $A \cdot A^{-1} = E$
```
np.dot(A_1, A)
```
## Task 2
Compute the determinant:
$
\begin{equation}
\begin{vmatrix}
1 & 2 & 3 \\
4 & 0 & 6 \\
7 & 8 & 9
\end{vmatrix}
\end{equation}
$
$
\Delta = 1 \cdot 0 \cdot 9 + 2 \cdot 6 \cdot 7 + 3 \cdot 8 \cdot 4 - 7 \cdot 0 \cdot 3 - 6 \cdot 8 \cdot 1 - 9 \cdot 4 \cdot 2
$
$\Delta = 60$
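As a quick check, the same determinant with numpy (a small verification snippet; the matrix is just retyped here):
```
M = np.matrix([[1, 2, 3], [4, 0, 6], [7, 8, 9]])
np.linalg.det(M)  # ≈ 60.0, matching the hand computation
```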
## Task 3
Compute the inverse of the given matrix:
$
\begin{equation}
\begin{vmatrix}
1 & 2 & 3 \\
4 & 0 & 6 \\
7 & 8 & 9
\end{vmatrix}
\end{equation}
$
```
A = np.matrix([[1, 2,3], [4,0,6],[7,8,9]])
A
A11 = -6*8
A12 = -(4*9-6*7)
A13 = 4*8
A21 = -(2*9 - 3*8)
A22 = 1*9 - 3*7
A23 = -(1*8 - 2*7)
A31 = 2*6
A32 = -(1*6 - 3*4)
A33 = -2*4
A_1 = np.matrix([[A11, A12, A13], [A21, A22, A23], [A31, A32, A33]])/60
A_inv = A_1.T
A_inv
np.linalg.inv(A)
```
Check:
```
np.dot(A, A_inv)
np.dot(A_inv, A)
```
## Task 4
Give an example of a 4×4 matrix whose rank is 1
```
a = np.matrix([[1,2,3,4], [2,4,6,8], [3,6,9,12], [4,8,12,16]])
a
#np.ndim(a)
np.linalg.matrix_rank(a)
```
## Task 5
Compute the dot product of the two vectors:
(1, 5) and (2, 8)
```
a = np.array([1,5])
b = np.array([2,8])
X, Y = np.array([0, 0]), np.array([0, 0])
U, V = np.array([a[0], b[0]]), np.array([a[1], b[1]])
plt.quiver(X, Y, U, V, angles='xy', scale_units = 'xy', scale=1)
plt.xlim(-1, 3)
plt.ylim(-1, 9)
plt.grid()
plt.show()
s = 2 + 5*8
s
```
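The same result can be obtained directly with numpy (reusing the `a` and `b` defined above):
```
np.dot(a, b)  # 42, same as s
```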
## Task 6
Compute the scalar triple product of the three vectors:
(1, 5, 0), (2, 8, 7) and (7, 1.5, 3)
```
a = np.array([1,5,0])
b = np.array([2,8,7])
c = np.array([7,1.5,3])
```
$\vec a \,x \, \vec b=
\begin{vmatrix}
i & j & k \\
1 & 5 & 0 \\
2 & 8 & 7
\end{vmatrix}
$
```
ab = np.array([(5*7), -7, 8-10])
ab
v = np.cross(a, b)
v
vc = 35*7 - 7*1.5 - 2*3
vc
np.inner(v, c)
```
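Equivalently, the scalar triple product is the determinant of the matrix whose rows are the three vectors (reusing `a`, `b`, `c` from above):
```
np.linalg.det(np.vstack([a, b, c]))  # ≈ 228.5, same as vc and np.inner(v, c)
```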
## Task 7
Solve the linear system:
$
\begin{bmatrix}
1 & 2 & 3 \\
4 & 0 & 6 \\
7 & 8 & 9
\end{bmatrix} \cdot X =
\begin{bmatrix}
12 \\ 2 \\ 1
\end{bmatrix}
$
```
A = np.matrix([[1, 2, 3], [4, 0, 6], [7, 8, 9]])
A
B = np.matrix([[12], [2], [1]])
B
#np.linalg.solve(A, B)
X = np.dot(np.linalg.inv(A), B)
X
```
## Task 8
Find the least-squares pseudo-solution of the system:
$
x + 2y - z = 1 \\
3x - 4y + 0z = 7 \\
8x - 5y + 2z = 12 \\
2x + 0y - 5z = 7 \\
11x + 4y - 7z = 15
$
```
A = np.matrix([[1, 2, -1], [3, -4, 0], [8, -5, 2], [2, 0, -5], [11, 4, -7]])
A
B = np.matrix([1, 7, 12, 7, 15]).T
B
X, residuals, rnk, s = np.linalg.lstsq(A, B, rcond=None)
X
np.dot(A, X)
```
## Task 9
How many solutions does the following linear system have:
$
\begin{bmatrix}
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 9
\end{bmatrix} \cdot X =
\begin{bmatrix}
12 \\ 2 \\ 1
\end{bmatrix}
$
```
A = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
B = np.matrix([[12], [2], [1]])
np.linalg.det(A)
```
The determinant is zero, so the matrix is singular. Moreover, the third row of A equals 2·(second row) − (first row), while for the right-hand side 2·2 − 12 = −8 ≠ 1, so the system is inconsistent and has no solutions.
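A rank check confirms the inconsistency (reusing `A` and `B` from the cell above):
```
np.linalg.matrix_rank(A), np.linalg.matrix_rank(np.hstack([A, B]))  # (2, 3): rank(A) < rank([A|B]), so no solutions
```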
```
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
fig = figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.5)
Y = np.arange(-5, 5, 0.5)
X, Y = np.meshgrid(X, Y)
Z1 = 4 - 2/3*Y - 1/3*X
Z2 = 2/6 - 2/3*X - 5/6*Y
Z3 = 1/9 - 7/9*X - 8/9*Y
ax.plot_wireframe(X, Y, Z1, color='red')
ax.plot_wireframe(X, Y, Z2, color='green')
ax.plot_wireframe(X, Y, Z3, color='blue')
show()
fig = figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.5)
Y = np.arange(-5, 5, 0.5)
X, Y = np.meshgrid(X, Y)
Z1 = 0 - 2/3*Y - 1/3*X
Z2 = 0/6 - 2/3*X - 5/6*Y
Z3 = 0/9 - 7/9*X - 8/9*Y
ax.plot_surface(X, Y, Z1, color='red')
ax.plot_surface(X, Y, Z2, color='green')
ax.plot_surface(X, Y, Z3, color='blue')
show()
A = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
B = np.matrix([[0], [0], [0]])
```
To make the system consistent, change the vector B to [0, 0, 0]. In that case the system has the trivial solution [0, 0, 0] (and, since A is singular, infinitely many non-trivial ones as well).
## Task 10
Compute the LU decomposition of the matrix:
$
\begin{vmatrix}
1 & 2 & 3 \\
2 & 16 & 21 \\
4 & 28 & 73
\end{vmatrix}
$
After that, choose a right-hand-side vector and solve the resulting linear system of three equations with this matrix.
```
import scipy
import scipy.linalg
A = np.matrix([[1, 2, 3], [2, 16, 21], [4, 28, 73]])
P, L, U = scipy.linalg.lu(A)
print('P\n',P,'\nL\n', L, '\nU\n', U)
np.dot(P.T,A)-np.dot(L,U)
B = np.matrix([1,2,3]).T
B
Y = np.dot(np.linalg.inv(L), B)
Y
X = np.dot(np.linalg.inv(U), Y)
X
```
Check:
```
np.dot(A, X)
```
The values match, but the rows came out permuted... `¯\_(ツ)_/¯` This is because `scipy.linalg.lu` returns the factorization A = P·L·U, so solving L·U·X = B actually solves the row-permuted system Pᵀ·A·X = B; to solve A·X = B, apply Pᵀ to B before the forward substitution (see the sketch below).
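A minimal sketch of the fix, reusing `P`, `L`, `U`, `A` and `B` from the cell above:
```
Y = np.linalg.solve(L, np.dot(P.T, B))  # apply the permutation to B first
X = np.linalg.solve(U, Y)
np.dot(A, X)                            # now reproduces B in the original row order
```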
## Task 11
Find the minimum-norm pseudo-solution of the underdetermined system:
$x + 2y - z = 1 \\ 8x - 5y + 2z = 12$
```
fig = figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.5)
Y = np.arange(-5, 5, 0.5)
X, Y = np.meshgrid(X, Y)
Z1 = X + 2*Y - 1
Z2 = 6 - 4*X + 5/2*Y
ax.plot_surface(X, Y, Z1, color='blue')
ax.plot_surface(X, Y, Z2, color='green')
show()
A = np.matrix([[1, 2, -1], [8, -5, 2]])
B = np.matrix([1, 12]).T
X, res, r, s = np.linalg.lstsq(A,B, rcond=None)
np.dot(A,X)
# the minimum is attained at the point
X
```
## Task 12
Find one of the pseudo-solutions of the singular system:
$
\begin{bmatrix}
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 9
\end{bmatrix} \cdot X =
\begin{bmatrix}
2 \\ 5 \\ 11
\end{bmatrix}
$
```
A = np.matrix([[1,2,3],[4,5,6],[7,8,9]])
B = np.matrix([2,5,11]).T
np.linalg.det(A)
X, res, r, s = np.linalg.lstsq(A,B, rcond=None)
X
```
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense, LSTM, Conv1D, MaxPooling1D, Flatten, TimeDistributed, ConvLSTM2D, Reshape
import tensorflow as tf
import sklearn.metrics as sm
import keras
from keras import backend as K
data = pd.read_csv("train.csv")
dtest = pd.read_csv("test.csv")
data.head()
data = data.drop(["ID","X0","X1","X2","X3","X4","X5","X6","X8" ], axis = 1)
data.head()
data.shape
dtest
dtest = dtest.drop(["ID","X0","X1","X2","X3","X4","X5","X6","X8" ], axis = 1)
dtest.head()
dtest.shape
X_train, y_train = data.values[0:3499,1:], data.values[0:3499, :1].ravel()
X_valid, y_valid = data.values[3500:4208,1:], data.values[3500:4208, :1].ravel()
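# note: these slices skip row 3499 entirely; using 0:3500 and 3500:4208 would make the train/validation split contiguous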
X_test = dtest.values[:, :]
print("X_train----",X_train.shape)
print("y_train----",y_train.shape)
print("X_valid----",X_valid.shape)
print("y_valid----",y_valid.shape)
print("X_test-----",X_test.shape)
X_train[0]
y_train[0]
# y_train = min_max_scaler.fit_transform(y_train)
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def metrics(pred, y_test):
evs = sm.explained_variance_score(y_test, pred)
me = sm.max_error(y_test, pred)
mae = sm.mean_absolute_error(y_test, pred)
mse = sm.mean_squared_error(y_test, pred)
rmse = np.sqrt(mse)
#msle = sm.mean_squared_log_error(y_test, pred)
m_ae = sm.median_absolute_error(y_test, pred)
r2 = sm.r2_score(y_test, pred)
#mpd = sm.mean_poisson_deviance(y_test, pred)
#mgd = sm.mean_gamma_deviance(y_test, pred)
    mape = mean_absolute_percentage_error(y_test, pred)  # (y_true, y_pred) order, consistent with the other metrics
return({'Explained Variance Score': evs,
'Max Error': me,
'Mean Absolute Error': mae,
'Mean Squared Error': mse,
'Root Mean Squared Error': rmse,
#'Mean Squared Log Error': msle,
'Median Absolute Error': m_ae,
'R² Score': r2,
#'Mean Poisson Deviance': mpd,
#'Mean Gamma Deviance': mgd,
'Mean Absolute Percentage Error': mape
})
subsequences = 2
timesteps = X_train.shape[1]//subsequences
timesteps
X_train = X_train.reshape((X_train.shape[0], subsequences, 1, timesteps, 1))
X_valid = X_valid.reshape((X_valid.shape[0], subsequences, 1, timesteps, 1))
X_test = X_test.reshape((X_test.shape[0], subsequences, 1, timesteps, 1))
X_train.shape
early_stop = tf.keras.callbacks.EarlyStopping(monitor="loss", patience=50)
modelConvLSTM = Sequential()
modelConvLSTM.add(ConvLSTM2D(filters=64, kernel_size=(1,2), activation='relu', return_sequences=True,input_shape=(X_train.shape[1], 1, X_train.shape[3], X_train.shape[4])))
modelConvLSTM.add(ConvLSTM2D(filters=32, kernel_size=(1,2), activation='relu'))
modelConvLSTM.add(Flatten())
modelConvLSTM.add(Dense(64))
modelConvLSTM.add(Dense(32))
modelConvLSTM.add(Dense(1))
modelConvLSTM.compile(optimizer='adam', loss='mse')
history = modelConvLSTM.fit(X_train, y_train, batch_size=512, epochs=700, verbose=2, callbacks=[early_stop], validation_data = (X_valid, y_valid))
ConvLSTMpred = modelConvLSTM.predict(X_valid, verbose=0)
ConvLSTMpred = ConvLSTMpred.reshape((ConvLSTMpred.shape[0]))
ConvLSTMresults = metrics(ConvLSTMpred, y_valid)
ConvLSTMresults
ConvLSTMpred = modelConvLSTM.predict(X_test, verbose=0)
ConvLSTMpred = ConvLSTMpred.reshape((ConvLSTMpred.shape[0]))
ConvLSTMpred
```
```
# default_exp cloudsearch
```
# cloudsearch
> a library to query an AWS CloudSearch endpoint
```
#hide
from nbdev.showdoc import *
#export
import pandas as pd
from pprint import pprint
import boto3
#hide
import pickle, os
KEY = ''
PW = ''
keypath = '/Users/nic/.villa-search-2'
if KEY and PW:
with open (keypath, 'wb') as f:
pickle.dump({
'KEY': KEY,
'PW': PW
}, f)
with open(keypath, 'rb') as f:
creden = pickle.load(f)
USER = creden['KEY']
PW = creden['PW']
ACCESS_KEY_ID = USER
SECRET_ACCESS_KEY = PW
os.environ['DATABASE_TABLE_NAME'] = 'product-table-dev-manual'
os.environ['REGION'] = 'ap-southeast-1'
os.environ['INVENTORY_BUCKET_NAME'] = 'product-bucket-dev-manual'
os.environ['INPUT_BUCKET_NAME'] = 'input-product-bucket-dev-manual'
# os.environ['DAX_ENDPOINT'] = None
REGION = 'ap-southeast-1'
#export
class Search:
''' a search class to return search result'''
def __init__(self, searchTerm:str, key, pw, region = 'ap-southeast-1',
endpoint = '',
requiredFields = [
'villa_category_l1_en',
'villa_category_l2_en',
'villa_category_l3_en',
'villa_category_l4_en'
] ):
self.searchTerm = searchTerm
self.cloudSearch = boto3.client('cloudsearchdomain' ,
aws_access_key_id=key,
aws_secret_access_key=pw,
region_name=region,
endpoint_url= endpoint)
self.requiredFields = requiredFields
def createCriticalColumns(self, df):
''' fill in required fields if not exist'''
for col in self.requiredFields:
if col not in df:
df[col] = 'noData'
return df.fillna('noData')
def search(self,size = 50):
return self.returnFullSearch(size=size)
def returnFullSearch(self, size = 50):
query = self.searchTerm
searchResults = self.cloudSearch.search(query = query, size=size)['hits']
results = []
items = map(lambda x: x.get('fields'),searchResults.get('hit'))
items = map(lambda x: dict(zip(x.keys(),map(lambda y: y[0],x.values()))),items)
return list(items)
def sortedSearch(self, size = 1000):
items = self.returnFullSearch(size = size)
print(f'raw search result is {pd.DataFrame(items, columns= self.requiredFields)}')
if not items: return []
df = self.sortResultsV2(items)
output_dict = list(
df.drop(
['isNotFresh', 'cat2Count', 'finalCat'], axis = 1
).T.to_dict().values()
)
return output_dict
def sortResultsV2(self, items):
df = pd.DataFrame(items)
df = self.createCriticalColumns(df)
df['isNotFresh'] = df['villa_category_l1_en'] != 'Fresh'
cat2Count = df.groupby('villa_category_l2_en').count()['pr_code']
df['cat2Count'] = df['villa_category_l2_en'].apply(lambda x: -cat2Count[x])
df['finalCat'] = df['villa_category_l4_en'].fillna(df['villa_category_l3_en'])
return df.sort_values(by=['isNotFresh', 'cat2Count', 'finalCat', 'pr_engname'], ascending= True)
searchEndpoint = 'https://search-villa-cloudsearch-2-4izacsoytzqf6kztcyjhssy2ti.ap-southeast-1.cloudsearch.amazonaws.com'
searcher = Search(searchTerm = 'banana', key=USER, pw= PW , endpoint=searchEndpoint)
result = searcher.sortedSearch(size=1000)
len(list(result))
len(searcher.cloudSearch.search(query = 'pork', size = 1000)['hits']['hit'])
import sys, json
sys.getsizeof(json.dumps(result))
```
# Course set-up
```
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2020"
```
This notebook covers the steps you'll need to take to get set up for [CS224u](http://web.stanford.edu/class/cs224u/).
## Contents
1. [Anaconda](#Anaconda)
1. [The course Github repository](#The-course-Github-repository)
1. [Main data distribution](#Main-data-distribution)
1. [Additional installations](#Additional-installations)
1. [Installing the package requirements](#Installing-the-package-requirements)
1. [PyTorch](#PyTorch)
1. [TensorFlow](#TensorFlow)
1. [NLTK data](#NLTK-data)
1. [Jupyter notebooks](#Jupyter-notebooks)
## Anaconda
We recommend installing [the free Anaconda Python distribution](https://www.anaconda.com/distribution/), which includes IPython, Numpy, Scipy, matplotlib, scikit-learn, NLTK, and many other useful packages. This is not required, but it's an easy way to get all these packages installed. Unless you're very comfortable with Python package management and like installing things, this is the option for you!
Please be sure that you download the __Python 3__ version, which currently installs Python 3.7. Although our code is largely compatible with Python 2, __we're not supporting Python 2__.
Once you have Anaconda installed, it makes sense to create a virtual environment for the course. In a terminal, run
```conda create -n nlu python=3.7 anaconda```
to create an environment called `nlu`.
Then, to enter the environment, run
```conda activate nlu```
To leave it, you can just close the window, or run
```conda deactivate```
If your version of Anaconda is older than version 4.4 (see `conda --version`), then replace `conda` with `source` in the above (and consider upgrading your Anaconda!).
[This page](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) has more detailed instructions on managing virtual environments with Anaconda.
## The course Github repository
The core materials for the course are on Github:
https://github.com/cgpotts/cs224u
We'll be working in this repository a lot, and it will receive updates throughout the quarter, as we add new materials and correct bugs.
If you're new to git and Github, we recommend using [Github's Desktop Apps](https://desktop.github.com). Then you just have to clone our repository and sync your local copy with the official one when there are updates.
If you are comfortable with git in the command line, you can type the following command to clone the course's Github repo:
```git clone https://github.com/cgpotts/cs224u```
## Main data distribution
The datasets needed to run the course notebooks and complete the assignments are in the following zip archive:
http://web.stanford.edu/class/cs224u/data/data.tgz
We recommend that you download it, unzip it, and place it in the same directory as your local copy of this Github repository. If you decide to put it somewhere else, you'll need to adjust the paths given in the "Set-up" sections of essentially all the notebooks.
## Additional installations
Be sure to do these additional installations from __inside your virtual environment__ for the course!
### Installing the package requirements
Just run
```pip install -r requirements.txt```
from inside the course directory to install the core additional packages.
People who aren't using Anaconda should edit `requirements.txt` so that it installs all the prerequisites that come with Anaconda. For Anaconda users, there's no need to edit it or even open it.
### PyTorch
The PyTorch library has special installation instructions depending on your computing environment. For Anaconda users, we recommend
```conda install pytorch torchvision -c pytorch```
For non-Anaconda users, or if you have a [CUDA-enabled GPU](https://developer.nvidia.com/cuda-gpus), we recommend following the instructions posted here:
https://pytorch.org/get-started/locally/
For this course, you should be running at least version `1.3.0`:
```
import torch
torch.__version__
```
### TensorFlow
We won't work too much with TensorFlow in this course, but the `mittens` package, which implements `GloVe`, will be much faster if TensorFlow is available. It should work under both TensorFlow v1 and v2.
To install TensorFlow, with Anaconda or another environment:
```pip install tensorflow```
If you have a CUDA-enabled GPU, you should instead do
```pip install tensorflow-gpu```
If you're using an older version of Anaconda, you might be better off with
```conda install -c conda-forge tensorflow```
For additional instructions:
https://www.tensorflow.org/install/
For this course, you should be running at least version 1.15.0.
### NLTK data
Anaconda comes with NLTK but not with its data distribution. To install that, open a Python interpreter and run `import nltk; nltk.download()`. If you decide to download the data to a different directory than the default, then you'll have to set `NLTK_DATA` in your shell profile. (If that doesn't make sense to you, then we recommend choosing the default download directory!)
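For example, from a Python interpreter (a minimal sketch; which corpora you actually need depends on the notebooks you run):
```
import nltk
nltk.download()          # opens the interactive downloader
# or fetch a specific resource non-interactively, e.g.:
nltk.download('punkt')
```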
## Jupyter notebooks
The majority of the materials for this course are Jupyter notebooks, which allow you to work in a browser, mixing code and description. It's a powerful form of [literate programming](https://en.wikipedia.org/wiki/Literate_programming), and increasingly a standard for open science.
To start a notebook server, navigate to the directory where you want to work and run
```jupyter notebook --port 5656```
The port specification is optional.
This should launch a browser that takes you to a view of the directory you're in. You can then open notebooks for working and create new notebooks.
A major advantage of working with Anaconda is that you can switch virtual environments from inside a notebook, via the __Kernel__ menu. If this isn't an option for you, then run this command while inside your virtual environment:
```python -m ipykernel install --user --name nlu --display-name "nlu"```
(If you named your environment something other than `nlu`, then change the `--name` and `--display-name` values.)
[Additional discussion of Jupyter and kernels.](https://stackoverflow.com/questions/39604271/conda-environments-not-showing-up-in-jupyter-notebook)
<h2 style="color:#B22222">Exercises</h2>
1. Read the documentation of the ```find``` function and try it on the following text
```python
"Una gran gran máquina"
```
How would you locate the position of the second ```gran```?
2. What happens when you run the following program: `"Una gran maquina".find()`?
3. Write a program that converts the string `"MINUSCULAS"` to lowercase (*hint*: use the `.lower` method)
```
"Una gran gran máquina".find('gran', "Una gran gran máquina".find('gran') + 1)
#Error
"Una gran maquina".find()
"MINÚSCULAS".lower()
```
<h2 style="color:#B22222">Exercises</h2>
Given the string `"Python"`,
1. Get the first through third characters
2. Get the third through last characters
3. Get the second through fourth characters
4. What does the following line of code produce: `"Python"[::2]`? What effect did it have on the string?
5. What does the following line of code produce: `"Python"[::-1]`?
6. Explain what the slicing `"string"[ini:fin:k]` does, for non-negative integers `ini`, `fin` and `k`.
```
cadena = "Python"
print(cadena[0:3])
print(cadena[2:])
print(cadena[1:4])
print(cadena[::2])
print(cadena[::-1])
print(cadena[0::2])
print(cadena[-1:-5:-1])
```
<h2 style="color:#B22222">Exercises</h2>
1. Compute the present value of a zero-coupon bond with face value $C=100$, annual interest rate $i=7\%$ and maturity $t=10$ years. The price of the zero-coupon bond is given by
$$
\frac{C}{(1 + i)^t}
$$
2. Modify the previous program: using the `input` function, ask the user for a face value `C`, an interest rate `i` and a maturity of `y` years. Compute the value of the zero-coupon bond and store it in the variable `B`.
3. Write a program that asks for the user's name and age. It should then print the user's name as many times as their age, each repetition on a new line. For example, for a user `"Toño"` who is `5` years old, the program should show the following:
```
¿Cuál es tu nombre? Toño
¿Cuál es tu edad? 5
Toño
Toño
Toño
Toño
Toño
```
**Hint** http://python-ds.com/python-3-escape-sequences
4. Using the variable `trabalenguas`, remove all the `"r"` and `"s"` characters from the string.
```python
trabalenguas = """En tres tristes trastos de trigo,
tres tristes tigres comían trigo.
Comían trigo, tres tristes tigres,
en tres tristes trastos de trigo."""
```
5. Write a program that asks the user for a `frase` (phrase) and a `palabra_objetivo` (target word); the program should print all the characters that come after the target word (without extra spaces). For example, if `frase = "Como valuar un forward-starting swap"` and `palabra_objetivo = "un"`, the program should return `forward-starting swap`.
```
# 1 y 2
nominal = float(input('Dame nominal'))
tasa = float(input('Dame tasa'))
plazo = float(input('Dame plazo'))
B = nominal / (1 + tasa)**plazo
print(f'El valor del bono es {format(B, ",.2f")}')
# 3
nombre = input('¿Cuál es tu nombre?')
rep = int(input('¿Cuál es tu edad?'))
print((nombre + '\n') * rep)
# 4
trabalenguas = """En treS tRistes trastos de trigo,
tres tristes tigres comían tRigo.
Comían trigo, tres tristes tigres,
en tres tristes trastos de tRigo."""
resultado = trabalenguas.lower().replace('r', '').replace('s','')
#resultado = trabalenguas.lower()
#resultado = resultado.replace('r', '')
#resultado = resultado.replace('s', '')
print(resultado)
# 5
frase = 'Como valuar un forward-starting swap'
palabra_objetivo = 'Como'
resultado = frase[frase.find(palabra_objetivo) + len(palabra_objetivo) : ].strip()
print(resultado)
```
```
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10)
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Create an instance of the model
model = MyModel()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=True)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
@tf.function
def test_step(images, labels):
# training=False is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(images, training=False)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
# Reset the metrics at the start of the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
```
```
# autoreload nangs
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
# Basic use
We want to solve the following PDE:
\begin{equation}
\frac{\partial \phi}{\partial t} + u \frac{\partial \phi}{\partial x} = 0
\end{equation}
The independent variables (i.e., $x$ and $t$) are used as inputs to the NN, and the solution (i.e., $\phi$) is the output. To find the solution, at each step the NN outputs are differentiated w.r.t. the inputs. Then, a loss function that matches the PDE is built and the weights are updated accordingly. If the loss function goes to zero, we can assume that our NN is indeed the solution to our PDE. We will try to find a general solution for different values of $u$, so it will also be an input.
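The mechanics look roughly like this in plain PyTorch (a sketch only, not the nangs API; the `torch.nn.Linear` layer just stands in for the solution network):
```
import torch
net = torch.nn.Linear(3, 1)                       # placeholder for the solution network
xtu = torch.rand(8, 3, requires_grad=True)        # columns: x, t, u
phi = net(xtu)
grads, = torch.autograd.grad(phi.sum(), xtu)      # grads[:, 0] = dphi/dx, grads[:, 1] = dphi/dt
residual = grads[:, 1] + xtu[:, 2] * grads[:, 0]  # the advection PDE residual, driven to zero as the loss
```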
```
# imports
import numpy as np
import matplotlib.pyplot as plt
import nangs
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
nangs.__version__, torch.__version__
```
## Define your PDE
We provide the class `PDE` with the default functionality to solve the PDE. First, create a new class inheriting from the `PDE` class and overload the `computePDELoss` function to return one loss for each PDE in your system. You must initialize this class with the names of your inputs and outputs.
```
from nangs import PDE
class Adv1d(PDE):
def computePDELoss(self, inputs, outputs):
# compute gradients
grads = self.computeGrads(outputs, inputs)
# compute loss
dpdx, dpdt = grads[:, 0], grads[:, 1]
u = inputs[:,2]
return {'pde': dpdt + u*dpdx}
pde = Adv1d(inputs=('x', 't', 'u'), outputs='p')
pde.inputs, pde.outputs
```
## Define your data
To solve the PDE we first need a set of points at which to evaluate it; we will use these points as the dataset for training the NN.
```
# define the mesh
x = np.linspace(0,1,20)
t = np.linspace(0,1,30)
u = np.linspace(-1,1,10)
```
The class `Mesh` will combine all the possible values of the previous arrays to build a mesh. You must pass the data as a dict, providing one (unique) name for each variable, matching the names of the inputs defined in the PDE.
```
from nangs import Mesh
mesh = Mesh({'x': x, 't': t, 'u': u})
mesh.vars
```
In order to train a NN we need a `Dataset`. A `Mesh` has in fact a `Dataset` that you can iterate over.
```
len(mesh.dataset)
mesh.dataset[:3]
```
You can also specify the device to cache the data
```
mesh = Mesh({'x': x, 't': t, 'u': u}, device=device)
mesh.dataset[:3]
```
Finally, add the mesh to your PDE.
```
pde.set_mesh(mesh)
mesh.vars
pde.inputs
```
## Boundary Conditions
In order to find a non-trivial solution to our PDE we need to specify a set of boundary conditions. Nangs provides different classes for common boundary conditions.
```
# initial condition (t = 0)
t0 = np.array([0])
_x, _t, _u = np.meshgrid(x, t0, u)
p0 = np.sin(2*np.pi*(_x- _u*_t))
p0.shape
plt.plot(x, p0[0,:,0])
plt.grid(True)
plt.xlabel('x')
plt.ylabel('$p_0$')
plt.show()
```
A boundary condition has its own `Mesh` and its own logic to compute values and gradients. With a `Dirichlet` boundary condition we fix some values, so we need the inputs and also the target outputs (in this case, our initial condition). If you are including some free parameters as NN inputs, make sure you specify an initial condition for each one of them (here we use the same one for every value of $u$).
```
from nangs import Dirichlet
initial_condition = Dirichlet({'x': x, 't': t0, 'u': u}, {'p': p0.reshape(-1)}, device=device, name="initial")
initial_condition.vars
initial_condition.dataset[:5]
pde.add_boco(initial_condition)
```
We use periodic boundary conditions at $x=0$ and $x=1$. During training we will enforce these values to match.
```
# boundary conditions (peridic conditions at x = 0 and x = 1)
xb0 = np.array([0])
mb0 = np.meshgrid(xb0, t)
mb0 = np.stack(mb0, -1).reshape(-1, 2)
mb0.shape
from nangs import Periodic
x1 = np.array([0.])
x2 = np.array([1.])
periodic = Periodic({'x': x1, 't': t, 'u': u}, {'x': x2, 't': t, 'u': u}, device=device)
periodic.vars
periodic.dataset[:5]
pde.add_boco(periodic)
```
## Define your solution
We provide a basic `MLP` class that implements a multilayer perceptron as an approximation to the solution of the PDE, but you can always define your own NN (just be sure to match the number of inputs and outputs).
```
from nangs import MLP
mlp = MLP(inputs=len(pde.inputs), outputs=len(pde.outputs), layers=3, neurons=100)
mlp
assert mlp(torch.randn(32, len(pde.inputs))).shape == torch.randn(32, len(pde.outputs)).shape
```
## Solve the PDE
To solve the PDE, you must first call the `compile` function to specify the NN, the optimizer and, optionally, the loss function (we use MSE by default) or a learning rate scheduler.
```
EPOCHS = 40
optimizer = torch.optim.Adam(mlp.parameters())
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, pct_start=0.1, total_steps=EPOCHS)
pde.compile(mlp.to(device), optimizer, scheduler)
```
To solve the PDE, call the `solve` function with a number of epochs and a batch size for the optimization process. It returns a history with all the tracked metrics.
```
BATCH_SIZE = 256
%time hist = pde.solve(EPOCHS, BATCH_SIZE)
```
## Evaluate your solution
Finally, you can evaluate your solution
```
from matplotlib import animation, rc
rc('animation', html='html5')
def update_plot(i, x, u, t, p):
ax.clear()
pe = np.sin(2.*np.pi*(x-u*t[i]))
ax.plot(x, pe, label=f"exact (u = {U})")
ax.plot(x, p[i], '.k', label="solution")
ax.set_xlabel("x", fontsize=14)
ax.set_ylabel("p", fontsize=14, rotation=np.pi/2)
ax.legend(loc="upper right")
ax.grid(True)
ax.set_xlim([0, 1])
ax.set_ylim([-1.2, 1.2])
l2 = np.sqrt(np.sum((p[i]-pe)**2))
ax.set_title(f"t = {t[i]:.3f} (L2 = {l2:.5f})")
return ax
#t = np.linspace(0,1,10)
U = -0.5
x = np.linspace(0,1,30)
t = np.linspace(0,1,20)
u = np.array([U])
eval_mesh = Mesh({'x': x, 't': t, 'u': u}, device=device)
p = pde.eval(eval_mesh)
p = p.cpu().numpy().reshape(len(t), -1)
fig = plt.figure()
ax = plt.subplot(111)
anim = animation.FuncAnimation(fig, update_plot, frames=len(t), fargs=(x, U, t, p), interval=300)
plt.close()
anim
```
# Table of contents
0. [Introduction](0-Introduction.ipynb)
1. [Variables](1-Variables.ipynb)
2. [Data structures](2-Data-Structures.ipynb)
3. [Conditional statements and loops](3-Conditional-Statements-Loops.ipynb)
4. [Some exercises](4-Some-Exercises.ipynb)
5. [Introduction to functions](5-0-Introduction-function.ipynb)
1. [File manipulation](5-1-File-manipulation.ipynb) $\leftarrow$
6. [From 0D to 1D](6-1-From-0D-to-1D.ipynb)
1. [Adding lateral diffusion](6-2-Adding-lateral-diffusion.ipynb)
7. [From 1D to 2D](7-From-1D-to-2D.ipynb)
8. [Playing with the model](8-Playing-with-the-model.ipynb)
### Exercise 7
**(With an intermezzo about file manipulation)**
You can play with the different parameters to see how the concentration dynamics change.
Here we would like you to systematically try different parameters and save the produced plots as png files with names containing the parameter values (for example `'test_a0.4_i0.15_dt0.001_k-0.005_tau0.1.png'`).
To save the produced plots, you can use the argument `save_path` of the function `plot_concentration_1cell`.
If you set its value to the file name and path you want to create, it will save it there under its name.
Before being able to do so, you might need some information about how to manipulate strings.
In the previous exercises you might have seen that it is possible to insert values from variables within a string using the curly brackets `{` and `}`.
Simply put, the way it works is that you put the character `f` before your string, and then everything within curly brackets will be evaluated and transformed into a string if possible.
For example:
```python
f'test_a{A[0]}_i{I[0]}_dt{dt}'
```
will produce the following string:
```python
'test_a0.4_i0.15_dt0.001'
```
Note that if the `f` is not in front of the string, the curly brackets will be interpreted as normal characters.
**For more on string manipulation, you can read the [Python tutorial on input and output](https://docs.python.org/3/tutorial/inputoutput.html#input-and-output).**
```
from Resources.UsefulFunctions import *
from Resources.Answers import answer, hint
# Carry over here the previously declared variables and the code from the previous exercises
mu_a = 2.8e-4
mu_i = 5e-3
tau = .1
k = -.005
size = 100
dx = dy = 2. / size
T = 9.0
dt = .001
n = int(T / dt)
def compute_AI(): # Don't forget to add arguments
# A, I = [a], [i]
A, I = [0], [0]
    # Uncomment above and
# Do something here
return A, I
A, I = compute_AI()
s1 = f'test_a{A[0]}_i{I[0]}_dt{dt}'
s2 = 'test_a{A[0]}_i{I[0]}_dt{dt}'
print(s1)
print(s2)
```
#### Avoiding cluttering up your current folder
If you want to be a little bit cleaner, you can create a folder in which you will save your images.
You can create such a folder directly in Python using `Path` from the `pathlib` library and the command:
```python
Path.mkdir(Path('<folder_name>'))
```
For example, to create a folder named `question_7` one could run the command
```python
Path.mkdir(Path('question_7'))
```
Note that `Path.mkdir` expects a `Path` object, not a plain string.
However, if the folder already exists, this command will raise an error and stop the notebook from running.
To avoid such a problem, it is possible to check whether the folder already exists using the method `exists` of `Path` as shown below.
Let's create the folder `question_7`:
```
from pathlib import Path
folder = Path('question_7')
if not folder.exists():
Path.mkdir(folder)
```
#### Path manipulation
Some of you might already be aware that playing with paths can be a pain.
The problem comes from the fact that Windows has a different way to represent a path to a folder than Linux and MacOs.
> **_Side Note: what's a path?!_**
>
> In a computer the folders and files are organised hierarchically.
> What it means is that each file or folder except for one, the root, is in a folder.
> For example, the folder you created earlier (`question_7`) is itself in a folder.
>
> To access a file or folder, it is sometimes necessary to know the sequence of folders it is in so there is no ambiguity for the computer.
> The sequence of folders a folder or a file belongs to is the **path** and it can be represented as a string.
> For example, you can call the function `Path.cwd` (for the current working directory).
> To query the list of directories your notebook is running in:
```
print('Our current path:')
print(Path.cwd())
```
> You may have noticed that the folders are separated by a `/` (or a `\` on Windows).
> This difference between Linux or MacOs and Windows has been quite a source of trouble, as some of you might have experienced.
Now, to save an image in the folder `'question_7'`, as we would like to do, we just need to concatenate the image name to the folder name:
```python
folder / 'test_a0.4_i0.15_dt0.001.png'
```
Note that the `/` in this case is a concatenation operator specific to the objects of the `pathlib` library. The operator concatenates two `Path` objects, or a `Path` and a `str`, inserting the operating-system-specific folder separator (`/` or `\`).
**More info about the `pathlib` library can be found [here](https://docs.python.org/3/library/pathlib.html)**
```
## folder is the path previously created
# Concatenation of two Paths
print(folder / Path('test_a0.4_i0.15_dt0.001.png'))
# Concatenation of a Path and a str (same result)
print(folder / 'test_a0.4_i0.15_dt0.001.png')
```
Now we can *cleanly* answer question 6.
Let's assume that we want the following values:
- `tau` changes from `0.05` to `3` and that we want `5` values within that interval
- `k` changes from `-1` to `1` and that we also want `5` values within that interval
- and a fixed `dt=0.01`
**Write some lines of code to compute and save the requested plots**
Note: you can use the function `np.linspace` to generate the desired values
```
import numpy as np
folder = Path('question_7')
for test_tau in np.linspace(.05, 1, 5):
for test_k in np.linspace(-1, 1, 5):
# A, I = answer_results(4, A=0.4, I=0.15, dt=dt, k=test_k, tau=test_tau, n=n)
plot_concentration_1cell(A, I,
save_path=folder / f'k{test_k}_tau{test_tau}.png')
```
An interesting configuration where we can see some oscillations:
- `dt=0.01`
- `k=0.05`
- `tau=2`
You can manually change the parameters to try to find other *weird* configurations
```
A, I = answer_results(4, A=0.4, I=0.15, dt=.01, k=.05, tau=2, n=n)
plot_concentration_1cell(A, I)
```
### Exercise 8
**This exercise is difficult and might take a bit longer to solve. If you are stuck, don't hesitate to look at the following cells for some help.**
In this exercise we will discuss file manipulation, in other words how to move files automatically.
**Attention here! Proceed with caution for this exercise but also in general. Files removed using Python (or the shell for example) do not end up in the trash but are directly removed!**
We would like to sort the files created in the previous exercise: group the plots in folders by value of `k`, with each folder named according to that `k` value.
For example if you had the following files in your `exercise_7` folder:
```
exercise_7:
| k0_tau0.png
| k0_tau1.png
| k0_tau2.png
| k1_tau0.png
| k1_tau1.png
| k1_tau2.png
| k2_tau0.png
| k2_tau1.png
| k2_tau2.png
```
We would like you to create the following hierarchy:
```
exercise_7:
| k0:
| k0_tau0.png
| k0_tau1.png
| k0_tau2.png
| k1:
| k1_tau0.png
| k1_tau1.png
| k1_tau2.png
| k2:
| k2_tau0.png
| k2_tau1.png
| k2_tau2.png
```
To do so you can use the following functions (assuming `p` is a `Path`):
- `Path.iterdir` lets you loop through all the files of a directory
- `p.name` retrieves the name of the file in `p` as a `str`
- `str.split` splits a string
- `Path.exists` see above
- `p.rename` lets you rename (and therefore move) `p`
Do not hesitate to look at the help of each of these functions (you should do it!).
```
p = Path('question_7')
for file in p.iterdir():
## Do things here
print(file)
```
### Help for exercise 8
Because the difficulty increased significantly with this exercise, here are some leads that will hopefully help you solve it!
One way to solve a coding problem is to decompose it into multiple smaller problems.
There are often multiple ways to decompose a problem; we will show you one here. It might not be the optimal one (whatever metric you use for optimality), but it should be a working one.
To build that decomposition, it can sometimes be useful to rephrase the problem in terms of what you want the code to do:
<details>
<summary><b>Click here to display the pseudo-code</b></summary>
```
for each file in folder do (1)
if the file is a png file do (2)
k_value <- get what is the value of k for that file (3)
if folder with k value does not exist do (4)
create new folder with k value
end if
move file to folder with k value (5)
end if
end for
```
</details>
This decomposition allows us to identify the important points in the code and to organise the code to be produced.
Here, we want to loop on the files (1), check if the file is a file of interest (2), retrieve the value of `k` in the file name (3), create a folder with the `k` value if necessary (4) and move the file in the appropriate folder (5).
Now, one can try to solve the 5 problems independently and ultimately assemble them to answer the question.
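If you want to check your own attempt, here is one possible assembly of these five steps (a sketch using only the `pathlib` functions listed above; the file-name parsing assumes names of the form `k<value>_tau<value>.png`, as produced in exercise 7):
```
from pathlib import Path

p = Path('question_7')
for file in p.iterdir():                      # (1) loop over everything in the folder
    if file.name.endswith('.png'):            # (2) only treat the png files
        k_value = file.name.split('_')[0]     # (3) 'k0.5_tau1.0.png' -> 'k0.5'
        k_folder = p / k_value
        if not k_folder.exists():             # (4) create the folder for this k value if needed
            Path.mkdir(k_folder)
        file.rename(k_folder / file.name)     # (5) move the file into its k folder
```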
# Audio Alignment for Harmonix Set
This notebook tries to align purchased audio with original audio from Harmonix.
More specifically, for each pair of audio files:
- Load both audio files
- Compute mel spectrograms
- Use DTW to find the correct start and end points of alignment
- Produce the new aligned mp3s from the purchased audio
```
from __future__ import print_function
import glob
import IPython
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import librosa
from librosa import display
from tqdm import tqdm_notebook as tqdm
# ORIG_MP3_PATH = "/Users/onieto/Desktop/Harmonix/audio/"
# PURC_MP3_PATH = "/Users/onieto/Dropbox/drop/HarmonixMP3_YouTube/"
ORIG_MP3_PATH = "/home/uri/Dropbox/drop/HarmonixMP3_original/"
PURC_MP3_PATH = "/home/uri/Dropbox/drop/HarmonixMP3_YouTube/"
METADATA_TSV = "../dataset/metadata.csv"
OUT_DIR = "aligned_mp3s"
N_FFT = 8192
HOP_SIZE = 1024
METRIC = "euclidean"
SR = 22050
N_MELS = 90
%matplotlib inline
# Load metadata
meta_df = pd.read_csv(METADATA_TSV, sep=",")
meta_df.head()
def alignment_score(dtw_curve):
"""The alignment score is simply the average of the difference of
the _purchased_ track's DTW alignment curve."""
return np.mean(np.diff(dtw_curve[:,1][::-1]))
def reconstruct_signal(orig_x, purc_x, dtw_curve):
"""Reconstructs the signal from the purchased signal using the most similar frames
from the original signal.
We basically take exactly as many frames as the original signal and get the
closest to each of these frames from the purchased signal given the dtw curve."""
orig_dict = {}
for w in dtw_curve[::-1]:
orig_dict[w[0]] = w[1]
y = []
for i in range(len(orig_dict)):
samp = orig_dict[i] * HOP_SIZE
y += list(purc_x[samp:samp + HOP_SIZE])
last_samp = samp + HOP_SIZE
y += list(purc_x[last_samp:last_samp + (len(orig_x) - len(y))])
return y
def compute_alignment(file_id, align_thres=0.9, is_plot=False):
"""Main function to do the alignment between two songs of the same id.
"""
# Load mp3s
orig_path = os.path.join(ORIG_MP3_PATH, file_id + ".mp3")
purc_path = os.path.join(PURC_MP3_PATH, file_id + ".mp3")
orig_x, _ = librosa.load(orig_path, sr=SR)
purc_x, _ = librosa.load(purc_path, sr=SR)
# Compute melspecs
orig_mel = librosa.power_to_db(
librosa.feature.melspectrogram(y=orig_x, sr=SR, hop_length=HOP_SIZE, n_mels=N_MELS))
purc_mel = librosa.power_to_db(
librosa.feature.melspectrogram(y=purc_x, sr=SR, hop_length=HOP_SIZE, n_mels=N_MELS))
# Apply DTW
D, wp = librosa.sequence.dtw(X=orig_mel, Y=purc_mel, metric='euclidean')
score = alignment_score(wp)
# Plot
if is_plot:
wp_s = np.asarray(wp) * HOP_SIZE / SR
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
librosa.display.specshow(D, x_axis='time', y_axis='time',
cmap='gray_r', hop_length=HOP_SIZE)
imax = ax.imshow(D, cmap=plt.get_cmap('gray_r'),
origin='lower', interpolation='nearest', aspect='auto')
ax.plot(wp_s[:, 1], wp_s[:, 0], marker='o', color='r')
plt.title('Warping Path on Acc. Cost Matrix $D$')
plt.colorbar()
# Return reconstructed signal and score
return reconstruct_signal(orig_x, purc_x, wp), score
# Compute alignment for all the dataset, creating new audio files and storing the alignment scores
out = {"File": [], "score": []}
for i, row in tqdm(meta_df.iterrows(), total=len(meta_df)):
file_id = row["File"]
# Do alignment
y, score = compute_alignment(file_id)
# Save wav
librosa.output.write_wav(os.path.join(OUT_DIR, file_id + ".wav"), np.asarray(y), sr=SR)
# Save score
out["File"].append(file_id)
out["score"].append(score)
IPython.display.Audio(data=y, rate=SR)
out_df = pd.DataFrame(out)
out_df.to_csv("aligned_scores.tsv", sep=",", index=None)
```
```
import tensorflow as tf
from utils import *
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'pubmed', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'gcn', 'Model string.') # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
print(adj[0])
import networkx as nx
G = nx.Graph(adj)
#G.add_path([0,1,2])
#list(nx.bfs_edges([0,1,2],0))
print(list(nx.bfs_tree(G,0,0)))
print(nx.shortest_path_length(G,source=0))
one_path = nx.all_pairs_shortest_path(G,cutoff=1)[0]
two_path = nx.all_pairs_shortest_path(G,cutoff=2)[0]
print(len(two_path), len(one_path)-1, len(two_path)-len(one_path)-1)
one_path
two_path
type(two_path)
features = preprocess_features(features)
#print(features.shape)
features[1].shape
#2708*1433
print(adj.toarray())
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from utils import *
from models import GCN, MLP
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'pubmed', 'Dataset string.') # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'gcn', 'Model string.') # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 10, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
# Load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(FLAGS.dataset)
# Some preprocessing
features = preprocess_features(features)
if FLAGS.model == 'gcn':
support = [preprocess_adj(adj)]
num_supports = 1
model_func = GCN
elif FLAGS.model == 'gcn_cheby':
support = chebyshev_polynomials(adj, FLAGS.max_degree)
num_supports = 1 + FLAGS.max_degree
model_func = GCN
elif FLAGS.model == 'dense':
support = [preprocess_adj(adj)] # Not used
num_supports = 1
model_func = MLP
else:
raise ValueError('Invalid argument for model: ' + str(FLAGS.model))
# Define placeholders
placeholders = {
'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
'labels_mask': tf.placeholder(tf.int32),
'dropout': tf.placeholder_with_default(0., shape=()),
'num_features_nonzero': tf.placeholder(tf.int32) # helper variable for sparse dropout
}
# Create model
model = model_func(placeholders, input_dim=features[2][1], logging=True)
# Initialize session
sess = tf.Session()
# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders):
t_test = time.time()
feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], (time.time() - t_test)
# Init variables
sess.run(tf.global_variables_initializer())
cost_val = []
# Construct feed dictionary
feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Train model
for epoch in range(FLAGS.epochs):
t = time.time()
# Training step
outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
# Validation
cost, acc, duration = evaluate(features, support, y_val, val_mask, placeholders)
cost_val.append(cost)
# Print results
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
"train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
"val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
print("Early stopping...")
break
print("Optimization Finished!")
# Testing
test_cost, test_acc, test_duration = evaluate(features, support, y_test, test_mask, placeholders)
print("Test set results:", "cost=", "{:.5f}".format(test_cost),
"accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))
y_val.shape
features
```
```
#hide
import sys
path = '/home/ddpham/git/tabint/'
sys.path.insert(1, path)
#default_exp pre_processing
%load_ext autoreload
%autoreload 2
#hide
from nbdev.showdoc import *
#export
from tabint.utils import *
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.preprocessing import StandardScaler
from sklearn_pandas import DataFrameMapper
import numpy as np              # used by filter_outlier, app_cat, ...
import pandas as pd             # used by fill_na, dummies, ...
import warnings                 # used by scale_vars
import sklearn.exceptions       # DataConversionWarning used by scale_vars
import pdb
#export
#todo: use dask, numba and do things in parallel
#imputation: https://www.kaggle.com/dansbecker/handling-missing-values
#use sklearn pipeline and transformer??
def tabular_proc(df, y_fld = None, procs = None, ignore_flds=None):
pp_outp = {}
df = df.copy()
if ignore_flds is not None:
ignored_flds = df.loc[:, ignore_flds]
df.drop(ignore_flds, axis=1, inplace=True)
if y_fld is not None:
if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes
y = df[y_fld].values
df.drop(y_fld, axis=1, inplace=True)
for f in procs: df = f(df, pp_outp)
if ignore_flds is not None: df = pd.concat([df, ignored_flds], axis=1)
if y_fld is not None: return [df, y, pp_outp]
else: return [df, pp_outp]
class TBPreProc:
def __init__(self, *args): self.args = args
def __call__(self, df, pp_outp): return self.func(df, pp_outp, *self.args)
@staticmethod
def func(*args): None
class skip_flds(TBPreProc):
@staticmethod
def func(df, pp_outp, skip_flds): return df.drop(skip_flds, axis=1)
class remove_outlier(TBPreProc):
@staticmethod
def func(df, pp_outp):
return filter_outlier(df, pp_outp['cons'])[0]
def filter_outlier(df, cons):
mask = np.full(df.shape[0], True)
for v in to_iter(df[cons].values.T):
Min, _, _, _, Max, _ = boxnwhisker_value(v)
inlier = np.logical_and(v >= Min, v <= Max)
mask = np.logical_and(mask, inlier)
return df[mask], mask
def boxnwhisker_value(values):
Median = np.median(values)
Q1, Q3 = np.percentile(values, [25,75])
IQR = Q3 - Q1
Min, Max = Q1 - IQR*1.5, Q3 + IQR*1.5
return max(Min, np.min(values)), Q1, Median, Q3, min(Max,np.max(values)), IQR
class subset(TBPreProc):
@staticmethod
def func(df, pp_outp, ss): return df.sample(ss)
class fill_na(TBPreProc):
@staticmethod
def func(df, pp_outp, na_dict = None):
na_dict = {} if na_dict is None else na_dict.copy()
na_dict_initial = na_dict.copy()
for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if len(na_dict_initial.keys()) > 0:
df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
pp_outp['na_dict'] = na_dict
return df
def fix_missing(df, col, name, na_dict):
if is_numeric_dtype(col):
if pd.isnull(col).sum() or (name in na_dict):
df[name+'_na'] = pd.isnull(col)
filler = na_dict[name] if name in na_dict else col.median()
df[name] = col.fillna(filler)
na_dict[name] = filler
return na_dict
class app_cat(TBPreProc):
@staticmethod
def func(df, pp_outp, max_n_cat=15):
cons = []
for name, value in df.items():
if is_numeric_dtype(value) and value.dtypes != np.bool:
if value.nunique()<=max_n_cat:
if not np.array_equal(value.unique(), np.array([0, 1])): df[name] = value.astype('category').cat.as_ordered()
else: cons.append(name)
else:
if value.nunique()>max_n_cat: df[name] = value.astype('category').cat.codes+1; cons.append(name)
elif value.dtypes.name == 'object': df[name] = value.astype('category').cat.as_ordered()
elif value.dtypes.name == 'category': df[name] = value.cat.as_ordered()
pp_outp['cons'] = cons
return df
class dummies(TBPreProc):
@staticmethod
def func(df, pp_outp):
df = pd.get_dummies(df, dummy_na=True)
if 'cons' in pp_outp.keys(): pp_outp['cats'] = [i for i in df.columns if i not in pp_outp['cons']]
return df
class scale_vars(TBPreProc):
@staticmethod
def func(df, pp_outp, mapper = None):
warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
if mapper is None:
map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
mapper = DataFrameMapper(map_f).fit(df)
df[mapper.transformed_names_] = mapper.transform(df)
pp_outp['mapper'] = mapper
return df
from nbdev.export import *
notebook2script('01_pre-processing.ipynb')
```
<!-- dom:TITLE: Data Analysis and Machine Learning Lectures: Optimization and Gradient Methods -->
# Data Analysis and Machine Learning Lectures: Optimization and Gradient Methods
<!-- dom:AUTHOR: Morten Hjorth-Jensen at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University -->
<!-- Author: -->
**Morten Hjorth-Jensen**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University
Date: **Sep 20, 2018**
Copyright 1999-2018, Morten Hjorth-Jensen. Released under CC Attribution-NonCommercial 4.0 license
## Optimization, the central part of any Machine Learning algorithm
Almost every problem in machine learning and data science starts with
a dataset $X$, a model $g(\beta)$, which is a function of the
parameters $\beta$ and a cost function $C(X, g(\beta))$ that allows
us to judge how well the model $g(\beta)$ explains the observations
$X$. The model is fit by finding the values of $\beta$ that minimize
the cost function. Ideally we would be able to solve for $\beta$
analytically, however this is not possible in general and we must use
some approximative/numerical method to compute the minimum.
## Steepest descent
The basic idea of the method of steepest descent, also called gradient descent, is
that a function $F(\mathbf{x})$,
$\mathbf{x} \equiv (x_1,\cdots,x_n)$, decreases fastest if one goes from $\mathbf{x}$ in the
direction of the negative gradient $-\nabla F(\mathbf{x})$.
It can be shown that if
$$
\mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k), \ \ \gamma_k > 0
$$
for $\gamma_k$ small enough, then $F(\mathbf{x}_{k+1}) \leq
F(\mathbf{x}_k)$. This means that for a sufficiently small $\gamma_k$
we are always moving towards smaller function values, i.e. towards a minimum.
<!-- !split -->
## More on Steepest descent
The previous observation is the basis of the method of steepest
descent, which is also referred to as just gradient descent (GD). One
starts with an initial guess $\mathbf{x}_0$ for a minimum of $F$ and
computes new approximations according to
$$
\mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k), \ \ k \geq 0.
$$
The parameter $\gamma_k$ is often referred to as the step length or
the learning rate within the context of Machine Learning.
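As a minimal illustration of the update rule (our own sketch, using a simple one-dimensional quadratic $F(x)=x^2$ and a fixed learning rate, not one of the examples from these notes):
```
import numpy as np

def gradient_descent(grad, x0, gamma=0.1, max_iter=1000, tol=1e-8):
    """Plain gradient descent with a constant learning rate gamma."""
    x = x0
    for k in range(max_iter):
        g = grad(x)
        if np.linalg.norm(g) < tol:   # stop when the gradient is (almost) zero
            break
        x = x - gamma*g               # move against the gradient
    return x

# minimize F(x) = x^2, whose gradient is 2x; the minimum is at x = 0
print(gradient_descent(lambda x: 2*x, x0=np.array([5.0])))
```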
<!-- !split -->
## The ideal
Ideally the sequence $\{ \mathbf{x}_k \}_{k=0}$ converges to a global
minimum of the function $F$. In general we do not know if we are in a
global or local minimum. In the special case when $F$ is a convex
function, all local minima are also global minima, so in this case
gradient descent can converge to the global solution. The advantage of
this scheme is that it is conceptually simple and straightforward to
implement. However the method in this form has some severe
limitations:
In machine learning we are often faced with non-convex, high-dimensional
cost functions with many local minima. Since GD is deterministic, if the
method converges it will get stuck in a local minimum unless we
have a very good initial guess. This also implies that the scheme is
sensitive to the chosen initial condition.
Note that the gradient is a function of $\mathbf{x} =
(x_1,\cdots,x_n)$ which makes it expensive to compute numerically.
<!-- !split -->
## The sensitivity of gradient descent
GD is sensitive to the choice of learning rate $\gamma_k$. This is due
to the fact that we are only guaranteed that $F(\mathbf{x}_{k+1}) \leq
F(\mathbf{x}_k)$ for sufficiently small $\gamma_k$. The problem is to
determine an optimal learning rate. If the learning rate is chosen too
small the method will take a long time to converge and if it is too
large we can experience erratic behavior.
Many of these shortcomings can be alleviated by introducing
randomness. One such method is that of Stochastic Gradient Descent
(SGD), see below.
## Gradient Descent Example
We revisit now our simple linear regression example with a linear polynomial.
```
%matplotlib inline
# Importing various packages
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
x = 2*np.random.rand(100,1)
y = 4+3*x+np.random.randn(100,1)
xb = np.c_[np.ones((100,1)), x]
beta_linreg = np.linalg.inv(xb.T.dot(xb)).dot(xb.T).dot(y)
print(beta_linreg)
beta = np.random.randn(2,1)
eta = 0.1
Niterations = 1000
m = 100
for iter in range(Niterations):
gradients = 2.0/m*xb.T.dot(xb.dot(beta)-y)
beta -= eta*gradients
print(beta)
xnew = np.array([[0],[2]])
xbnew = np.c_[np.ones((2,1)), xnew]
ypredict = xbnew.dot(beta)
ypredict2 = xbnew.dot(beta_linreg)
plt.plot(xnew, ypredict, "r-")
plt.plot(xnew, ypredict2, "b-")
plt.plot(x, y ,'ro')
plt.axis([0,2.0,0, 15.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Gradient descent example')
plt.show()
```
## And a corresponding example using **scikit-learn**
```
# Importing various packages
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
x = 2*np.random.rand(100,1)
y = 4+3*x+np.random.randn(100,1)
xb = np.c_[np.ones((100,1)), x]
beta_linreg = np.linalg.inv(xb.T.dot(xb)).dot(xb.T).dot(y)
print(beta_linreg)
sgdreg = SGDRegressor(n_iter = 50, penalty=None, eta0=0.1)
sgdreg.fit(x,y.ravel())
print(sgdreg.intercept_, sgdreg.coef_)
```
<!-- !split -->
## Convex functions
Ideally we want our cost/loss function to be convex(concave).
First we give the definition of a convex set: A set $C$ in
$\mathbb{R}^n$ is said to be convex if, for all $x$ and $y$ in $C$ and
all $t \in (0,1)$ , the point $(1 − t)x + ty$ also belongs to
C. Geometrically this means that every point on the line segment
connecting $x$ and $y$ is in $C$ as discussed below.
The convex subsets of $\mathbb{R}$ are the intervals of
$\mathbb{R}$. Examples of convex sets of $\mathbb{R}^2$ are the
regular polygons (triangles, rectangles, pentagons, etc...).
## Convex function
**Convex function**: Let $X \subset \mathbb{R}^n$ be a convex set. Assume that the function $f: X \rightarrow \mathbb{R}$ is continuous, then $f$ is said to be convex if $$f(tx_1 + (1-t)x_2) \leq tf(x_1) + (1-t)f(x_2) $$ for all $x_1, x_2 \in X$ and for all $t \in [0,1]$. If $\leq$ is replaced with a strict inequality in the definition, and we demand $x_1 \neq x_2$ and $t\in(0,1)$, then $f$ is said to be strictly convex. For a single variable function, convexity means that if you draw a straight line connecting $f(x_1)$ and $f(x_2)$, the value of the function on the interval $[x_1,x_2]$ is always below the line as illustrated below.
## Conditions on convex functions
In the following we state first- and second-order conditions which
ensure convexity of a function $f$. We write $D_f$ to denote the
domain of $f$, i.e. the subset of $\mathbb{R}^n$ where $f$ is defined. For more
details and proofs we refer to [S. Boyd and L. Vandenberghe, *Convex Optimization*, Cambridge University Press, 2004](http://stanford.edu/boyd/cvxbook/).
**First order condition.**
Suppose $f$ is differentiable (i.e. $\nabla f(x)$ is well defined for
all $x$ in the domain of $f$). Then $f$ is convex if and only if $D_f$
is a convex set and $$f(y) \geq f(x) + \nabla f(x)^T (y-x) $$ holds
for all $x,y \in D_f$. This condition means that for a convex function
the first order Taylor expansion (right hand side above) at any point is
a global underestimator of the function. To convince yourself you can
make a drawing of $f(x) = x^2+1$, draw the tangent line to $f(x)$ at some point, and
note that the tangent line is always below the graph.
**Second order condition.**
Assume that $f$ is twice
differentiable, i.e the Hessian matrix exists at each point in
$D_f$. Then $f$ is convex if and only if $D_f$ is a convex set and its
Hessian is positive semi-definite for all $x\in D_f$. For a
single-variable function this reduces to $f''(x) \geq
0$. Geometrically this means that $f$ has nonnegative curvature
everywhere.
This condition is particularly useful since it gives us a procedure for determining whether the function under consideration is convex, apart from using the definition directly.
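As a quick illustration (our own, using the function mentioned above): for $f(x)=x^2+1$ we have
$$
f''(x) = 2 \geq 0 \quad \text{for all } x \in \mathbb{R},
$$
so the second-order condition immediately tells us that $f$ is convex on all of $\mathbb{R}$.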
## More on convex functions
The next result is of great importance to us and the reason why we are
going on about convex functions. In machine learning we frequently
have to minimize a loss/cost function in order to find the best
parameters for the model we are considering.
Ideally we want the
global minimum (for high-dimensional models it is hard to know
if we have local or global minimum). However, if the cost/loss function
is convex the following result provides invaluable information:
**Any minimum is global for convex functions.**
Consider the problem of finding $x \in \mathbb{R}^n$ such that $f(x)$
is minimal, where $f$ is convex and differentiable. Then, any point
$x^*$ that satisfies $\nabla f(x^*) = 0$ is a global minimum.
This result means that if we know that the cost/loss function is convex and we are able to find a minimum, we are guaranteed that it is a global minimum.
## Some simple problems
1. Show that $f(x)=x^2$ is convex for $x \in \mathbb{R}$ using the definition of convexity. Hint: If you re-write the definition, $f$ is convex if the following holds for all $x,y \in D_f$ and any $\lambda \in [0,1] $ $\lambda f(x) + (1-\lambda)f(y) - f(\lambda x + (1-\lambda) y ) \geq 0. $
2. Using the second order condition show that the following functions are convex on the specified domain.
* $f(x) = e^x$ is convex for $x \in \mathbb{R}$.
* $g(x) = -\ln(x)$ is convex for $x \in (0,\infty)$.
3. Let $f(x) = x^2$ and $g(x) = e^x$. Show that $f(g(x))$ and $g(f(x))$ is convex for $x \in \mathbb{R}$. Also show that if $f(x)$ is any convex function than $h(x) = e^{f(x)}$ is convex.
4. A norm is any function that satisfies the following properties
* $f(\alpha x) = |\alpha| f(x)$ for all $\alpha \in \mathbb{R}$.
* $f(x+y) \leq f(x) + f(y)$
 * $f(x) \geq 0$ for all $x \in \mathbb{R}^n$, with equality if and only if $x = 0$
Using the definition of convexity, try to show that a function satisfying the properties above is convex (the third condition is not needed to show this).
<!-- !split -->
## Revisiting our first homework
We will use linear regression as a case study for the gradient descent
methods. Linear regression is a great test case for the gradient
descent methods discussed in the lectures since it has several
desirable properties such as:
1. An analytical solution (recall homework set 1).
2. The gradient can be computed analytically.
3. The cost function is convex which guarantees that gradient descent converges for small enough learning rates
We revisit the example from homework set 1 where we had
$$
y_i = 5x_i^2 + 0.1\xi_i, \ i=1,\cdots,100
$$
with $x_i \in [0,1] $ chosen randomly with a uniform distribution. Additionally $\xi_i$ represents stochastic noise chosen according to a normal distribution $\cal {N}(0,1)$.
The linear regression model is given by
$$
h_\beta(x) = \hat{y} = \beta_0 + \beta_1 x,
$$
such that
$$
\hat{y}_i = \beta_0 + \beta_1 x_i.
$$
<!-- !split -->
## Gradient descent example
Let $\mathbf{y} = (y_1,\cdots,y_n)^T$, $\mathbf{\hat{y}} = (\hat{y}_1,\cdots,\hat{y}_n)^T$ and $\beta = (\beta_0, \beta_1)^T$.
It is convenient to write $\mathbf{\hat{y}} = X\beta$ where $X \in \mathbb{R}^{100 \times 2} $ is the design matrix given by
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
X \equiv \begin{bmatrix}
1 & x_1 \\
\vdots & \vdots \\
1 & x_{100} \\
\end{bmatrix}.
\label{_auto1} \tag{1}
\end{equation}
$$
The loss function is given by
$$
C(\beta) = ||X\beta-\mathbf{y}||^2 = ||X\beta||^2 - 2 \mathbf{y}^T X\beta + ||\mathbf{y}||^2 = \sum_{i=1}^{100} \left[ (\beta_0 + \beta_1 x_i)^2 - 2 y_i (\beta_0 + \beta_1 x_i) + y_i^2 \right]
$$
and we want to find $\beta$ such that $C(\beta)$ is minimized.
## The derivative of the cost/loss function
Computing $\partial C(\beta) / \partial \beta_0$ and $\partial C(\beta) / \partial \beta_1$ we can show that the gradient can be written as
$$
\nabla_\beta C(\beta) = (\partial C(\beta) / \partial \beta_0, \partial C(\beta) / \partial \beta_1)^T = 2\begin{bmatrix} \sum_{i=1}^{100} \left(\beta_0+\beta_1x_i-y_i\right) \\
\sum_{i=1}^{100}\left( x_i (\beta_0+\beta_1x_i)-y_ix_i\right) \\
\end{bmatrix} = 2X^T(X\beta - \mathbf{y}),
$$
where $X$ is the design matrix defined above.
## The Hessian matrix
The Hessian matrix of $C(\beta)$ is given by
$$
\hat{H} \equiv \begin{bmatrix}
\frac{\partial^2 C(\beta)}{\partial \beta_0^2} & \frac{\partial^2 C(\beta)}{\partial \beta_0 \partial \beta_1} \\
\frac{\partial^2 C(\beta)}{\partial \beta_0 \partial \beta_1} & \frac{\partial^2 C(\beta)}{\partial \beta_1^2} \\
\end{bmatrix} = 2X^T X.
$$
This result implies that $C(\beta)$ is a convex function since the matrix $X^T X$ is always positive semi-definite.
## Simple program
We can now write a program that minimizes $C(\beta)$ using the gradient descent method with a constant learning rate $\gamma$ according to
$$
\beta_{k+1} = \beta_k - \gamma \nabla_\beta C(\beta_k), \ k=0,1,\cdots
$$
We can use the expression we computed for the gradient, choose the
starting $\beta$ randomly, and let $\gamma = 0.001$. Stop iterating
when $||\nabla_\beta C(\beta_k) || < \epsilon = 10^{-8}$.
And finally we can compare our solution for $\beta$ with the analytic result given by
$\beta= (X^TX)^{-1} X^T \mathbf{y}$.
```
import numpy as np
"""
The following setup is just a suggestion, feel free to write it the way you like.
"""
#Setup problem described in the exercise
N = 100 #Nr of datapoints
M = 2 #Nr of features
x = np.random.rand(N) #Uniformly generated x-values in [0,1]
y = 5*x**2 + 0.1*np.random.randn(N)
X = np.c_[np.ones(N),x] #Construct design matrix
#Compute beta according to normal equations to compare with GD solution
Xt_X_inv = np.linalg.inv(np.dot(X.T,X))
Xt_y = np.dot(X.transpose(),y)
beta_NE = np.dot(Xt_X_inv,Xt_y)
print(beta_NE)
```
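A minimal sketch of the gradient descent loop described above (our own completion of the template: random starting $\beta$, constant $\gamma = 0.001$, stopping when $||\nabla_\beta C(\beta_k)|| < 10^{-8}$), reusing `X`, `y` and `beta_NE` from the cell above:
```
#Gradient descent with constant learning rate (sketch)
gamma = 0.001
eps = 1e-8
beta_GD = np.random.randn(2)            #random starting point
for k in range(100000):                 #safety cap on the number of iterations
    gradient = 2.0*np.dot(X.T, np.dot(X, beta_GD) - y)
    if np.linalg.norm(gradient) < eps:  #stop when the gradient is essentially zero
        break
    beta_GD -= gamma*gradient
print(beta_GD)                          #should agree with beta_NE above
```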
<!-- !split -->
## Gradient descent and Ridge
We have also discussed Ridge regression, where the loss function contains a regularization term given by the $L_2$ norm of $\beta$,
$$
C_{\text{ridge}}(\beta) = ||X\beta -\mathbf{y}||^2 + \lambda ||\beta||^2, \ \lambda \geq 0.
$$
In order to minimize $C_{\text{ridge}}(\beta)$ using GD we only have to adjust the gradient as follows
$$
\nabla_\beta C_{\text{ridge}}(\beta) = 2\begin{bmatrix} \sum_{i=1}^{100} \left(\beta_0+\beta_1x_i-y_i\right) \\
\sum_{i=1}^{100}\left( x_i (\beta_0+\beta_1x_i)-y_ix_i\right) \\
\end{bmatrix} + 2\lambda\begin{bmatrix} \beta_0 \\ \beta_1\end{bmatrix} = 2 (X^T(X\beta - \mathbf{y})+\lambda \beta).
$$
We can now extend our program to minimize $C_{\text{ridge}}(\beta)$ using gradient descent and compare with the analytical solution given by
$$
\beta_{\text{ridge}} = \left(X^T X + \lambda I_{2 \times 2} \right)^{-1} X^T \mathbf{y},
$$
for $\lambda \in \{0,1,10,50,100\}$ ($\lambda = 0$ corresponds to ordinary least squares).
We can then compute $||\beta_{\text{ridge}}||$ for each $\lambda$.
```
import numpy as np
"""
The following setup is just a suggestion, feel free to write it the way you like.
"""
#Setup problem described in the exercise
N = 100 #Nr of datapoints
M = 2 #Nr of features
x = np.random.rand(N)
y = 5*x**2 + 0.1*np.random.randn(N)
#Compute analytic beta for Ridge regression
X = np.c_[np.ones(N),x]
XT_X = np.dot(X.T,X)
l = 0.1 #Ridge parameter lambda
Id = np.eye(XT_X.shape[0])
Z = np.linalg.inv(XT_X+l*Id)
beta_ridge = np.dot(Z,np.dot(X.T,y))
print(beta_ridge)
print(np.linalg.norm(beta_ridge)) #||beta||
```
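And a sketch of the corresponding gradient descent loop for Ridge regression (our own, reusing `X` and `y` from the cell above and the gradient $2(X^T(X\beta-\mathbf{y})+\lambda\beta)$ from the text), looping over the requested $\lambda$ values and printing $||\beta_{\text{ridge}}||$:
```
#Gradient descent for Ridge regression (sketch)
gamma = 0.001
eps = 1e-8
for lmb in [0, 1, 10, 50, 100]:
    beta_R = np.random.randn(2)
    for k in range(100000):
        gradient = 2.0*(np.dot(X.T, np.dot(X, beta_R) - y) + lmb*beta_R)
        if np.linalg.norm(gradient) < eps:
            break
        beta_R -= gamma*gradient
    beta_exact = np.dot(np.linalg.inv(np.dot(X.T, X) + lmb*np.eye(2)), np.dot(X.T, y))
    print(lmb, np.linalg.norm(beta_R), np.linalg.norm(beta_exact))
```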
## Stochastic Gradient Descent
Stochastic gradient descent (SGD) and variants thereof address some of
the shortcomings of the Gradient descent method discussed above.
The underlying idea of SGD comes from the observation that the cost
function, which we want to minimize, can almost always be written as a
sum over $n$ datapoints $\{\mathbf{x}_i\}_{i=1}^n$,
$$
C(\mathbf{\beta}) = \sum_{i=1}^n c_i(\mathbf{x}_i,
\mathbf{\beta}).
$$
## Computation of gradients
This in turn means that the gradient can be
computed as a sum over $i$-gradients
$$
\nabla_\beta C(\mathbf{\beta}) = \sum_i^n \nabla_\beta c_i(\mathbf{x}_i,
\mathbf{\beta}).
$$
Stochasticity/randomness is introduced by only taking the
gradient on a subset of the data called minibatches. If there are $n$
datapoints and the size of each minibatch is $M$, there will be $n/M$
minibatches. We denote these minibatches by $B_k$ where
$k=1,\cdots,n/M$.
## SGD example
As an example, suppose we have $10$ datapoints $( \mathbf{x}_1,
\cdots, \mathbf{x}_{10} )$ and we choose minibatches of size $M=2$;
then there are $n/M=5$ minibatches and each minibatch contains two datapoints. In particular we have
$B_1 = (\mathbf{x}_1,\mathbf{x}_2), \cdots, B_5 =
(\mathbf{x}_9,\mathbf{x}_{10})$. Note that if you choose $M=n$ you
have only a single batch with all datapoints and, on the other extreme,
you may choose $M=1$, resulting in a minibatch for each datapoint, i.e.
$B_k = \mathbf{x}_k$.
The idea is now to approximate the gradient by replacing the sum over
all datapoints with a sum over the datapoints in one of the minibatches,
picked at random in each gradient descent step
$$
\nabla_\beta
C(\mathbf{\beta}) = \sum_{i=1}^n \nabla_\beta c_i(\mathbf{x}_i,
\mathbf{\beta}) \rightarrow \sum_{i \in B_k} \nabla_\beta
c_i(\mathbf{x}_i, \mathbf{\beta}).
$$
## The gradient step
Thus a gradient descent step now looks like
$$
\beta_{j+1} = \beta_j - \gamma_j \sum_{i \in B_k} \nabla_\beta c_i(\mathbf{x}_i,
\mathbf{\beta})
$$
where $k$ is picked at random with equal
probability from $[1,n/M]$. An iteration over the number of
minibatches ($n/M$) is commonly referred to as an epoch. Thus it is
typical to choose a number of epochs and for each epoch iterate over
the number of minibatches, as exemplified in the code below.
## Simple example code
```
import numpy as np
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 10 #number of epochs
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
        #Compute new suggestion for beta
j += 1
```
Taking the gradient only on a subset of the data has two important
benefits. First, it introduces randomness, which decreases the chance
that our optimization scheme gets stuck in a local minimum. Second, if
the size of the minibatches is small relative to the number of
datapoints ($M < n$), the computation of the gradient is much
cheaper since we sum over the datapoints in the $k$-th minibatch and not
over all $n$ datapoints.
## When do we stop?
A natural question is when do we stop the search for a new minimum?
One possibility is to compute the full gradient after a given number
of epochs and check if the norm of the gradient is smaller than some
threshold and stop if true. However, the condition that the gradient
is zero is valid also for local minima, so this would only tell us
that we are close to a local/global minimum. However, we could also
evaluate the cost function at this point, store the result and
continue the search. If the test kicks in at a later stage we can
compare the values of the cost function and keep the $\beta$ that
gave the lowest value.
## Slightly different approach
Another approach is to let the step length $\gamma_j$ depend on the
number of epochs in such a way that it becomes very small after a
reasonable time, so that eventually we hardly move at all.
As an example, let $e = 0,1,2,3,\cdots$ denote the current epoch and let $t_0, t_1 > 0$ be two fixed numbers. Furthermore, let $t = e \cdot m + i$ where $m$ is the number of minibatches and $i=0,\cdots,m-1$. Then the function $$\gamma_j(t; t_0, t_1) = \frac{t_0}{t+t_1} $$ goes to zero as the number of epochs gets large. I.e. we start with a step length $\gamma_j (0; t_0, t_1) = t_0/t_1$ which decays in *time* $t$.
In this way we can fix the number of epochs, compute $\beta$ and
evaluate the cost function at the end. Repeating the computation will
give a different result since the scheme is random by design. Then we
pick the final $\beta$ that gives the lowest value of the cost
function.
```
import numpy as np
def step_length(t,t0,t1):
return t0/(t+t1)
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 500 #number of epochs
t0 = 1.0
t1 = 10
gamma_j = t0/t1
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
#Compute new suggestion for beta
t = epoch*m+i
gamma_j = step_length(t,t0,t1)
j += 1
print("gamma_j after %d epochs: %g" % (n_epochs,gamma_j))
```
## Conjugate gradient (CG) method
The success of the CG method for finding solutions of non-linear problems is based
on the theory of conjugate gradients for linear systems of equations. It belongs
to the class of iterative methods for solving problems from linear algebra of the type
$$
\hat{A}\hat{x} = \hat{b}.
$$
In the iterative process we end up with a problem like
$$
\hat{r}= \hat{b}-\hat{A}\hat{x},
$$
where $\hat{r}$ is the so-called residual or error in the iterative process.
When we have found the exact solution, $\hat{r}=0$.
## Conjugate gradient method
The residual is zero when we reach the minimum of the quadratic equation
$$
P(\hat{x})=\frac{1}{2}\hat{x}^T\hat{A}\hat{x} - \hat{x}^T\hat{b},
$$
with the constraint that the matrix $\hat{A}$ is positive definite and symmetric.
If we search for a minimum of the quantum mechanical variance, then the matrix
$\hat{A}$, which is called the Hessian, is given by the second-derivative of the function we want to minimize. This quantity is always positive definite. In our case this corresponds normally to the second derivative of the energy.
## Conjugate gradient method, Newton's method first
We seek the minimum of the energy or the variance as function of various variational parameters.
In our case we have thus a function $f$ whose minimum we are seeking.
In Newton's method we set $\nabla f = 0$ and we can thus compute the next iteration point
$$
\hat{x}-\hat{x}_i=-\hat{A}^{-1}\nabla f(\hat{x}_i).
$$
Subtracting this equation from that of $\hat{x}_{i+1}$ we have
$$
\hat{x}_{i+1}-\hat{x}_i=\hat{A}^{-1}(\nabla f(\hat{x}_{i+1})-\nabla f(\hat{x}_i)).
$$
## Conjugate gradient method
In the CG method we define so-called conjugate directions and two vectors
$\hat{s}$ and $\hat{t}$
are said to be
conjugate if
$$
\hat{s}^T\hat{A}\hat{t}= 0.
$$
The philosophy of the CG method is to perform searches along a set of
conjugate directions $\hat{p}_i$ obeying the above criterion, namely
$$
\hat{p}_i^T\hat{A}\hat{p}_j= 0 \quad \mathrm{for}\quad i\neq j.
$$
Two vectors are conjugate if they are orthogonal with respect to
this inner product. Being conjugate is a symmetric relation: if $\hat{s}$ is conjugate to $\hat{t}$, then $\hat{t}$ is conjugate to $\hat{s}$.
## Conjugate gradient method
An example is given by the eigenvectors $\hat{v}_i$ of the matrix $\hat{A}$, for which
$$
\hat{v}_i^T\hat{A}\hat{v}_j= \lambda_j\hat{v}_i^T\hat{v}_j,
$$
which is zero unless $i=j$.
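As a quick numerical illustration (a small sketch with a randomly generated symmetric positive-definite matrix, not tied to any particular problem):
```
import numpy as np
rng = np.random.default_rng(0)
B = rng.normal(size=(4, 4))
A = B @ B.T + 4*np.eye(4)          # symmetric positive definite by construction
eigvals, eigvecs = np.linalg.eigh(A)
v1, v2 = eigvecs[:, 0], eigvecs[:, 1]
print(v1 @ A @ v2)                 # ~0: distinct eigenvectors are conjugate
print(v1 @ A @ v1, eigvals[0])     # equal, since v1 is normalized
```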
## Conjugate gradient method
Assume now that we have a symmetric positive-definite matrix $\hat{A}$ of size
$n\times n$. At each iteration $i+1$ we update the solution along a conjugate direction $\hat{p}_{i}$,
$$
\hat{x}_{i+1}=\hat{x}_{i}+\alpha_i\hat{p}_{i}.
$$
We assume that $\hat{p}_{i}$ is a sequence of $n$ mutually conjugate directions.
Then the $\hat{p}_{i}$ form a basis of $R^n$ and we can expand the solution
$ \hat{A}\hat{x} = \hat{b}$ in this basis, namely
$$
\hat{x} = \sum^{n}_{i=1} \alpha_i \hat{p}_i.
$$
## Conjugate gradient method
The coefficients are given by
$$
\hat{A}\hat{x} = \sum^{n}_{i=1} \alpha_i \hat{A} \hat{p}_i = \hat{b}.
$$
Multiplying with $\hat{p}_k^T$ from the left gives
$$
\hat{p}_k^T \hat{A}\hat{x} = \sum^{n}_{i=1} \alpha_i\hat{p}_k^T \hat{A}\hat{p}_i= \hat{p}_k^T \hat{b},
$$
and we can define the coefficients $\alpha_k$ as
$$
\alpha_k = \frac{\hat{p}_k^T \hat{b}}{\hat{p}_k^T \hat{A} \hat{p}_k}
$$
## Conjugate gradient method and iterations
If we choose the conjugate vectors $\hat{p}_k$ carefully,
then we may not need all of them to obtain a good approximation to the solution
$\hat{x}$.
We want to regard the conjugate gradient method as an iterative method.
This will allow us to solve systems where $n$ is so large that the direct
method would take too much time.
We denote the initial guess for $\hat{x}$ as $\hat{x}_0$.
We can assume without loss of generality that
$$
\hat{x}_0=0,
$$
or consider the system
$$
\hat{A}\hat{z} = \hat{b}-\hat{A}\hat{x}_0,
$$
instead.
## Conjugate gradient method
One can show that the solution $\hat{x}$ is also the unique minimizer of the quadratic form
$$
f(\hat{x}) = \frac{1}{2}\hat{x}^T\hat{A}\hat{x} - \hat{x}^T \hat{b} , \quad \hat{x}\in\mathbf{R}^n.
$$
This suggests taking the first basis vector $\hat{p}_1$
to be the gradient of $f$ at $\hat{x}=\hat{x}_0$,
which equals
$$
\hat{A}\hat{x}_0-\hat{b},
$$
and since $\hat{x}_0=0$, it equals $-\hat{b}$.
The other vectors in the basis will be conjugate to the gradient,
hence the name conjugate gradient method.
## Conjugate gradient method
Let $\hat{r}_k$ be the residual at the $k$-th step:
$$
\hat{r}_k=\hat{b}-\hat{A}\hat{x}_k.
$$
Note that $\hat{r}_k$ is the negative gradient of $f$ at
$\hat{x}=\hat{x}_k$,
so the gradient descent method would be to move in the direction $\hat{r}_k$.
Here, we insist that the directions $\hat{p}_k$ are conjugate to each other,
so we take the direction closest to the gradient $\hat{r}_k$
under the conjugacy constraint.
This gives the following expression
$$
\hat{p}_{k+1}=\hat{r}_k-\frac{\hat{p}_k^T \hat{A}\hat{r}_k}{\hat{p}_k^T\hat{A}\hat{p}_k} \hat{p}_k.
$$
## Conjugate gradient method
We can also compute the residual iteratively as
$$
\hat{r}_{k+1}=\hat{b}-\hat{A}\hat{x}_{k+1},
$$
which equals
$$
\hat{b}-\hat{A}(\hat{x}_k+\alpha_k\hat{p}_k),
$$
or
$$
(\hat{b}-\hat{A}\hat{x}_k)-\alpha_k\hat{A}\hat{p}_k,
$$
which gives
$$
\hat{r}_{k+1}=\hat{r}_k-\alpha_k\hat{A}\hat{p}_{k}.
$$
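Putting the pieces together, a minimal conjugate gradient solver could look like the sketch below. It assumes a symmetric positive-definite `A` and uses a standard variant of the update formulas above; it is meant as an illustration, not an optimized implementation.
```
import numpy as np
def conjugate_gradient(A, b, x0=None, tol=1e-10, max_iter=None):
    """Solve A x = b for a symmetric positive-definite matrix A."""
    n = len(b)
    x = np.zeros(n) if x0 is None else x0.copy()
    r = b - A @ x                       # residual = negative gradient of f
    p = r.copy()                        # first search direction
    max_iter = n if max_iter is None else max_iter
    for _ in range(max_iter):
        Ap = A @ p
        alpha = (r @ r) / (p @ Ap)
        x = x + alpha*p
        r_new = r - alpha*Ap            # r_{k+1} = r_k - alpha_k A p_k
        if np.linalg.norm(r_new) < tol:
            break
        beta = (r_new @ r_new) / (r @ r)
        p = r_new + beta*p              # next conjugate direction
        r = r_new
    return x
# quick check on a small random SPD system
rng = np.random.default_rng(1)
B = rng.normal(size=(5, 5))
A = B @ B.T + 5*np.eye(5)
b = rng.normal(size=5)
print(np.allclose(A @ conjugate_gradient(A, b), b))
```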
# System Design Primer
## Scalability Basics
### Vertical Scaling - upgrade machine to have more RAM, cores, disks, etc.
### Horizontal Scaling - getting more machines
In order to scale horizontally, we want to make sure we have the same codebase on all our servers. So how can we change code on all servers at once?
Using **clones**. Once we make one server, we can create an image (in AWS terms an AMI) which all our servers will launch from whenever they get spun up.
### Databases
**Replication** means replicating the database contents to improve availability. This concept is similar to **denormalization**, a process to speed up SQL reads at the cost of SQL writes.
To improve scalability, we can switch to a NoSQL database and just perform more complex queries in application code.
**Partitioning** - splitting the database into multiple pieces
* Caching - a way of storing information for faster retrieval
Method 1: Cached queries - hash a DB query and save it in the cache, but makes it hard to delete items
Method 2: Cached objects - store entire objects in cache, making asynchronous processing possible
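A rough Python sketch of both caching patterns; the `db_query` and `fetch_user` callables and the in-memory `cache` dict are hypothetical stand-ins for a real database layer and a cache such as Redis or Memcached.
```
import hashlib
import json
cache = {}  # stand-in for Redis/Memcached
# Method 1: cache a raw query result, keyed by a hash of the SQL text
def cached_query(sql, db_query):
    key = hashlib.sha256(sql.encode()).hexdigest()
    if key not in cache:
        cache[key] = db_query(sql)      # miss: hit the database and store the rows
    return cache[key]
# Method 2: cache a whole assembled object, which can also be rebuilt asynchronously
def get_user_profile(user_id, fetch_user):
    key = f"user:{user_id}:profile"
    if key not in cache:
        cache[key] = json.dumps(fetch_user(user_id))  # store the serialized object
    return json.loads(cache[key])
```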
### Load Balancing
A way of distributing queries across servers to balance the load.
### Reverse Proxy
Similar to a load balancer, a reverse proxy is a middleman between the client and server. They are also used in the case of a single server to improve performance, security, and flexibility.
### Asynchronism
Method 1: Precomputing and turning dynamic content into static content to be delivered quickly. But, this doesn't work if the user wants something customized.
Method 2: Use a queue to maintain a list of running and completed jobs.
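A toy sketch of the queue idea using Python's standard `queue` module (the `render_report` job is made up):
```
import queue
jobs = queue.Queue()
completed = []
# producer: the web tier enqueues work instead of doing it inline
jobs.put({"id": 1, "task": "render_report", "user": 42})
# worker: a background process drains the queue and records finished jobs
while not jobs.empty():
    job = jobs.get()
    completed.append(job["id"])   # the actual work would happen here
    jobs.task_done()
print(completed)
```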
## CAP Theorem
A distributed system can support only 2 of the following:
1) Consistency - every read receives the most recent write
2) Availability - every request receives a response (that may not be the most recent)
3) Partition tolerance - system works even if network fails
Partition tolerance is a must since networks are unreliable, so we have to give up C or A.
CP - if we want atomic reads/writes guaranteed
AP - system MUST keep working
## Domain Name System (DNS)
How to translate a domain name (www.example.com) to an IP address?
1) User asks for domain name
2) Local DNS server asks root DNS server for IP of domain name
3) Root DNS server sends back response
4) Local DNS servers will save response in cache and forward to user
5) User will get IP, cache the IP, and ask IP directly
## Content Delivery Network (CDN)
Servers located across the world that serve (usually) static content. Benefits include:
1) reduce the amount of requests your server needs to handle
2) allow users to obtain content more quickly due to proximity
## Databases (in-depth)
#### Relational Database Management System (RDBMS)
SQL Databases that are structured like tables. They follow the ACID (atomicity, consistency, isolation, and durability) principle.
How to scale an RDBMS?
- Master-Slave Replication
Master serves reads and writes and copies data to slaves. Slaves only serve reads.
Now, if a master goes down, we're in read-only mode. We also need a load balancer to handle reads.
- Master-Master Replication
Multiple masters to serve both reads and writes. They communicate to one another.
Requires a lot of extra logic on how to serve requests and communication.
- Federation
Separate database by service functionality (e.g. forums, users, products). This reduces the load on each database server.
- Sharding
Similar to federation, except data is split across servers by a shard key such as user ID rather than by functionality (see the sketch after this list).
- Denormalization
Make multiple copies of the data to reduce the number of joins when serving requests. Improves read performance at the cost of write performance.
- SQL Tuning
Improving database schema.
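A rough sketch of picking a shard by hashing a key (the shard host names are made up):
```
import hashlib
SHARDS = ["db-shard-0", "db-shard-1", "db-shard-2", "db-shard-3"]  # hypothetical hosts
def shard_for(user_id):
    # hash the key so users spread evenly across the shards
    digest = hashlib.md5(str(user_id).encode()).hexdigest()
    return SHARDS[int(digest, 16) % len(SHARDS)]
print(shard_for(42), shard_for(1337))
```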
#### NoSQL
Data is stored as keys and values (like a Python dictionary or JSON). Data is denormalized and joins are now performed in the application layer.
### SQL vs. NoSQL
SQL pros:
- structured data
- more established
- want data to be consistent
- easy to scale
NoSQL pros:
- flexible data
- fast queries
## Communication
### HTTP
Method of transmitting data between a client and a server. Operates over TCP (reliable, suited to less time-critical transfers) or UDP (lower latency, but delivery is not guaranteed).
### RPC
Method of asking a server to perform a procedure for the client (like AWS lambda).
<h2 id="Modern-Portfolio-Theory">Modern Portfolio Theory<a class="anchor-link" href="#Modern-Portfolio-Theory">¶</a></h2>
<p>Modern portfolio theory also popularly called as <strong><code>Mean-Variance Portofolio Theory</code> (MVP)</strong> is a major breakthrough in finance. It is based on the premises that returns are <strong>normally distributed</strong> and by looking at mean and variance, we can essentialy describe the distribution of end-of-period wealth.</p>
<p>The basic idea of this theory is to achieve diversification by constructuing portfolio for a minimal portfolio risk or maximal portfolio returns given a certain level of risk. Accordingly, the <strong>Efficient Frontier</strong> is a set of optimal portfolios in the risk-return spectrum and portfolios located under the Efficient Frontier curve are considered sub-optimal.</p>
<p>This means that the portfolios on the frontier offer</p>
<ul>
<li>Highest expected return for a given level of risk</li>
<li>Lowest level of risk for a given level of expected returns</li>
</ul>
<p>In essence, the investors goal should be to select a level of risk that he/she is comfortable with and then find a portfolio that maximizes returns based on the selected risk level.</p>
<h3 id="Import-libraries">Import libraries<a class="anchor-link" href="#Import-libraries">¶</a></h3>
```
import pandas as pd
import xlwings as xw
import numpy as np
from numpy import *
from numpy.linalg import multi_dot
import matplotlib.pyplot as plt
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 16, 8
from openpyxl import Workbook, load_workbook
```
We will use the FAANG stocks as before to build our portfolio.
```
# FAANG stocks
symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'NFLX' ]
numofasset = len(symbols)
numofportfolio = 5000
```
<h3 id="Retrive-Data">Retrive Data<a class="anchor-link" href="#Retrive-Data">¶</a></h3>
```
# Load locally stored data
df = pd.read_csv('faang_stocks.csv', index_col=0, parse_dates=True)['2013':]
# Check first 5 values
df.head()
```
<h3 id="View-Data-in-Excel">View Data in Excel<a class="anchor-link" href="#View-Data-in-Excel">¶</a></h3>
```
# View data in Excel
xw.view(df)
# Create a new Excel workbook
wb = xw.Book(r'portfolio.xlsx') # Book by full name
```
<h3 id="Descriptive-Statistics">Descriptive Statistics<a class="anchor-link" href="#Descriptive-Statistics">¶</a></h3>
```
summary = df.describe().T
summary
```
<h3 id="Visualize-Data">Visualize Data<a class="anchor-link" href="#Visualize-Data">¶</a></h3>
```
# Visualize the data
fig = plt.figure(figsize=(16,8))
ax = plt.axes()
ax.set_title('Normalized Price Plot')
ax.plot(df[-252:]/df.iloc[-252] * 100)
ax.legend(df.columns, loc='upper left')
ax.grid(True)
```
<h3 id="Calculate-Returns">Calculate Returns<a class="anchor-link" href="#Calculate-Returns">¶</a></h3>
```
# Calculate returns
returns = df.pct_change().fillna(0)
returns.head()
```
<h4 id="Annualized-Returns">Annualized Returns<a class="anchor-link" href="#Annualized-Returns">¶</a></h4><p>In <strong>MVP</strong>, the average returns play an important role as they are used to approximate the expected returns.</p>
```
# Calculate annual returns
annual_returns = (returns.mean() * 252)
annual_returns
# Visualize the data
fig = plt.figure()
ax =plt.axes()
ax.bar(annual_returns.index, annual_returns*100, color='royalblue', alpha=0.75)
ax.set_title('Annualized Returns (in %)');
```
<h3 id="Calculate-Volatility">Calculate Volatility<a class="anchor-link" href="#Calculate-Volatility">¶</a></h3>
```
vols = returns.std()
vols
```
<h4 id="Annualized-Volatilities">Annualized Volatilities<a class="anchor-link" href="#Annualized-Volatilities">¶</a></h4>
```
# Calculate annualized volatilities
annual_vols = vols*sqrt(252)
annual_vols
# Visualize the data
fig = plt.figure()
ax = plt.axes()
ax.bar(annual_vols.index, annual_vols*100, color='orange', alpha=0.5)
ax.set_title('Annualized Volatility (in %)');
```
<h2 id="Portfolio-Statistics">Portfolio Statistics<a class="anchor-link" href="#Portfolio-Statistics">¶</a></h2><p>Consider a portfolio fully invested in risky assets. Let $w$ and $\mu$ be the vector of weights and mean returns of <em>n</em> assets. <br/><br/></p>
$$\ {w=}\left(
\begin{array}{c}
w_1 \\
w_2 \\
\vdots \\
w_n \\
\end{array}%
\right);
\ \mathbf{\mu=}\left(
\begin{array}{ccc}
\mu_1 \\
\mu_2 \\
\vdots \\
\mu_n \\
\end{array}%
\right)$$<p></p>
<p>where the $\sum_{i=1}^{n}w_i=1$</p>
<p><strong>Expected Portfolio Return</strong> is then the dot product of the expected returns and their weights. <br/><br/></p>
$$\mu_\pi = w^T\cdot\mu$$<p>which is also equivalent to the $\Sigma_{i=1}^{n}w_i\mu_i$</p>
<p><strong>Expected Portfolio Variance</strong> is then the multidot product of weights and the covariance matrix. <br/><br/></p>
$$\sigma^2_\pi = w^T\cdot\Sigma\cdot w $$<p>where, ${\Sigma}$ is the covariance matrix</p>
$${\Sigma=}\left(
\begin{array}{ccc}
\Sigma_{1,1} & \dots & \Sigma_{1,n} \\
\vdots & \ddots & \vdots \\
\Sigma_{n,1} & \dots & \Sigma_{n,n} \\ %
\end{array}%
\right)$$
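As a tiny worked illustration of these two formulas, here is a two-asset example with made-up numbers (not taken from the FAANG data used below):
```
import numpy as np
w = np.array([0.6, 0.4])                      # weights, sum to 1
mu = np.array([0.10, 0.06])                   # expected annual returns
Sigma = np.array([[0.04, 0.01],
                  [0.01, 0.02]])              # annualized covariance matrix
port_ret = w @ mu                             # w^T mu = 0.084
port_var = w @ Sigma @ w                      # w^T Sigma w = 0.0224
print(port_ret, port_var, np.sqrt(port_var))  # volatility ~ 0.15
```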
<h3 id="Equal-Weighted-Portfolio">Equal Weighted Portfolio<a class="anchor-link" href="#Equal-Weighted-Portfolio">¶</a></h3><p>Assume a portoflio composed of all five stocks with equal weighting. We will now calculate the portfolio statistics.</p>
```
wts = numofasset * [1./numofasset]
array(wts).shape
wts = array(wts)[:,newaxis]
wts
wts.shape
```
<h3 id="Portfolio-Return">Portfolio Return<a class="anchor-link" href="#Portfolio-Return">¶</a></h3>
```
array(returns.mean() * 252)[:,newaxis]
array(returns.mean() * 252)[:,newaxis].shape
# Portfolio returns
wts.T @ array(returns.mean() * 252)[:,newaxis]
```
<h3 id="Portfolio-Volatility">Portfolio Volatility<a class="anchor-link" href="#Portfolio-Volatility">¶</a></h3>
```
# Covariance matrix
returns.cov() * 252
# Portfolio variance
multi_dot([wts.T,returns.cov()*252,wts])
# Portfolio volatility
sqrt(multi_dot([wts.T,returns.cov()*252,wts]))
```
<h3 id="Portfolio-statistics">Portfolio statistics<a class="anchor-link" href="#Portfolio-statistics">¶</a></h3><p>Let's subsume key statistics into a function which can be used for optimization exercise.</p>
```
def portfolio_stats(weights):
weights = array(weights)[:,newaxis]
port_rets = weights.T @ array(returns.mean() * 252)[:,newaxis]
port_vols = sqrt(multi_dot([weights.T, returns.cov() * 252, weights]))
return np.array([port_rets, port_vols, port_rets/port_vols]).flatten()
%pip install pyfolio
import pyfolio as pf
ewp = returns@wts
ewp.columns =['ret']
ewp.cumsum().iloc[-1]
pf.create_simple_tear_sheet(ewp['ret'])
plt.plot((1+ewp['ret']).cumprod())
```
<h2 id="Portfolio-Simulation">Portfolio Simulation<a class="anchor-link" href="#Portfolio-Simulation">¶</a></h2><p>Now, we will implement a Monte Carlo simulation to generate random portfolio weights on a larger scale and calculate the expected portfolio return, variance and sharpe ratio for every simulated allocation. We will then identify the portfolio with a highest return for per unit of risk.</p>
```
w = random.random(numofasset)[:, newaxis]
w
w /= sum(w)
w
w.shape, sum(w)
w.flatten()
# Initialize the lists
rets = []; vols = []; wts = []
# Simulate 5,000 portfolios
for i in range (5000):
# Generate random weights
weights = random.random(numofasset)[:, newaxis]
# Set weights such that sum of weights equals 1
weights /= sum(weights)
# Portfolio statistics
rets.append(weights.T @ array(returns.mean() * 252)[:, newaxis])
vols.append(sqrt(multi_dot([weights.T, returns.cov()*252, weights])))
wts.append(weights.flatten())
# Record values
port_rets = array(rets).flatten()
port_vols = array(vols).flatten()
port_wts = array(wts)
port_rets
port_vols
port_wts
port_rets.shape, port_vols.shape, port_wts.shape
# Create a dataframe for analysis
mc_df = pd.DataFrame({'returns': port_rets,
'volatility': port_vols,
'sharpe_ratio': port_rets/port_vols,
'weights': list(port_wts)})
mc_df.head()
```
<h3 id="Summary-Statistics">Summary Statistics<a class="anchor-link" href="#Summary-Statistics">¶</a></h3>
```
# Summary Statistics
mc_df.describe().T
```
<h3 id="Maximum-Sharpe-Ratio-Portfolio">Maximum Sharpe Ratio Portfolio<a class="anchor-link" href="#Maximum-Sharpe-Ratio-Portfolio">¶</a></h3>
```
# Max sharpe ratio portfolio
msrp = mc_df.iloc[mc_df['sharpe_ratio'].idxmax()]
msrp
# Max sharpe ratio portfolio weights
max_sharpe_port_wts = mc_df['weights'][mc_df['sharpe_ratio'].idxmax()]
# Allocation to achieve max sharpe ratio portfolio
dict(zip(symbols,np.around(max_sharpe_port_wts*100,2)))
```
<h3 id="Visulaize-Simulated-Portfolio">Visulaize Simulated Portfolio<a class="anchor-link" href="#Visulaize-Simulated-Portfolio">¶</a></h3>
```
# Visualize the simulated portfolio for risk and return
fig = plt.figure()
ax = plt.axes()
ax.set_title('Monte Carlo Simulated Allocation')
# Simulated portfolios
fig.colorbar(ax.scatter(port_vols, port_rets, c=port_rets / port_vols,
marker='o', cmap='RdYlGn', edgecolors='black'), label='Sharpe Ratio')
# Maximum sharpe ratio portfolio
ax.scatter(msrp['volatility'], msrp['returns'], c='red', marker='*', s = 300, label='Max Sharpe Ratio')
ax.set_xlabel('Expected Volatility')
ax.set_ylabel('Expected Return')
ax.grid(True)
```
<h2 id="Efficient-Frontier">Efficient Frontier<a class="anchor-link" href="#Efficient-Frontier">¶</a></h2>
<p>The Efficient Frontier is formed by a set of portfolios offering the highest expected portfolio return for a certain volatility or offering the lowest volatility for a certain level of expected returns.</p>
<p><strong>Return objective</strong>:</p>
$$\underset{w_1,w_2,\dots,w_n}{minimize} \space\space \sigma^2_{p}(w_1,w_2,\dots,w_n)$$<p>subject to,</p>
$$E[R_p] = m$$<p><strong>Risk constraint</strong>:</p>
$$\underset{w_1,w_2,\dots,w_n}{maximize} \space\space E[R_p(w_1,w_2,\dots,w_n)]$$<p>subject to,</p>
$$\sigma^2_{p}(w_1,w_2,\dots,w_n)=v^2$$<p>where, $\sum_{i=1}^{n}w_i=1$ for the above objectives.</p>
<p>We can use numerical optimization to achieve this objective. The goal of optimization is to find the optimal value of the objective function by adjusting the target variables operating withing some boundary conditions and constraints.</p>
<h3 id="Constrained-Optimization">Constrained Optimization<a class="anchor-link" href="#Constrained-Optimization">¶</a></h3><p>Construction of optimal portfolios is a constrained optimization problem where we specify some boundary conditions and constraints. The objective function here is a function returning maximum sharpe ratio, minimum variance (volatility) and the target variables are portfolio weights. We will use the <em><code>minimize</code></em> function from <code>scipy</code> optimization module to achieve our objective.</p>
<blockquote><div class="highlight"><pre><span></span><span class="n">sco</span><span class="o">.</span><span class="n">minimize</span><span class="p">(</span><span class="n">fun</span><span class="p">,</span> <span class="n">x0</span><span class="p">,</span> <span class="n">args</span><span class="o">=</span><span class="p">(),</span> <span class="n">method</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">jac</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">hess</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">hessp</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
<span class="n">bounds</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">constraints</span><span class="o">=</span><span class="p">(),</span> <span class="n">tol</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">callback</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">options</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
</pre></div>
</blockquote>
```
# Import optimization module from scipy
import scipy.optimize as sco
```
<h4 id="Maximum-sharpe-ratio-portfolio">Maximum sharpe ratio portfolio<a class="anchor-link" href="#Maximum-sharpe-ratio-portfolio">¶</a></h4>
```
# Maximizing sharpe ratio
def min_sharpe_ratio(weights):
    return -portfolio_stats(weights)[2]  # negate the Sharpe ratio so the minimizer maximizes it
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bnds = tuple((0, 1) for x in range(numofasset))
initial_wts = numofasset*[1./numofasset]
# Optimizing for maximum sharpe ratio
opt_sharpe = sco.minimize(min_sharpe_ratio, initial_wts, method='SLSQP', bounds=bnds, constraints=cons)
opt_sharpe
# Portfolio weights
list(zip(symbols,np.around(opt_sharpe['x']*100,2)))
# Portfolio stats
stats = ['Returns', 'Volatility', 'Sharpe Ratio']
list(zip(stats,np.around(portfolio_stats(opt_sharpe['x']),4)))
```
<h4 id="Minumum-variance-portfolio">Minumum variance portfolio<a class="anchor-link" href="#Minumum-variance-portfolio">¶</a></h4>
```
# Minimize the variance
def min_variance(weights):
return portfolio_stats(weights)[1]**2
# Optimizing for minimum variance
opt_var = sco.minimize(min_variance, initial_wts, method='SLSQP', bounds=bnds, constraints=cons)
opt_var
# Portfolio weights
list(zip(symbols,np.around(opt_var['x']*100,2)))
# Portfolio stats
list(zip(stats,np.around(portfolio_stats(opt_var['x']),4)))
```
<h4 id="Efficient-Frontier-portfolio">Efficient Frontier portfolio<a class="anchor-link" href="#Efficient-Frontier-portfolio">¶</a></h4><p>For efficient frontier portfolios, we fix a target return and derive for objective function.</p>
```
# Minimize the volatility
def min_volatility(weights):
return portfolio_stats(weights)[1]
targetrets = linspace(0.22,0.50,100)
tvols = []
for tr in targetrets:
ef_cons = ({'type': 'eq', 'fun': lambda x: portfolio_stats(x)[0] - tr},
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
opt_ef = sco.minimize(min_volatility, initial_wts, method='SLSQP', bounds=bnds, constraints=ef_cons)
tvols.append(opt_ef['fun'])
targetvols = array(tvols)
# Visualize the simulated portfolio for risk and return
fig = plt.figure()
ax = plt.axes()
ax.set_title('Efficient Frontier Portfolio')
# Efficient Frontier
fig.colorbar(ax.scatter(targetvols, targetrets, c=targetrets / targetvols,
marker='x', cmap='RdYlGn', edgecolors='black'), label='Sharpe Ratio')
# Maximum Sharpe Portfolio
ax.plot(portfolio_stats(opt_sharpe['x'])[1], portfolio_stats(opt_sharpe['x'])[0], 'r*', markersize =15.0)
# Minimum Variance Portfolio
ax.plot(portfolio_stats(opt_var['x'])[1], portfolio_stats(opt_var['x'])[0], 'b*', markersize =15.0)
ax.set_xlabel('Expected Volatility')
ax.set_ylabel('Expected Return')
ax.grid(True)
```
<h1 id="References">References<a class="anchor-link" href="#References">¶</a></h1><ul>
<li><p>Numpy linear algebra documentation <a href="https://numpy.org/doc/stable/reference/routines.linalg.html">https://numpy.org/doc/stable/reference/routines.linalg.html</a></p>
</li>
<li><p>Scipy optimization function documentation <a href="https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html">https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html</a></p>
</li>
<li><p>Xlwings documentation <a href="https://docs.xlwings.org/en/stable/index.html">https://docs.xlwings.org/en/stable/index.html</a></p>
</li>
<li><p>Yves Hilpisch (2018), Python For Finance: Analyze Big Financial Data</p>
</li>
</ul>
# Happy 2017
We have some GPX files which we want to plot on a nice map.
A lot of the code below is based on [this python4oceanographers blog post](https://ocefpaf.github.io/python4oceanographers/blog/2014/08/18/gpx/).
First we import some packages necessary to achieve what we want.
```
import matplotlib.pyplot as plt
%matplotlib inline
import numpy
import gpxpy
import mplleaflet
import glob
import os
import pandas
plt.rcParams['figure.figsize'] = (16, 9) # Size up figures a bit
```
To quickly ingest the GPX files, we write (or actually copy from the aforementioned blog post) a parsing function.
```
def load_run_data(gpx_path, filter=""):
gpx_files = glob.glob(os.path.join(gpx_path, filter + "*.gpx"))
run_data = []
for file_idx, gpx_file in enumerate(gpx_files):
gpx = gpxpy.parse(open(gpx_file, 'r'))
# Loop through tracks
for track_idx, track in enumerate(gpx.tracks):
track_name = track.name
track_time = track.get_time_bounds().start_time
track_length = track.length_3d()
track_duration = track.get_duration()
track_speed = track.get_moving_data().max_speed
for seg_idx, segment in enumerate(track.segments):
segment_length = segment.length_3d()
for point_idx, point in enumerate(segment.points):
run_data.append([file_idx, os.path.basename(gpx_file), track_idx, track_name,
track_time, track_length, track_duration, track_speed,
seg_idx, segment_length, point.time, point.latitude,
point.longitude, point.elevation, segment.get_speed(point_idx)])
return run_data
```
For easy handling, we load the GPX tracks in their subfolder to a [Pandas](https://pypi.python.org/pypi/pandas) `dataframe` and display the first few entries of the data frame.
```
data = load_run_data(gpx_path='gpxtracks', filter="")
df = pandas.DataFrame(data, columns=['File_Index', 'File_Name', 'Index', 'Name',
'Time', 'Length', 'Duration', 'Max_Speed',
'Segment_Index', 'Segment_Length', 'Point_Time', 'Point_Latitude',
'Point_Longitude', 'Point_Elevation', 'Point_Speed'])
df.head()
```
At last we plot the tracks on top of the *awesome* [Stamen watercolor maps](http://maps.stamen.com/watercolor/#12/46.9413/7.4310).
```
for i in range(max(df['File_Index'])+1):
plt.plot(df['Point_Longitude'][df['File_Index']==i],
df['Point_Latitude'][df['File_Index']==i],
color='black', linewidth=7)
mplleaflet.display(tiles='stamen_wc')
# Save as HTML page, so we can print the image big...
for i in range(max(df['File_Index'])+1):
plt.plot(df['Point_Longitude'][df['File_Index']==i],
df['Point_Latitude'][df['File_Index']==i],
color='black', linewidth=7)
mplleaflet.save_html(tiles='stamen_wc', fileobj='2017.html')
```
```
import copy
import datetime
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import bumps
import os
import math
from numpy import exp, linspace, random
from scipy.optimize import curve_fit
from scipy import stats
originpath = '../Documents/data'
path = originpath + '/conductivity'
"""
Loading in the data and meta-data, do not edit unless necessary for os differences.
"""
# sample meta-data
info_path = path + "/Sample_Info.csv"
corrected_path = path + "/Corrected_wtPercents.csv"
sample_info = pd.read_csv(info_path)
corrected_info = pd.read_csv(corrected_path)
names = {}
cps = {}
matrix = {}
solvent_names = {}
target = {} # target weight fraction of conjugated polymer
actual = {} # corrected weight fraction of conjugated polymer from UV-vis data
for index, row in sample_info.iterrows():
sam_no = row['Sample']
names[sam_no] = row['Name']
cps[sam_no] = row['Conjugated Polymer']
matrix[sam_no] = row['Commodity Polymer']
solvent_names[sam_no] = row['Solvent']
# change this if target fraction is already in wt percent, rather than wt fraction
target[sam_no] = float(row['Target Fraction'])*100
for index, row in corrected_info.iterrows():
sam_no = row['Sample']
# change this if actual fraction is already in wt percent rather than wt fraction
actual[sam_no] = float(row['average actual wt pct'])
"""
Importing conductivity data
"""
data_path = path + '/conductivity_data.csv'
#importing into dictionary: {sampleno:measurement}
con_data = {}
with open(data_path, 'r') as file:
    the_lines = file.readlines()[2:]
for each in the_lines:
split_line = each.split(",")
con_data[split_line[0]] = float(split_line[15][:-1])
print(con_data)
# useful dictionaries with labels and colors for the plots and their legends
wt_names = {}
full_names = {}
wt_colors = {}
solvent_colors = {}
cp_colors = {}
rep_colors = {}
rep_names = {}
fraction = {}
temp_wt_colors = {
0.1: 'firebrick',
0.5: 'darkorange',
1.0: 'darkcyan',
5.0: 'mediumblue',
7.5: 'lime',
10.0: 'deeppink',
17.5: 'goldenrod',
25.0: 'darkorchid',
50.0: 'forestgreen',
0.0: 'black'
}
temp_solvent_colors = {
'Chloroform': 'firebrick',
'Bromobenzene': 'darkorange',
'Toluene': 'darkcyan',
'Slow Dry Chloroform': 'darkorchid'
}
temp_cp_colors = {
'RRe-P3HT': 'firebrick',
'RRa-P3HT': 'darkorange',
'P3DDT': 'darkorchid',
'PQT-12': 'darkcyan',
'None': 'black'
}
cp_marks = {
'RRe-P3HT': 'o',
'RRa-P3HT': '^',
'P3DDT': 's',
'PQT-12': 'D',
}
mfc = {
'RRe-P3HT':
'RRe-P3HT'
}
for key in names.keys():
if key in actual.keys():
frac = actual[key]
fraction[key] = actual[key]
else:
frac = target[key]
fraction[key] = target[key]
frac = np.round(frac,2)
if cps[key] == 'None':
wt_names[key] = matrix[key] + ' Control'
full_names[key] = matrix[key] + ' Control'
else:
wt_names[key] = str(frac) + ' wt% ' + cps[key]
full_names[key] = str(frac) + ' wt% ' + cps[key] + ' in ' + matrix[key]
for key in cps.keys():
wt_colors[key] = temp_wt_colors[target[key]]
solvent_colors[key] = temp_solvent_colors[solvent_names[key]]
cp_colors[key] = temp_cp_colors[cps[key]]
solubility = {}
solubility['Bromobenzene'] = 84.1
solubility['Chloroform'] = 14.1
solubility['Toluene'] = 0.7
solubility['Slow Dry Chloroform'] = 14.1
polarity = {}
polarity['Bromobenzene'] = 5.17
polarity['Chloroform'] = 4.81
polarity['Toluene'] = 2.38
polarity['Slow Dry Chloroform'] = 4.81
figure_path = path +'/figures'
if 'figures' in os.listdir(path):
print('figures path already exists')
else:
os.mkdir(figure_path)
"""
here, we group the conductivity data by fraction weight percent and then take avg, std dev of these groups
We then plot this average and error as a single point.
"""
con_group = {}
con_group_1 = {}
con_group_2 = {}
old =[]
new =[]
for num in con_data:
if 'e' in num:
new.append(num)
else:
old.append(num)
for sample in old:
x = fraction[sample]
y = con_data[sample]
if y > 0.000000000001:
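        # nested dict: polymer -> solvent -> target wt% -> list of [wt fraction, conductivity] pairs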
        con_group_1.setdefault(cps[sample], {})
        con_group_1[cps[sample]].setdefault(solvent_names[sample], {})
        con_group_1[cps[sample]][solvent_names[sample]].setdefault(target[sample], []).append([x, y])
for polymer in con_group_1:
for solvent in con_group_1[polymer]:
for targ in con_group_1[polymer][solvent]:
x = np.average([item[0] for item in con_group_1[polymer][solvent][targ]])
dx = np.std([item[0] for item in con_group_1[polymer][solvent][targ]])
y = np.average([item[1] for item in con_group_1[polymer][solvent][targ]])
dy = np.std([item[1] for item in con_group_1[polymer][solvent][targ]])
con_group_1[polymer][solvent][targ] = [x, y, dx, dy]
for sample in new:
x = fraction[sample]
y = con_data[sample]
if y > 0.000000000001:
        con_group_2.setdefault(cps[sample], {})
        con_group_2[cps[sample]].setdefault(solvent_names[sample], {})
        con_group_2[cps[sample]][solvent_names[sample]].setdefault(target[sample], []).append([x, y])
for polymer in con_group_2:
for solvent in con_group_2[polymer]:
for targ in con_group_2[polymer][solvent]:
x = np.average([item[0] for item in con_group_2[polymer][solvent][targ]])
dx = np.std([item[0] for item in con_group_2[polymer][solvent][targ]])
y = np.average([item[1] for item in con_group_2[polymer][solvent][targ]])
dy = np.std([item[1] for item in con_group_2[polymer][solvent][targ]])
con_group_2[polymer][solvent][targ] = [x, y, dx, dy]
name_of_figure = 'conductivity of all RRe-P3HT'
filename = figure_path + '/' + name_of_figure
plt.figure(figsize=(6,6))
polymer = 'P3DDT'
dict1 = con_group_1
for solvent in dict1[polymer]:
for targ in dict1[polymer][solvent]:
x = dict1[polymer][solvent][targ][0]
xerr = dict1[polymer][solvent][targ][2]
y = dict1[polymer][solvent][targ][1]
yerr = dict1[polymer][solvent][targ][3]
plt.errorbar(x, y, xerr= xerr, yerr= yerr, fmt='s', c = temp_solvent_colors[solvent],mfc = 'white', mew =2, ms=8)
plt.errorbar([-10], [-10],fmt='o', label= solvent, c = temp_solvent_colors[solvent], ms=8)
polymer = 'RRe-P3HT'
dict1 = con_group_1
for solvent in dict1[polymer]:
for targ in dict1[polymer][solvent]:
x = dict1[polymer][solvent][targ][0]
xerr = dict1[polymer][solvent][targ][2]
y = dict1[polymer][solvent][targ][1]
yerr = dict1[polymer][solvent][targ][3]
plt.errorbar(x, y, xerr= xerr, yerr= yerr, fmt='o', c = temp_solvent_colors[solvent], ms=8)
polymer = 'RRe-P3HT'
dict1 = con_group_2
for solvent in dict1[polymer]:
for targ in dict1[polymer][solvent]:
x = dict1[polymer][solvent][targ][0]
xerr = dict1[polymer][solvent][targ][2]
y = dict1[polymer][solvent][targ][1]
yerr = dict1[polymer][solvent][targ][3]
plt.errorbar(x, y, xerr= xerr, yerr= yerr, fmt='o', c = temp_solvent_colors[solvent],mfc='white',mew=1, ms=8)
plt.errorbar([-10], [-10],fmt='o', label= 'RRe P3HT', c = 'black', ms=8)
plt.errorbar([-10], [-10],fmt='o', label= 'RRe P3HT 2', c = 'black', mfc = 'white', mew =2,ms =8)
plt.errorbar([-10], [-10],fmt='s', label= 'P3DDT', c = 'black', mfc = 'white', mew =2,ms =8)
plt.xlabel('Conjugated Polymer wt%', fontsize=16)
plt.ylabel('Conductivity (S/m)', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xscale('log')
plt.yscale('log')
plt.title('Conductivity Measurements RRe-P3HT',fontsize=16)
plt.legend(fontsize=12,loc='upper left',title=None, title_fontsize=12)# ncol=3)
plt.tight_layout()
plt.savefig(filename, dpi=300, bbox_inches='tight')
waxspath= originpath + '/waxs'
peak_ratios = pd.read_csv(waxspath + '/ratios_list.csv')
peakratio = {}
fractions = {}
for index,row in peak_ratios.iterrows():
peakratio[row['sample']] = row['ratio']
fractions[row['sample']] = row['fraction']
name_of_figure = 'conductivity vs peak ratio'
filename = figure_path + '/' + name_of_figure
plt.figure(figsize=(6,6))
xlist = []
ylist = []
for sample in old:
if sample in peakratio.keys():
if con_data[sample] > 0.000000000001:
x=peakratio[sample]
y=con_data[sample]
plt.errorbar(x, y, fmt='o', c = temp_solvent_colors[solvent], mew=2, ms=8)
xlist.append(x)
ylist.append(y)
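# power-law fit: linear regression of log(conductivity) against log(peak ratio)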
regression = stats.linregress(np.log(xlist),np.log(ylist))
print('old:',regression)
xline = np.linspace(min(xlist),max(xlist),100)
yline = np.exp(np.log(xline)*regression[0]+regression[1])
plt.plot(xline,yline, c = temp_solvent_colors[solvent])
xlist = []
ylist = []
samplelist = []
for sample in new:
if sample in peakratio.keys():
if con_data[sample] > 0.000000000001:
x=peakratio[sample]
y=con_data[sample]
plt.errorbar(x, y, fmt='o', c = temp_solvent_colors[solvent], mfc='white', mew=2, ms=8)
xlist.append(x)
ylist.append(y)
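# drop the sample with the largest peak ratio before fitting (presumably an outlier)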
ind = xlist.index(np.max(xlist))
del xlist[ind]
del ylist[ind]
regression = stats.linregress(np.log(xlist),np.log(ylist))
print('new:',regression)
xline = np.linspace(min(xlist),max(xlist),100)
yline = np.exp(np.log(xline)*regression[0]+regression[1])
plt.plot(xline,yline, c = temp_solvent_colors[solvent], ls = 'dashed')
plt.errorbar([-10], [-10],fmt='o', label= 'RRe P3HT', c = 'black', ms=8)
plt.errorbar([-10], [-10],fmt='o', label= 'RRe P3HT 2', c = 'black', mfc = 'white', mew =2,ms =8)
plt.xlabel('Pi-Stacking to Lamellar Crystal Peak Height Ratio', fontsize=16)
plt.ylabel('Conductivity (S/m)', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xscale('log')
plt.yscale('log')
#plt.ylim(0, 0.0000001)
#plt.xlim(0, 1.5)
plt.title('Conductivity vs Crystal Peak Height Ratio',fontsize=16)
plt.legend(fontsize=12,loc='upper left',title=None, title_fontsize=12)# ncol=3)
plt.tight_layout()
plt.savefig(filename, dpi=300, bbox_inches='tight')
```
<a href="https://colab.research.google.com/github/msmsd778/multiple-linear-regression/blob/main/Multiple_Linear_Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Importing Needed packages
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
%matplotlib inline
```
# Downloading Data
```
!wget -O FuelConsumption.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%202/data/FuelConsumptionCo2.csv
```
# Reading the data in
```
df = pd.read_csv("FuelConsumption.csv")
df.head()
cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY','FUELCONSUMPTION_COMB','CO2EMISSIONS']]
cdf.head(9)
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
```
# Creating train and test dataset
```
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]
```
# Train data distribution
```
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
```
# Multiple Regression Model
```
from sklearn import linear_model
regr = linear_model.LinearRegression()
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(x, y)
print('Coefficients: ', regr.coef_)
```
# Ordinary Least Squares (OLS)
## Prediction
```
y_hat = regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB']])
y = np.asanyarray(test[['CO2EMISSIONS']])
print("Residual sum of squares: %.2f" % np.mean((y_hat - y) ** 2))
print('Variance score: %.2f' % regr.score(x, y))
```

Using __FUELCONSUMPTION_CITY__ and __FUELCONSUMPTION_HWY__ instead of __FUELCONSUMPTION_COMB__.
```
regr = linear_model.LinearRegression()
x = np.asanyarray(train[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(x, y)
print('Coefficients: ', regr.coef_)
y_hat = regr.predict(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
x = np.asanyarray(test[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_CITY','FUELCONSUMPTION_HWY']])
y = np.asanyarray(test[['CO2EMISSIONS']])
print("Residual sum of squares: %.2f" % np.mean((y_hat - y) ** 2))
print('Variance score: %.2f' % regr.score(x, y))
```
# Neural Networks, gradient descent, and regression, no tears
In this notebook, we show how to use neural networks (NNs) with [PyTorch](https://pytorch.org/) to solve a linear regression problem using different gradient descent methods. The three gradient descent methods we will look at are
* batch gradient descent,
* stochastic gradient descent, and
* mini-batch gradient descent.
If you recall, ordinary least squares (OLS) regression problems of the form $y = \beta + w_1 x_1 + \ldots + w_n x_n$ are classically solved by minimizing the squared error, which is equivalent to __maximum likelihood estimation__ under Gaussian noise. The weights, $w = \{w_1, \ldots, w_n\}$, of a regression model are estimated through a __loss function__. Initially, we guess what the weights are, and then we make predictions $\hat{y}$ based on the weights. The predictions $\hat{y}$ will differ from the true values $y$. The loss function is often written as $L(y, \hat{y}) = (y - \hat{y})^2$. The partial derivative of the loss function with respect to each weight tells us both the direction (sign) and the magnitude of the adjustment to make. Typically, the weights are adjusted as follows (a small NumPy sketch of this update appears after the symbol definitions below).
$w^{*}_{j} = w_j - \alpha \frac{\partial L}{\partial w_j}$
where
* $w^{*}_{j}$ is the j-th new weight
* $w_j$ is the j-th current weight
* $\alpha$ is the learning rate
* $\frac{\partial L}{\partial w_j}$ is the partial derivative of the loss function $L$ with respect to the weight $w_j$
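To make this concrete, here is a minimal NumPy sketch (not part of the original notebook) of a single update for one training example; the example values, the zero initialization, and the learning rate of 0.01 are illustrative assumptions only.
```
import numpy as np
# one (x, y) pair, with the bias folded in as x[0] = 1 (illustrative values)
x = np.array([1.0, 2.5])
y = 5.0 + 2.0 * x[1]
w = np.array([0.0, 0.0])      # initial guess for [beta, w_1]
alpha = 0.01                  # learning rate (assumed)
y_hat = w @ x                 # prediction
grad = 2.0 * (y_hat - y) * x  # dL/dw for L = (y - y_hat)^2
w = w - alpha * grad          # w_j <- w_j - alpha * dL/dw_j
```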
There are many ways to actually use the loss function, and thus the errors, to adjust the weights.
* We can feed one example $x = \{ x_1, \ldots, x_n \}$ to the model to get a prediction $\hat{y}$, compute the error (loss), and then use the error to adjust the weights. Typically, we want to randomize the inputs $x$ before feeding them one at a time to the model. We feed one example at a time until we run out of examples. This approach is called stochastic gradient descent.
* We can feed all examples to the model to get a vector of predictions, compute the average error, and then use this average error to adjust the weights. This approach is called batch gradient descent.
* We can feed a small number of examples to the model, compute the corresponding average error, and then use this average error to adjust the weights. Each of these small sets of examples fed into the model is called a __batch__, and there are many ways to decide the size of the batch. We feed the model as many batches as we have until we run out of examples. This approach is called mini-batch gradient descent.
After we iterate over all examples, no matter the method (batch, stochastic, or mini-batch), this completion is termed an __epoch__; meaning, in one epoch, we have used all the training examples to adjust the weights. As you will see below, we need to run through many epochs when using NNs to learn the weights for an OLS regression model. All the NNs shown below have only two layers, an input and an output layer, so the architectures are identical; the difference lies in training them through batch, stochastic, or mini-batch gradient descent.
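As a framework-free sketch (not from the original notebook), the helper below runs one epoch over shuffled data; the array names `X_np` and `y_np` and the `update` callback are assumptions. Setting `batch_size` to 1 gives stochastic gradient descent, setting it to `n` gives batch gradient descent, and any `k` in between gives mini-batch gradient descent.
```
import numpy as np
def run_epoch(X_np, y_np, update, batch_size):
    n = len(X_np)
    indices = np.random.permutation(n)         # shuffle once per epoch
    for start in range(0, n, batch_size):
        batch = indices[start:start + batch_size]
        update(X_np[batch], y_np[batch])       # one weight update per batch of examples
```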
# Simulate data
Let's simulate our data to follow the equation $y = 5.0 + w_1 x_1 + \epsilon$ with $w_1 = 2$, where
* $x_1 \sim \mathcal{N}(2, 1)$; $x_1$ is sampled from a normal distribution with mean 2 and standard deviation 1
* $\epsilon \sim \mathcal{N}(0, 1)$; $\epsilon$ is the error term, sampled from a normal distribution with mean 0 and standard deviation 1
* $y \sim \mathcal{N}(5.0 + w_1 x_1, 1)$; $y$ is sampled from a normal distribution with mean $5.0 + w_1 x_1$ and standard deviation 1
Note that we sampled 1,000 $x, y$ pairs. Our matrix to represent the input values $x$, however, has 2 columns; the first column represents the bias and all its values are 1; the second column represents the sampled $x$. Note also that once we have our data, we need to convert them to PyTorch tensors.
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import torch
import torch.nn as nn
from torch import optim
from numpy.random import normal
from sklearn.metrics import r2_score
np.random.seed(37)
n = 1000
X = np.hstack([
np.ones(n).reshape(n, 1),
normal(2.0, 1.0, n).reshape(n, 1)
])
y = normal(5.0 + 2.0 * X[:, 1], 1, n).reshape(n, 1) + normal(0, 1, n).reshape(n, 1)
X = torch.from_numpy(X).type(torch.FloatTensor)
y = torch.from_numpy(y).type(torch.FloatTensor)
```
# Gradient descent
Here, we define a 2-layer NN architecture and apply the different gradient descent approaches. Note that we keep track of the losses and plot them. We also estimate the $R^2$ value for the model resulting from each approach. Finally, as a sanity check, we use Scikit-Learn's `LinearRegression` model to estimate the weights as well.
```
def plot_loss(loss_trace, title='Loss over epochs'):
loss_df = pd.DataFrame(loss_trace, columns=['epoch', 'loss'])
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
plt.plot(loss_df['epoch'], loss_df['loss'])
ax.set_title(title)
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
```
## Batch gradient descent
```
model = torch.nn.Sequential(
torch.nn.Linear(2, 1, bias=False)
)
loss_fn = torch.nn.MSELoss(size_average=True)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(4500):
y_pred = model(X)
loss = loss_fn(y_pred, y)
loss_trace.append((epoch, loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model[0].weight[0, 0]
w1 = model[0].weight[0, 1]
plot_loss(loss_trace, r'Loss over epochs for batch gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
## Stochastic gradient descent
```
model = torch.nn.Sequential(
torch.nn.Linear(2, 1, bias=False)
)
loss_fn = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(500):
indices = list(range(n))
np.random.shuffle(indices)
for i in indices:
y_pred = model(X[i])
loss = loss_fn(y_pred, y[i])
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_trace.append((epoch, loss.item()))
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model[0].weight[0, 0]
w1 = model[0].weight[0, 1]
plot_loss(loss_trace, r'Loss over epochs for stochastic gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
## Mini-batch gradient descent
```
model = torch.nn.Sequential(
torch.nn.Linear(2, 1, bias=False)
)
loss_fn = torch.nn.MSELoss(size_average=True)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(300):
start = 0
batch_size = 32
indices = list(range(n))
while True:
stop = start + batch_size
if stop >= n:
stop = n
batch = indices[start:stop]   # use only the examples in the current mini-batch
y_pred = model(X[batch])
loss = loss_fn(y_pred, y[batch])
optimizer.zero_grad()
loss.backward()
optimizer.step()
start += batch_size
if start >= n:
break
loss_trace.append((epoch, loss.item()))
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model[0].weight[0, 0]
w1 = model[0].weight[0, 1]
plot_loss(loss_trace, r'Loss over epochs for mini-batch gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
## Verify with Scikit-Learn Linear Model
```
from sklearn import linear_model
model = linear_model.LinearRegression(fit_intercept=False)
model.fit(X.data.numpy(), y.data.numpy())
print(model.coef_)
print(r2_score(model.predict(X), y.data.numpy()))
```
## Summary of results
Here are some things to note.
* Batch gradient descent takes many more epochs (4,500) to learn the weights and converge. Mini-batch gradient descent takes only 300 epochs, and stochastic gradient descent takes 500 epochs.
* Batch and mini-batch gradient descent losses over epochs are smooth, while the stochastic gradient descent loss bounces up and down. This observation is no doubt related to the randomization of the data and to updating on a single example at a time.
* Stochastic gradient descent and Scikit-Learn's `LinearRegression` learn the intercept and weight that are closest to the model's true parameters; however, their $R^2$ values are not the highest and are, in fact, the lowest.
* The NN learned through batch gradient descent has the highest $R^2$ value, though the intercept and weight do not really resemble the model's true parameters. Obviously, we have over-fitted our model.
## Using DataLoader to control batch sizes
You can implement batch, stochastic, and mini-batch gradient descent using the `DataLoader` class. In the examples above, we manually controlled the batch size; with `DataLoader`, you can instead set the `batch_size` and `shuffle` properties to mimic the type of gradient descent you want.
* `batch_size = n` and `shuffle = False` indicates batch gradient descent
* `batch_size = 1` and `shuffle = True` indicates stochastic gradient descent
* `batch_size = k` and `shuffle = False` indicates mini-batch gradient descent (where k << n)
The example below shows how we may use `DataLoader` to use stochastic gradient descent.
```
import torch.utils.data as Data
loader = Data.DataLoader(
dataset=Data.TensorDataset(X, y),
batch_size=1,
shuffle=True
)
model = torch.nn.Sequential(
torch.nn.Linear(2, 1, bias=False)
)
loss_fn = torch.nn.MSELoss(size_average=True)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(300):
for step, (batch_x, batch_y) in enumerate(loader):
y_pred = model(batch_x)
loss = loss_fn(y_pred, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_trace.append((epoch, loss.item()))
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model[0].weight[0, 0]
w1 = model[0].weight[0, 1]
plot_loss(loss_trace, r'Loss over epochs for stochastic gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
## Having fun with 3 layers and stochastic gradient descent
Here, we try to have fun and define 3 layers: 1 input, 1 hidden, and 1 output layer. Note that we switch over to using PyTorch's `Module` to define the NN architecture. Note that the results are not that great; hint: it is not easy to architect a NN.
```
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden, bias=False)
self.predict = torch.nn.Linear(n_hidden, n_output, bias=False)
def forward(self, x):
x = F.sigmoid(self.hidden(x))
x = self.predict(x)
return x
loader = Data.DataLoader(
dataset=Data.TensorDataset(X, y),
batch_size=1,
shuffle=True
)
model = Net(n_feature=2, n_hidden=2, n_output=1)
loss_fn = torch.nn.MSELoss(size_average=True)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(100):
for step, (batch_x, batch_y) in enumerate(loader):
y_pred = model(batch_x)
loss = loss_fn(y_pred, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_trace.append((epoch, loss.item()))
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model.predict.weight[0, 0].data.numpy()
w1 = model.predict.weight[0, 1].data.numpy()
plot_loss(loss_trace, r'Loss over epochs for stochastic gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
## Having fun with 5 layers and mini-batch gradient descent
Here, we define a NN architecture with 5 layers: 1 input, 3 hidden, and 1 output layer. Again, note the mediocre results.
```
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden1 = torch.nn.Linear(n_feature, 10, bias=False)
self.hidden2 = torch.nn.Linear(10, 20, bias=False)
self.hidden = torch.nn.Linear(20, n_hidden, bias=False)
self.predict = torch.nn.Linear(n_hidden, n_output, bias=False)
def forward(self, x):
x = F.sigmoid(self.hidden1(x))
x = F.sigmoid(self.hidden2(x))
x = F.sigmoid(self.hidden(x))
x = self.predict(x)
return x
loader = Data.DataLoader(
dataset=Data.TensorDataset(X, y),
batch_size=32,
shuffle=False
)
model = Net(n_feature=2, n_hidden=2, n_output=1)
loss_fn = torch.nn.MSELoss(size_average=True)
learning_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_trace = []
for epoch in range(1000):
for step, (batch_x, batch_y) in enumerate(loader):
y_pred = model(batch_x)
loss = loss_fn(y_pred, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_trace.append((epoch, loss.item()))
r2 = r2_score(model(X).data.numpy(), y.data.numpy())
b0 = model.predict.weight[0, 0].data.numpy()
w1 = model.predict.weight[0, 1].data.numpy()
plot_loss(loss_trace, r'Loss over epochs for mini-batch gradient descent $R^2$={:.2f}, $\beta_0$={:.2f}, $w_1$={:.2f}'.format(r2, b0, w1))
```
# References
* [PyTorch with examples](https://pytorch.org/tutorials/beginner/pytorch_with_examples.html#nn-module)
* [Linear regression in x minutes using PyTorch](https://hackernoon.com/linear-regression-in-x-minutes-using-pytorch-8eec49f6a0e2)
* [PyTorch examples](https://github.com/jcjohnson/pytorch-examples#pytorch-tensors)
* [A Gentle Introduction to Mini-Batch Gradient Descent and How to Configure Batch Size](https://machinelearningmastery.com/gentle-introduction-mini-batch-gradient-descent-configure-batch-size/)
* [How to adjust learning rate](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate)
# Take a Look!
Take a look at [Dr. Arthur P. Dempster](https://en.wikipedia.org/wiki/Arthur_P._Dempster).
```
import pandas as pd
import numpy as np
import logging
import sys
from datetime import datetime
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import scipy
import copy
from scipy.stats import skewnorm
from random import expovariate
# a little hacky, but works if you don't want to actually install the
# custom packages
sys.path.append('..')
from uniswapv3_simulator.pool import Uniswapv3Pool
from uniswapv3_simulator.utils import pool_init_price, solve_for_liquidity_delta
from uniswapv3_simulator.math import tick_to_sqrt_price, sqrt_price_to_tick
from utils import amount_to_float
expt = 'simulation_v1'
timestamp = datetime.now().strftime('%y%m%d%H%M%S')
logging.basicConfig(level=logging.INFO,
filename=f'./{expt}_{timestamp}.log',
)
logging.getLogger('uniswap-v3').setLevel(logging.DEBUG)
logging.getLogger('covalent_api').setLevel(logging.DEBUG)
logger = logging.getLogger('experiment')
from uniswapv3_simulator.utils import sqrt_price_to_tick
from uniswapv3_simulator.tick import MAX_TICK, MIN_TICK
init_price = 3088
fee = 0.03
budget = 10000
num_ticks = 10000
init_tick = sqrt_price_to_tick(np.sqrt(init_price))
ticks = np.round(np.linspace(init_tick + 1, MAX_TICK, num_ticks), 0)
def init_uniform_pool(fee, price, budget, tick_spacing = 1):
pool = Uniswapv3Pool(fee, tick_spacing, price)
pool.set_position('uniform', MIN_TICK, MAX_TICK, budget)
return pool
_pool = init_uniform_pool(fee, init_price, budget, tick_spacing = 1)
tick = ticks[0]
txn_rate = 100
alpha = 0.5
num_sims = 10**5
def simulate(_pool, tick, txn_rate, alpha, num_sims, budget, txn_modifier=60 * 60, mu=0, sigma=0.1):
"""
pool: already instantiated with Liquidity shape and depth
tick: p2 of the range we are considering, >= p
txn_rate: transactions per unit; i.e. 100 txn per hour requires txn_modifier = 60 * 60
alpha: probability that a given transaction is an arbitrage trade (rather than a round-trip liquidity trade)
budget: budget for liquidity
txn_modifier: factor to convert from txn units to seconds, 60 * 60 is for hours
mu: drift for GBM
sigma: vol for GBM
"""
# copy pool object
pool = copy.deepcopy(_pool)
pool_tick = pool.tick
price = pool.sqrt_price ** 2
p2 = tick
p1 = 2 * pool_tick - p2
# we have our symmetric range: (p1, p2) which surrounds the initial price
# now add our position
pool.set_position('target', p1, p2, budget)
fees = np.empty(num_sims)
for i in range(num_sims):
# draw arrival times
arrivals = np.random.exponential(1.0 / (txn_rate / txn_modifier), int(2.5 * txn_rate))
cumulative_arrival = np.cumsum(arrivals) / txn_modifier
arrivals = arrivals[cumulative_arrival <= 1.0] / txn_modifier
for dt in arrivals:
u, n = np.random.uniform(), np.random.normal()
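# GBM log-return over the interarrival time dt; u (used below) decides whether this transaction is an arbitrage or a round-trip liquidity trade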
X = (mu - 0.5 * sigma ** 2)*dt + (sigma * np.sqrt(dt) * n)
new_price = price * np.exp(X)
if u < alpha:
# this is an arbitrage, trade to new price
price = new_price
pool.swap(price)
else:
# this is a liquidity trade, trade to new price and back
pool.swap(new_price)
pool.swap(price)
fees[i] = pool.get_position('target')
return np.mean(fees), np.std(fees)
simulate(_pool, tick, txn_rate, alpha, num_sims, budget, txn_modifier=3600)
```
```
import numpy as np
import matplotlib.pyplot as plt
import os
import pathlib
from zipfile import ZipFile
import PIL
import tensorflow as tf
train_dir = pathlib.Path("/Users/admin/Desktop/Fruit_classifier/fruits-360/Training")
test_dir = pathlib.Path("/Users/admin/Desktop/Fruit_classifier/fruits-360/Test")
image_count = len(list(train_dir.glob('*/*.jpg')))
image_count
image_count1 = len(list(test_dir.glob('*/*.jpg')))
image_count1
fruits = list(train_dir.glob('Apricot/*.jpg'))
plt.figure(figsize=(10, 10))
for i in range(16):
plt.subplot(4, 4, i + 1)
img = PIL.Image.open(str(fruits[i+40]))
plt.imshow(img)
plt.axis('off')
plt.show()
batch_size = 32
img_height = 100
img_width = 100
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir,
validation_split=0.2,
subset='training',
seed=42,
image_size=(img_height, img_width),
batch_size=batch_size
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir,
validation_split=0.2,
subset='validation',
seed=42,
image_size=(img_height, img_width),
batch_size=batch_size
)
class_names=train_ds.class_names
num_class = len(class_names)
class_names
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(16):
plt.subplot(4, 4, i + 1)
plt.imshow(images[i].numpy().astype('uint8'))
plt.title(class_names[labels[i]])
plt.axis('off')
AUTOTUNE = tf.data.experimental.AUTOTUNE
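# cache decoded images, shuffle the training set, and prefetch batches so preprocessing overlaps with training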
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2)
])
preprocess_input = tf.keras.applications.resnet.preprocess_input
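# transfer learning: use an ImageNet-pretrained ResNet50 (without its classification head) as a frozen feature extractor; only the new pooling/dropout/dense head added below is trained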
base_model = tf.keras.applications.resnet.ResNet50(
input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet'
)
base_model.trainable = False
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
prediction_layer = tf.keras.layers.Dense(num_class)
inputs = tf.keras.Input(shape=(100, 100, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']
)
model.summary()
model.evaluate(val_ds)
epochs = 10
history = model.fit(
train_ds,
epochs=epochs,
validation_data=val_ds
)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(12, 10))
plt.plot(epochs_range, train_loss, label="Training Loss")
plt.plot(epochs_range, val_loss, label="Validation Loss")
plt.legend(loc='upper left')
plt.title('Training and Validation Loss')
plt.show()
np.argmin(val_loss)
model.save('final.h5')
```
```
import numpy as np
import pandas as pd
import datetime as dtm
import matplotlib.pyplot as plt
import matplotlib.dates as dts
import netCDF4 as nc
import os
import re
import pytz
%matplotlib inline
```
# read in SOG data:
```
filename='/data/eolson/SOG/SOG-runs/SOGCompMZEff/profiles/hoff-SOG.dat'
file_obj = open(filename, 'rt')
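# scan the header for metadata (field names, units, start time, interval) and stop at *EndOfHeader; 'index' then gives the number of rows to skip when reading the data table below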
for index, line in enumerate(file_obj):
line = line.strip()
if line.startswith('*FieldNames:'):
field_names = line.split(': ', 1)[1].split(', ')
elif line.startswith('*FieldUnits:'):
field_units = line.split(': ', 1)[1].split(', ')
elif line.startswith('*HoffmuellerStartYr:'):
year_start = line.split(': ', 1)[1]
elif line.startswith('*HoffmuellerStartDay:'):
day_start = line.split(': ', 1)[1]
elif line.startswith('*HoffmuellerStartSec:'):
sec_start = line.split(': ', 1)[1]
elif line.startswith('*HoffmuellerInterval:'):
interval = line.split(': ', 1)[1]
elif line.startswith('*EndOfHeader'):
break
data = pd.read_csv(filename, delim_whitespace=True, header=0, names=field_names, skiprows=index, chunksize=102)
# Timestamp in matplotlib time
dt_num = dts.date2num(dtm.datetime.strptime(year_start + ' ' + day_start, '%Y %j')) + float(sec_start)/86400
interval=float(interval)
# Extract dataframe chunks into dictionary
for index, chunk in enumerate(data):
if index==0:
da=chunk
else:
da=np.dstack((da,chunk))
z=da[:,0,0]
t=np.arange(da.shape[2])
t=(t+1.0)*3600
tt,zz=np.meshgrid(t,-z)
print(field_names)
#print t
#print day_start
#print dts.num2date(dt_num)
#print z
```
Load SS2DSOG nuts & bio data:
```
resultsDir='/data/eolson/MEOPAR/SS2DSOGruns/run5x5_18/'
fname='SalishSea_1h_20041019_20041020_ptrc_T.nc'
f=nc.Dataset(os.path.join(resultsDir,fname))
fkeys=f.variables.keys()
lons=f.variables['nav_lon'][1,:]
lats=f.variables['nav_lat'][:,1]
for ik in fkeys:
match = re.search(r'depth.',ik)
if match:
zkey=match.group(0)
zSS=f.variables[zkey][:]
xxSS,zzSS=np.meshgrid(lons,-z[:])
xtSS,ytSS=np.meshgrid(lons,lats)
print(fkeys)
f2name='/data/eolson/MEOPAR/SS2DSOGruns/nuts_SOG5x5_S3-2014-10-19-WithMRubraMicroZooRemin.nc'
f2=nc.Dataset(f2name)
```
Repeat with a later time for NEMO:
```
fig, axs = plt.subplots(2,2,figsize=(12,8))
ti=47 # hrs since start
# Phyto
iii=4
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
iii=5
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='g')
iii=6
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='b')
iii=7
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='k')
#pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,0].set_ylabel('z (m)')
axs[1,0].set_xlabel('Phyto')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar1=f.variables['PHY2'][:,:,:,:] # diatoms
pl3=axs[1,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
pl3=axs[0,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
ivar2=f.variables['PHY'][:,:,:,:]
pl3=axs[1,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
pl3=axs[0,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
ivar3=f.variables['ZOO2'][:,:,:,:]
pl3=axs[1,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
pl3=axs[0,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
ivar4=f.variables['ZOO'][:,:,:,:]
pl4=axs[1,0].plot(ivar4[360*ti,0:101,2,2],-zSS[0:101],'.',color='k')
pl4=axs[0,0].plot(ivar4[360*ti,0:101,2,2],-zSS[0:101],'.',color='k')
axs[0,0].set_ylim([-60,0])
# NO
iii=8
pl0=axs[0,1].plot(da[:,iii,ti],-z,'-',color='r')
#pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_ylim([-20,0])
axs[1,1].set_xlabel(field_names[iii])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NO3'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
pl3=axs[0,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
#print ivar[0,0:100,2,2]
fig, axs = plt.subplots(2,2,figsize=(12,8))
# Phyto
iii=16
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
iii=17
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='b')
iii=19
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='g')
#pl0=axs[0,0].plot(f2.variables['PHY'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,0].set_ylabel('z (m)')
axs[1,0].set_xlabel('DON/PON/bSi')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar1=f.variables['DOC'][:,:,:,:]/7.6
pl3=axs[1,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
pl3=axs[0,0].plot(ivar1[360*ti,0:101,2,2],-zSS[0:101],'.',color='m')
ivar2=f.variables['POC'][:,:,:,:]/7.6
pl3=axs[1,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
pl3=axs[0,0].plot(ivar2[360*ti,0:101,2,2],-zSS[0:101],'.',color='c')
ivar3=f.variables['DSi'][:,:,:,:]
pl3=axs[1,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
pl3=axs[0,0].plot(ivar3[360*ti,0:101,2,2],-zSS[0:101],'.',color='y')
axs[0,0].set_ylim([-50,0])
# NO
iii=8
pl0=axs[0,1].plot(da[:,iii,ti],-z,'-',color='r')
#pl0=axs[0,1].plot(f2.variables['NO3'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_ylim([-20,0])
axs[1,1].set_xlabel(field_names[iii])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NO3'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
pl3=axs[0,1].plot(ivar[360*ti-1,0:100,2,2],-zSS[0:100],'.',color='k')
#print ivar[0,0:100,2,2]
fig, axs = plt.subplots(2,2,figsize=(12,8))
# Si
iii=10
pl0=axs[0,0].plot(da[:,iii,ti],-z,'-',color='r')
axs[0,0].set_ylabel('z (m)')
axs[0,0].set_xlabel('Si')
axs[0,0].set_ylim([-20,0])
#pl0=axs[0,0].plot(f2.variables['Si'][0,0:101,2,2],-zSS[0:101],'-',color='k')
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['Si'][:,:,:,:]
pl3=axs[1,0].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
pl4=axs[0,0].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
# NH4
iii=9
pl0=axs[0,1].plot(da[:,iii,ti],-z,'.',color='b')
#pl0=axs[0,1].plot(f2.variables['NH4'][0,0:101,2,2],-zSS[0:101],'-',color='k')
axs[0,1].set_ylabel('z (m)')
axs[0,1].set_xlabel(field_names[iii])
axs[0,1].set_ylim([-20,0])
#pl1=axs[0].plot(da[:,iii,da.shape[2]-1],-z)
ivar=f.variables['NH4'][:,:,:,:]
pl3=axs[1,1].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
pl4=axs[0,1].plot(ivar[360*ti,0:100,2,2],-zSS[0:100],'.',color='g')
#axs[0,1].set_xlim([.8,1.2])
ivar1=f.variables['PHY2'][:,:,:,:] # diatoms
print(ivar1[182,0:101,2,2])
f.close()
f2.close()
```
## Chapter 12 - Bayesian Approaches to Testing a Point ("Null") Hypothesis
- [12.2.2 - Are different groups equal or not?](#12.2.2---Are-different-groups-equal-or-not?)
```
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import theano.tensor as tt
from matplotlib import gridspec
%matplotlib inline
plt.style.use('seaborn-white')
color = '#87ceeb'
%load_ext watermark
%watermark -p pandas,numpy,pymc3,matplotlib,seaborn,theano
```
#### Data
Using *R*, I executed lines 18-63 from the script `OneOddGroupModelComp2E.R` to generate the exact same data used in the book. The script can be downloaded from the book's website. After executing the lines, the List object `dataList` in *R* contains five elements:
1. `nCond`: A scalar value (4) representing the number of conditions (background music types).
2. `nSubj`: A scalar value (80) representing the number of subjects.
3. `CondOfSubj`: A vector representing the condition (1, 2, 3 or 4) of a subject during a test.
4. `nTrlOfSubj`: A vector with the number of trials/words per subject (20 for all subjects).
5. `nCorrOfSubj`: A vector with the number of correct recalls per subject.
I exported the last three elements of `dataList` to a csv file using the following command in *R*:
`write.csv(data.frame(dataList[c(3:5)]), file='background_music.csv', row.names=FALSE)`
```
df = pd.read_csv('data/background_music.csv', dtype={'CondOfSubj':'category'})
# Mapping the condition descriptions to the condition codes. Just for illustrative purposes.
bgmusic = {0:'Das Kruschke', 1:'Mozart', 2:'Bach', 3:'Beethoven'}
df['CondText'] = df.CondOfSubj.cat.codes.map(bgmusic)
cond_idx = df.CondOfSubj.cat.codes.values
cond_codes = df.CondOfSubj.cat.categories
nCond = cond_codes.size
nSubj = df.index.size
df.info()
df.groupby('CondOfSubj').head(3)
```
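As a quick sanity check (not part of the book's code), we can verify that the loaded dataframe matches the `dataList` description above: 4 conditions, 80 subjects, and 20 trials per subject.
```
# Sanity check against the dataList description above (illustrative only).
assert df.CondOfSubj.nunique() == 4          # nCond
assert nSubj == 80                           # nSubj
assert (df.nTrlOfSubj == 20).all()           # 20 trials/words per subject
assert df.nCorrOfSubj.between(0, 20).all()   # correct recalls cannot exceed the trial count
```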
### 12.2.2 - Are different groups equal or not?
Given the data, how credible is it that the 4 types of background music influence the ability to recall words
**differently**?
```
# The means as mentioned in section 12.2.2
df.groupby('CondText', sort=False)['nCorrOfSubj'].mean()
```
Note: in contrast to the *R* output in the book, the parameters in PyMC3 (like $\omega$ and model index) are indexed starting with 0.
Model 0 = condition specific $\omega_c$
Model 1 = same $\omega$ for all conditions
```
with pm.Model() as model_1:
# constants
aP, bP = 1., 1.
# Pseudo- and true priors for model 1.
a0 = tt.as_tensor([.48*500, aP])
b0 = tt.as_tensor([(1-.48)*500, bP])
# True and pseudopriors for model 0
a = tt.as_tensor(np.c_[np.tile(aP, 4), [(.40*125), (.50*125), (.51*125), (.52*125)]])
b = tt.as_tensor(np.c_[np.tile(bP, 4), [(1-.40)*125, (1-.50)*125, (1-.51)*125, (1-.52)*125]])
# Prior on model index [0,1]
m_idx = pm.Categorical('m_idx', np.asarray([.5, .5]))
# Priors on concentration parameters
kappa_minus2 = pm.Gamma('kappa_minus2', 2.618, 0.0809, shape=nCond)
kappa = pm.Deterministic('kappa', kappa_minus2 +2)
# omega0
omega0 = pm.Beta('omega0', a0[m_idx], b0[m_idx])
# omega (condition specific)
omega = pm.Beta('omega', a[:,m_idx], b[:,m_idx], shape=nCond)
# Use condition specific omega when m_idx = 0, else omega0
aBeta = pm.math.switch(pm.math.eq(m_idx, 0), omega * (kappa-2)+1, omega0 * (kappa-2)+1)
bBeta = pm.math.switch(pm.math.eq(m_idx, 0), (1-omega) * (kappa-2)+1, (1-omega0) * (kappa-2)+1)
# Theta
theta = pm.Beta('theta', aBeta[cond_idx], bBeta[cond_idx], shape=nSubj)
# Likelihood
y = pm.Binomial('y', n=df.nTrlOfSubj.values, p=theta, observed=df.nCorrOfSubj)
pm.model_to_graphviz(model_1)
with model_1:
trace1 = pm.sample(5000, target_accept=.95)
pm.traceplot(trace1);
```
#### Figure 12.5
```
fig = plt.figure(figsize=(12,8))
# Define gridspec
gs = gridspec.GridSpec(3, 3)
ax1 = plt.subplot(gs[0,0])
ax2 = plt.subplot(gs[0,1])
ax3 = plt.subplot(gs[0,2])
ax4 = plt.subplot(gs[1,0])
ax5 = plt.subplot(gs[1,1])
ax6 = plt.subplot(gs[1,2])
ax7 = plt.subplot(gs[2,:])
# Group the first six axes in a list for easier access in loop below
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
# Differences of posteriors to be displayed: omega x - omega y
x = [0,0,0,1,1,2]
y = [1,2,3,2,3,3]
# Plot histograms
for ax, a, b in zip(axes, x, y):
diff = trace1['omega'][:,a]-trace1['omega'][:,b]
pm.plot_posterior(diff, ref_val=0, point_estimate='mode', color=color, ax=ax)
ax.set_xlabel('$\omega_{}$ - $\omega_{}$'.format(a,b), fontdict={'size':18})
ax.xaxis.set_ticks([-.2, -.1, 0.0, 0.1, 0.2])
# Plot trace values of model index (0, 1)
ax7.plot(np.arange(1, len(trace1['m_idx'])+1),trace1['m_idx'], color=color, linewidth=4)
ax7.set_xlabel('Step in Markov chain', fontdict={'size':14})
ax7.set_ylabel('Model Index (0, 1)', fontdict={'size':14})
ax7.set_ylim(-0.05,1.05)
fig.tight_layout()
```
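Since the model index was given a 50/50 prior, the fraction of posterior samples spent in each index estimates the posterior model probabilities, and their ratio estimates the Bayes factor. A minimal sketch using the trace obtained above:
```
# Posterior probability of each model index, estimated from the MCMC samples.
# Model 0 = condition-specific omega_c, model 1 = shared omega (see above).
p_model0 = np.mean(trace1['m_idx'] == 0)
p_model1 = 1 - p_model0
print('P(model 0 | data) =', p_model0)
print('P(model 1 | data) =', p_model1)
# With equal prior odds, this ratio estimates the Bayes factor.
print('Estimated Bayes factor (model 0 vs. model 1):', p_model0 / p_model1)
```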
# Preprocess text
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
#export
from exp.nb_11a import *
```
## Data
We will use the IMDB dataset that consists of 50,000 labeled reviews of movies (positive or negative) and 50,000 unlabelled ones.
[Jump_to lesson 12 video](https://course19.fast.ai/videos/?lesson=12&t=4964)
```
path = untar_data(URLs.IMDB)
path.ls()
```
We define a subclass of `ItemList` that will read the texts in the corresponding filenames.
```
#export
def read_file(fn):
with open(fn, 'r', encoding = 'utf8') as f: return f.read()
class TextList(ItemList):
@classmethod
def from_files(cls, path, extensions='.txt', recurse=True, include=None, **kwargs):
return cls(get_files(path, extensions, recurse=recurse, include=include), path, **kwargs)
def get(self, i):
if isinstance(i, Path): return read_file(i)
return i
```
Just in case there are some text log files, we restrict the ones we take to the training, test, and unsupervised folders.
```
il = TextList.from_files(path, include=['train', 'test', 'unsup'])
```
We should expect a total of 100,000 texts.
```
len(il.items)
```
Here is the first one as an example.
```
txt = il[0]
txt
```
For text classification, we will split by the grandparent folder as before, but for language modeling, we take all the texts and just set 10% aside for validation.
```
sd = SplitData.split_by_func(il, partial(random_splitter, p_valid=0.1))
sd
```
## Tokenizing
We need to tokenize the dataset first, which means splitting each text into individual tokens. Those tokens are basically words or punctuation signs, with a few tweaks: *don't*, for instance, is split into *do* and *n't*. We will use a processor for this, in conjunction with the [spacy library](https://spacy.io/).
[Jump_to lesson 12 video](https://course19.fast.ai/videos/?lesson=12&t=5070)
```
#export
import spacy,html
```
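As a quick standalone illustration of the behavior described above (this cell is just a sketch and is not exported), the blank spacy English tokenizer already splits contractions and punctuation:
```
# Illustration only: default spacy tokenization of a short sentence.
toy_tok = spacy.blank('en').tokenizer
print([t.text for t in toy_tok("I don't like this movie.")])
# expected output: ['I', 'do', "n't", 'like', 'this', 'movie', '.']
```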
Before even tokenizing, we will apply a bit of preprocessing to the texts to clean them up (we saw that the example above contained some HTML code). These rules are applied before the texts are split into tokens.
```
#export
#special tokens
UNK, PAD, BOS, EOS, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxrep xxwrep xxup xxmaj".split()
def sub_br(t):
"Replaces the <br /> by \n"
re_br = re.compile(r'<\s*br\s*/?>', re.IGNORECASE)
return re_br.sub("\n", t)
def spec_add_spaces(t):
"Add spaces around / and #"
return re.sub(r'([/#])', r' \1 ', t)
def rm_useless_spaces(t):
"Remove multiple spaces"
return re.sub(' {2,}', ' ', t)
def replace_rep(t):
"Replace repetitions at the character level: cccc -> TK_REP 4 c"
def _replace_rep(m:Collection[str]) -> str:
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
re_rep = re.compile(r'(\S)(\1{3,})')
return re_rep.sub(_replace_rep, t)
def replace_wrep(t):
"Replace word repetitions: word word word -> TK_WREP 3 word"
def _replace_wrep(m:Collection[str]) -> str:
c,cc = m.groups()
return f' {TK_WREP} {len(cc.split())+1} {c} '
re_wrep = re.compile(r'(\b\w+\W+)(\1{3,})')
return re_wrep.sub(_replace_wrep, t)
def fixup_text(x):
"Various messy things we've seen in documents"
re1 = re.compile(r' +')
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace(
'<br />', "\n").replace('\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(
' @-@ ','-').replace('\\', ' \\ ')
return re1.sub(' ', html.unescape(x))
default_pre_rules = [fixup_text, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces, sub_br]
default_spec_tok = [UNK, PAD, BOS, EOS, TK_REP, TK_WREP, TK_UP, TK_MAJ]
replace_rep('cccc')
replace_wrep('word word word word word ')
```
These rules are applied after tokenization, on the list of tokens.
```
#export
def replace_all_caps(x):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
res = []
for t in x:
if t.isupper() and len(t) > 1: res.append(TK_UP); res.append(t.lower())
else: res.append(t)
return res
def deal_caps(x):
"Replace all Capitalized tokens in by their lower version and add `TK_MAJ` before."
res = []
for t in x:
if t == '': continue
if t[0].isupper() and len(t) > 1 and t[1:].islower(): res.append(TK_MAJ)
res.append(t.lower())
return res
def add_eos_bos(x): return [BOS] + x + [EOS]
default_post_rules = [deal_caps, replace_all_caps, add_eos_bos]
replace_all_caps(['I', 'AM', 'SHOUTING'])
deal_caps(['My', 'name', 'is', 'Jeremy'])
```
Since tokenizing and applying those rules takes a bit of time, we'll parallelize it using `ProcessPoolExecutor` to go faster.
```
#export
from spacy.symbols import ORTH
from concurrent.futures import ProcessPoolExecutor
def parallel(func, arr, max_workers=4):
if max_workers<2: results = list(progress_bar(map(func, enumerate(arr)), total=len(arr)))
else:
with ProcessPoolExecutor(max_workers=max_workers) as ex:
return list(progress_bar(ex.map(func, enumerate(arr)), total=len(arr)))
if any([o is not None for o in results]): return results
#export
class TokenizeProcessor(Processor):
def __init__(self, lang="en", chunksize=2000, pre_rules=None, post_rules=None, max_workers=4):
self.chunksize,self.max_workers = chunksize,max_workers
self.tokenizer = spacy.blank(lang).tokenizer
for w in default_spec_tok:
self.tokenizer.add_special_case(w, [{ORTH: w}])
self.pre_rules = default_pre_rules if pre_rules is None else pre_rules
self.post_rules = default_post_rules if post_rules is None else post_rules
def proc_chunk(self, args):
i,chunk = args
chunk = [compose(t, self.pre_rules) for t in chunk]
docs = [[d.text for d in doc] for doc in self.tokenizer.pipe(chunk)]
docs = [compose(t, self.post_rules) for t in docs]
return docs
def __call__(self, items):
toks = []
if isinstance(items[0], Path): items = [read_file(i) for i in items]
chunks = [items[i: i+self.chunksize] for i in (range(0, len(items), self.chunksize))]
toks = parallel(self.proc_chunk, chunks, max_workers=self.max_workers)
return sum(toks, [])
# return [sum(toks)]
def proc1(self, item): return self.proc_chunk([item])[0]
def deprocess(self, toks): return [self.deproc1(tok) for tok in toks]
def deproc1(self, tok): return " ".join(tok)
tp = TokenizeProcessor()
txt[:250]
' • '.join(tp(il[:100])[0])[:400]
```
## Numericalizing
Once we have tokenized our texts, we replace each token with an individual number; this is called numericalizing. Again, we do this with a processor (not so different from the `CategoryProcessor`).
[Jump_to lesson 12 video](https://course19.fast.ai/videos/?lesson=12&t=5491)
```
#export
import collections
class NumericalizeProcessor(Processor):
def __init__(self, vocab=None, max_vocab=60000, min_freq=2):
self.vocab,self.max_vocab,self.min_freq = vocab,max_vocab,min_freq
def __call__(self, items):
#The vocab is defined on the first use.
if self.vocab is None:
freq = Counter(p for o in items for p in o)
# freq = Counter(o for o in items)
self.vocab = [o for o,c in freq.most_common(self.max_vocab) if c >= self.min_freq]
for o in reversed(default_spec_tok):
if o in self.vocab: self.vocab.remove(o)
self.vocab.insert(0, o)
if getattr(self, 'otoi', None) is None:
self.otoi = collections.defaultdict(int,{v:k for k,v in enumerate(self.vocab)})
return [self.proc1(o) for o in items]
def proc1(self, item): return [self.otoi[o] for o in item]
def deprocess(self, idxs):
assert self.vocab is not None
return [self.deproc1(idx) for idx in idxs]
def deproc1(self, idx): return [self.vocab[i] for i in idx]
```
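Here is a tiny standalone check of the processor on made-up token lists (a hypothetical toy example, not used later): the vocab is built on the first call, the special tokens are moved to the front, and any out-of-vocab token maps to index 0 (`xxunk`).
```
# Toy example (illustrative only): build a vocab on two fake token lists and round-trip them.
toy_texts = [['xxbos', 'great', 'movie', 'great', 'xxeos'],
             ['xxbos', 'terrible', 'movie', 'xxeos']]
toy_num = NumericalizeProcessor(min_freq=1)
toy_ids = toy_num(toy_texts)
print(toy_ids)
print(toy_num.deprocess(toy_ids))
```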
When we do language modeling, we infer the labels from the text itself during training, so there's no need to label anything. The training loop expects labels, however, so we need to add dummy ones.
```
proc_tok,proc_num = TokenizeProcessor(max_workers=8),NumericalizeProcessor()
%time ll = label_by_func(sd, lambda x: 0, proc_x = [proc_tok,proc_num])
```
Once the items have been processed they become lists of numbers; we can still access the underlying raw text in `x_obj` (or `y_obj` for the targets, but we don't have any here).
```
ll.train.x_obj(0)
```
Since the preprocessing takes time, we save the intermediate result using pickle. Don't use any lambda functions in your processors, or they won't be picklable.
```
pickle.dump(ll, open(path/'ld.pkl', 'wb'))
ll = pickle.load(open(path/'ld.pkl', 'rb'))
```
## Batching
We have a bit of work to do to convert our `LabelList` into a `DataBunch`, as we don't just want batches of IMDB reviews: we want to stream through all the texts concatenated. We also have to prepare the targets, which are the next words in the text. All of this is done by the next object, called `LM_PreLoader`. At the beginning of each epoch, it shuffles the articles (if `shuffle=True`) and creates a big stream by concatenating all of them. We divide this big stream into `bs` smaller streams, which we will read in chunks of `bptt` length.
[Jump_to lesson 12 video](https://course19.fast.ai/videos/?lesson=12&t=5565)
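To make the arithmetic concrete before the illustration below, here is a rough sketch with a made-up stream length (the numbers are assumptions chosen only for illustration):
```
# Illustrative arithmetic only, matching the LM_PreLoader defined further down:
total_len, bs, bptt = 1_000_000, 64, 70
row_len = total_len // bs             # tokens in each of the bs parallel streams
seqs_per_row = (row_len - 1) // bptt  # -1 because targets are the inputs shifted by one
print(row_len, seqs_per_row, seqs_per_row * bs)  # 15625 223 14272
```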
```
# Just using those for illustration purposes, they're not used otherwise.
from IPython.display import display,HTML
import pandas as pd
```
Let's say our stream is:
```
stream = """
In this notebook, we will go back over the example of classifying movie reviews we studied in part 1 and dig deeper under the surface.
First we will look at the processing steps necessary to convert text into numbers and how to customize it. By doing this, we'll have another example of the Processor used in the data block API.
Then we will study how we build a language model and train it.\n
"""
tokens = np.array(tp([stream])[0])
```
Then if we split it in 6 batches it would give something like this:
```
bs,seq_len = 6,15
d_tokens = np.array([tokens[i*seq_len:(i+1)*seq_len] for i in range(bs)])
df = pd.DataFrame(d_tokens)
display(HTML(df.to_html(index=False,header=None)))
```
Then if we have a `bptt` of 5, we would read those in three batches, like this:
```
bs,bptt = 6,5
for k in range(3):
d_tokens = np.array([tokens[i*seq_len + k*bptt:i*seq_len + (k+1)*bptt] for i in range(bs)])
df = pd.DataFrame(d_tokens)
display(HTML(df.to_html(index=False,header=None)))
#export
class LM_PreLoader():
def __init__(self, data, bs=64, bptt=70, shuffle=False):
self.data,self.bs,self.bptt,self.shuffle = data,bs,bptt,shuffle
total_len = sum([len(t) for t in data.x])
self.n_batch = total_len // bs
self.batchify()
def __len__(self): return ((self.n_batch-1) // self.bptt) * self.bs
def __getitem__(self, idx):
source = self.batched_data[idx % self.bs]
seq_idx = (idx // self.bs) * self.bptt
return source[seq_idx:seq_idx+self.bptt],source[seq_idx+1:seq_idx+self.bptt+1]
def batchify(self):
texts = self.data.x
if self.shuffle: texts = texts[torch.randperm(len(texts))]
stream = torch.cat([tensor(t) for t in texts])
self.batched_data = stream[:self.n_batch * self.bs].view(self.bs, self.n_batch)
dl = DataLoader(LM_PreLoader(ll.valid, shuffle=True), batch_size=64)
```
Let's check it all works OK: `x1`, `y1`, `x2` and `y2` should all be of size `bs` by `bptt`. The texts in each row of `x1` should continue in `x2`. `y1` and `y2` should have the same texts as their `x` counterpart, shifted one position to the right.
```
iter_dl = iter(dl)
x1,y1 = next(iter_dl)
x2,y2 = next(iter_dl)
x1.size(),y1.size()
vocab = proc_num.vocab
" ".join(vocab[o] for o in x1[0])
" ".join(vocab[o] for o in y1[0])
" ".join(vocab[o] for o in x2[0])
```
And let's prepare some convenience functions to do this quickly.
```
#export
def get_lm_dls(train_ds, valid_ds, bs, bptt, **kwargs):
return (DataLoader(LM_PreLoader(train_ds, bs, bptt, shuffle=True), batch_size=bs, **kwargs),
DataLoader(LM_PreLoader(valid_ds, bs, bptt, shuffle=False), batch_size=2*bs, **kwargs))
def lm_databunchify(sd, bs, bptt, **kwargs):
return DataBunch(*get_lm_dls(sd.train, sd.valid, bs, bptt, **kwargs))
bs,bptt = 64,70
data = lm_databunchify(ll, bs, bptt)
```
## Batching for classification
When we want to tackle classification, gathering the data will be a bit different: first we label our texts with the folder they come from, and then we need to apply padding to batch them together. To avoid mixing very long texts with very short ones, we will also use a `Sampler` to sort our samples by length (with a bit of randomness for the training set).
First, the data block API calls should look familiar.
[Jump_to lesson 12 video](https://course19.fast.ai/videos/?lesson=12&t=5877)
```
proc_cat = CategoryProcessor()
il = TextList.from_files(path, include=['train', 'test'])
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='test'))
ll = label_by_func(sd, parent_labeler, proc_x = [proc_tok, proc_num], proc_y=proc_cat)
pickle.dump(ll, open(path/'ll_clas.pkl', 'wb'))
ll = pickle.load(open(path/'ll_clas.pkl', 'rb'))
```
Let's check the labels seem consistent with the texts.
```
[(ll.train.x_obj(i), ll.train.y_obj(i)) for i in [1,12552]]
```
We saw samplers in notebook 03. For the validation set, we will simply sort the samples by length, and we begin with the longest ones for memory reasons (it's better to always have the biggest tensors first).
```
#export
from torch.utils.data import Sampler
class SortSampler(Sampler):
def __init__(self, data_source, key): self.data_source,self.key = data_source,key
def __len__(self): return len(self.data_source)
def __iter__(self):
return iter(sorted(list(range(len(self.data_source))), key=self.key, reverse=True))
```
For the training set, we want some kind of randomness on top of this. So first, we shuffle the texts and build megabatches of size `50 * bs`. We sort those megabatches by length before splitting them into 50 minibatches. That way we get randomized batches of roughly the same length.
Then we make sure to have the biggest batch first and shuffle the order of the other batches. We also make sure the last batch stays at the end, because its size is probably smaller than the batch size.
```
#export
class SortishSampler(Sampler):
def __init__(self, data_source, key, bs):
self.data_source,self.key,self.bs = data_source,key,bs
def __len__(self) -> int: return len(self.data_source)
def __iter__(self):
idxs = torch.randperm(len(self.data_source))
megabatches = [idxs[i:i+self.bs*50] for i in range(0, len(idxs), self.bs*50)]
sorted_idx = torch.cat([tensor(sorted(s, key=self.key, reverse=True)) for s in megabatches])
batches = [sorted_idx[i:i+self.bs] for i in range(0, len(sorted_idx), self.bs)]
max_idx = torch.argmax(tensor([self.key(ck[0]) for ck in batches])) # find the chunk with the largest key,
batches[0],batches[max_idx] = batches[max_idx],batches[0] # then make sure it goes first.
batch_idxs = torch.randperm(len(batches)-2)
sorted_idx = torch.cat([batches[i+1] for i in batch_idxs]) if len(batches) > 1 else LongTensor([])
sorted_idx = torch.cat([batches[0], sorted_idx, batches[-1]])
return iter(sorted_idx)
```
Padding: we add the padding token (which has an id of 1) at the end of each sequence to make them all the same size when batching them together. Note that we need the padding at the end to be able to use PyTorch convenience functions that will let us ignore that padding (see 12c).
```
#export
def pad_collate(samples, pad_idx=1, pad_first=False):
max_len = max([len(s[0]) for s in samples])
res = torch.zeros(len(samples), max_len).long() + pad_idx
for i,s in enumerate(samples):
if pad_first: res[i, -len(s[0]):] = LongTensor(s[0])
else: res[i, :len(s[0]) ] = LongTensor(s[0])
return res, tensor([s[1] for s in samples])
bs = 64
train_sampler = SortishSampler(ll.train.x, key=lambda t: len(ll.train[int(t)][0]), bs=bs)
train_dl = DataLoader(ll.train, batch_size=bs, sampler=train_sampler, collate_fn=pad_collate)
iter_dl = iter(train_dl)
x,y = next(iter_dl)
lengths = []
for i in range(x.size(0)): lengths.append(x.size(1) - (x[i]==1).sum().item())
lengths[:5], lengths[-1]
```
The last one is the minimum length. This is the first batch, so it has the longest sequences, but if we look at the next one, which is more random, we see that the lengths are roughly the same.
```
x,y = next(iter_dl)
lengths = []
for i in range(x.size(0)): lengths.append(x.size(1) - (x[i]==1).sum().item())
lengths[:5], lengths[-1]
```
We can see the padding at the end:
```
x
```
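As a tiny standalone check of `pad_collate` (a toy example, not used elsewhere), two fake samples of different lengths get padded with 1s at the end so they can be batched together:
```
# Toy check of pad_collate (illustrative only): fake (tokens, label) samples of unequal length.
toy_samples = [([5, 6, 7, 8], 0), ([9, 10], 1)]
toy_x, toy_y = pad_collate(toy_samples)
print(toy_x)  # the second row should end with two padding 1s
print(toy_y)
```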
And we add a convenience function:
```
#export
def get_clas_dls(train_ds, valid_ds, bs, **kwargs):
train_sampler = SortishSampler(train_ds.x, key=lambda t: len(train_ds.x[t]), bs=bs)
valid_sampler = SortSampler(valid_ds.x, key=lambda t: len(valid_ds.x[t]))
return (DataLoader(train_ds, batch_size=bs, sampler=train_sampler, collate_fn=pad_collate, **kwargs),
DataLoader(valid_ds, batch_size=bs*2, sampler=valid_sampler, collate_fn=pad_collate, **kwargs))
def clas_databunchify(sd, bs, **kwargs):
return DataBunch(*get_clas_dls(sd.train, sd.valid, bs, **kwargs))
bs,bptt = 64,70
data = clas_databunchify(ll, bs)
```
## Export
```
!python notebook2script.py 12_text.ipynb
```
```
import keras
keras.__version__
```
# 5.2 - Using convnets with small datasets
This notebook contains the code examples of Chapter 5, Section 2 of [케라스 창시자에게 배우는 딥러닝](https://tensorflow.blog/%EC%BC%80%EB%9D%BC%EC%8A%A4-%EB%94%A5%EB%9F%AC%EB%8B%9D/), the Korean edition of *Deep Learning with Python*. The book contains many more explanations and figures; this notebook only includes the text directly related to the source code.
## Training a convnet from scratch on a small dataset
Having to train an image classification model using very little data is a common situation, one you will likely encounter in practice if you do computer vision work in a professional context.
"Few" samples usually means anywhere from a few hundred to a few tens of thousands. As a practical example, we will classify images as dogs or cats using a dataset of 4,000 pictures (2,000 dogs, 2,000 cats). We will use 2,000 pictures for training and 1,000 each for validation and testing.
In this section we review one basic strategy to tackle this problem: training a new model from scratch using the small dataset at hand. We will start by training a small convnet on the 2,000 training samples, without any regularization, to set a baseline; this gets us to a classification accuracy of about 71%. At that point the main issue will be overfitting. We will then introduce data augmentation, a powerful technique for mitigating overfitting in computer vision, which will improve the network's accuracy to about 82%.
In the next sections we will cover two more essential techniques for applying deep learning to small datasets: feature extraction with a pretrained network (which will get us to 90-96% accuracy) and fine-tuning a pretrained network (which will get the final model to 97% accuracy). These three strategies (training a small model from scratch, extracting features with a pretrained model, and fine-tuning a pretrained model) should be part of your toolbox for tackling image classification with small datasets.
## The relevance of deep learning for small-data problems
You will sometimes hear that deep learning only works when lots of data is available. This is partly true: a fundamental characteristic of deep learning is that it can find interesting features in the training data on its own, without any manual feature engineering, and this is only possible when many training samples are available. This is especially true for problems where the input samples are very high-dimensional, such as images.
However, what constitutes "lots" of samples is relative: relative, first of all, to the size and depth of the network you are trying to train. It is not possible to train a convnet to solve a complex problem with just a few tens of samples, but a few hundred can be enough if the model is small and well regularized and the task is simple. Because convnets learn local, translation-invariant features, they are highly data-efficient on perceptual problems. Training a convnet from scratch on a very small image dataset, without any kind of feature engineering, can still yield reasonable results, as you will see in this section.
On top of that, deep learning models are by nature highly repurposable: you can take, say, an image classification or speech-to-text model trained on a large dataset and reuse it on a completely different problem with only minor changes. In computer vision in particular, many pretrained models (usually trained on the ImageNet dataset) are publicly available for download and can be used to build powerful vision models out of very little data. That is what we will do in the next section.
Let's start by getting our hands on the data.
## Downloading the data
The dogs vs. cats dataset we will use is not packaged with Keras. It was made available by Kaggle as part of a computer vision competition in late 2013, back when convnets were not yet mainstream. You can download the original dataset from `https://www.kaggle.com/c/dogs-vs-cats/data` (you will need a Kaggle account if you don't already have one, but the sign-up process is simple). For convenience, the translated book's GitHub repository already includes a pre-downloaded copy of this dataset.
The pictures are medium-resolution color JPEG files. Here are a few samples:
*(figure: a few sample cat and dog pictures from the dataset)*
Unsurprisingly, the 2013 dogs vs. cats Kaggle competition was won by entrants who used convnets; the best entries achieved up to 95% accuracy. In this example (in the next section) we will get fairly close to that accuracy, even though we will train our models on less than 10% of the data that was available to the competitors.
This dataset contains 25,000 images of dogs and cats (12,500 per class) and is 543 MB (compressed). After downloading and uncompressing it, we will create a new dataset containing three subsets: a training set with 1,000 samples per class, a validation set with 500 samples per class, and a test set with 500 samples per class.
Here is the code that does this:
```
import os, shutil
# Path to the directory where the original dataset was uncompressed
original_dataset_dir = './datasets/cats_and_dogs/train'
# Directory where we will store our smaller dataset
base_dir = './datasets/cats_and_dogs_small'
if os.path.exists(base_dir):     # delete the directory so this cell can be re-run
    shutil.rmtree(base_dir)      # (this code is not included in the book)
os.mkdir(base_dir)
# Directories for the training, validation, and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
# Directory with our test cat pictures
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
# Directory with our test dog pictures
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
# Copy the first 1,000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy the next 500 cat images to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy the next 500 cat images to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy the first 1,000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy the next 500 dog images to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy the next 500 dog images to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
```
As a sanity check, let's count how many pictures are in each split (train/validation/test):
```
print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print('total test cat images:', len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))
```
So we now have 2,000 training images, 1,000 validation images, and 1,000 test images. Each split contains the same number of samples from each class: this is a balanced binary classification problem, which means classification accuracy will be an appropriate measure of success.
## Building our network
We built a small convnet for MNIST in the previous example, so you should be familiar with convnets by now. The structure we use here is generally the same: a stack of alternating `Conv2D` layers (with `relu` activation) and `MaxPooling2D` layers.
Because we are dealing with bigger images and a more complex problem, we will make the network a bit larger by adding one more `Conv2D` + `MaxPooling2D` stage. This both increases the capacity of the network and further reduces the size of the feature maps so they aren't overly large when we reach the `Flatten` layer. Starting from inputs of size 150 × 150 (a somewhat arbitrary choice), we end up with feature maps of size 7 × 7 just before the `Flatten` layer.
Note that the depth of the feature maps progressively increases in the network (from 32 to 128), while their spatial size decreases (from 150 × 150 to 7 × 7). This is a pattern you will see in almost all convnets.
Since this is a binary classification problem, the network ends with a single unit (a `Dense` layer of size 1) and a `sigmoid` activation. This unit encodes the probability that the image belongs to one class or the other.
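As a quick sanity check of the sizes mentioned above, the spatial size after each `Conv2D` (3 × 3, no padding) followed by `MaxPooling2D` (2 × 2) can be computed by hand (illustration only; compare with the `model.summary()` output below):
```
# Spatial size of the feature maps after each Conv2D (3x3, no padding) +
# MaxPooling2D (2x2) block, starting from 150x150 inputs (illustration only).
size = 150
for block in range(4):
    size = size - 2    # a 3x3 convolution without padding trims one pixel on each side
    size = size // 2   # a 2x2 max pooling halves the spatial dimensions
    print('after block', block + 1, ':', size)
# prints 74, 36, 17, 7, matching the model.summary() output below
```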
```
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
```
Let's look at how the dimensions of the feature maps change with every successive layer:
```
model.summary()
```
For the compilation step, we will go with the `RMSprop` optimizer as before. Because the network ends with a single sigmoid unit, we use binary crossentropy as the loss (see section 4.5 for a table of loss functions for various situations).
```
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
```
## Data preprocessing
The data should be formatted into appropriately preprocessed floating-point tensors before being fed into the network. Currently the data sits on disk as JPEG files, so getting it into the network roughly involves the following steps:
1. Read the picture files.
2. Decode the JPEG content into grids of RGB pixel values.
3. Convert these into floating-point tensors.
4. Rescale the pixel values (between 0 and 255) to the [0, 1] interval (neural networks prefer small input values).
This may seem a bit daunting, but thankfully Keras has utilities that take care of these steps automatically. Keras provides image-processing helper tools in the `keras.preprocessing.image` module. In particular, the `ImageDataGenerator` class sets up Python generators that automatically turn image files on disk into batches of preprocessed tensors. This is the class we will use here.
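To make steps 1-4 concrete, here is what they look like done by hand for a single image (an illustrative sketch only; the `ImageDataGenerator` below does all of this for entire batches):
```
# Manual version of steps 1-4 above for one training image (illustration only).
from keras.preprocessing import image
img = image.load_img(os.path.join(train_cats_dir, 'cat.0.jpg'),
                     target_size=(150, 150))  # steps 1-2: read the file and decode the JPEG
x = image.img_to_array(img)                   # step 3: float tensor of shape (150, 150, 3)
x /= 255.                                     # step 4: rescale pixel values to [0, 1]
print(x.shape, x.min(), x.max())
```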
```
from keras.preprocessing.image import ImageDataGenerator
# Rescale all images by 1/255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        # Target directory
train_dir,
        # Resize all images to 150 × 150
target_size=(150, 150),
batch_size=20,
        # Because we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
```
Let's look at the output of one of these generators: it yields batches of 150 × 150 RGB images (shape `(20, 150, 150, 3)`) and binary labels (shape `(20,)`). There are 20 samples in each batch (the batch size). Note that the generator yields these batches endlessly: it loops over and over the images in the target folder, so you need to `break` the iteration loop at some point.
```
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
break
```
Let's fit the model to the data using the generator. We do this with the `fit_generator` method, the equivalent of `fit` for data generators like ours. It expects as its first argument a Python generator that yields batches of inputs and targets indefinitely. Because the data is generated endlessly, the Keras model needs to know how many samples to draw from the generator before declaring an epoch over; this is the role of the `steps_per_epoch` argument. After having drawn `steps_per_epoch` batches from the generator, i.e. after having run `steps_per_epoch` gradient descent steps, the fitting process moves on to the next epoch. Here, each batch contains 20 samples, so it takes 100 batches to cover all 2,000 training samples.
When using `fit_generator`, you can pass a `validation_data` argument, much as with the `fit` method. This argument can be a data generator, but it could also be a tuple of NumPy arrays. If you pass a generator as `validation_data`, it is expected to yield batches of validation data endlessly, so you also need to specify the `validation_steps` argument, which tells the process how many batches to draw from the validation generator for evaluation.
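The step counts used below follow directly from these sizes (simple sanity-check arithmetic):
```
# steps_per_epoch and validation_steps used below (sanity-check arithmetic only).
train_samples, validation_samples, generator_batch = 2000, 1000, 20
print(train_samples // generator_batch)       # 100 -> steps_per_epoch
print(validation_samples // generator_batch)  # 50  -> validation_steps
```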
```
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50)
```
It is good practice to always save your model after training:
```
model.save('cats_and_dogs_small_1.h5')
```
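The saved file can be restored later with `keras.models.load_model` (shown here only as an illustration; the restored model is not used again in this notebook):
```
# Illustration only: reload the model we just saved.
from keras.models import load_model
restored_model = load_model('cats_and_dogs_small_1.h5')
restored_model.summary()
```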
Let's plot the loss and accuracy of the model over the training and validation data during training:
```
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
These plots are characteristic of overfitting. The training accuracy increases roughly linearly over time until it reaches nearly 100%, while the validation accuracy stalls at 70-72%. The validation loss reaches its minimum after only five epochs and then stops improving, while the training loss keeps decreasing roughly linearly until it reaches nearly 0.
Because we have relatively few training samples (2,000), overfitting is our most important concern. You already know a number of techniques that can help mitigate overfitting, such as dropout and weight decay (L2 regularization). Here we will try a new one, specific to computer vision and used almost universally when processing images with deep learning: data augmentation.
## Using data augmentation
Overfitting is caused by having too few samples to learn from, which makes it impossible to train a model that can generalize to new data. Given infinite data, the model could learn every possible aspect of the data distribution. Data augmentation generates more training data from the existing training samples by applying a number of random transformations that yield believable-looking images. The goal is for the model to never see the exact same picture twice during training; learning more aspects of the data in this way should help the model generalize better.
In Keras, this can be done by configuring the `ImageDataGenerator` to apply a number of random transformations to the images it reads. Let's get started with an example:
```
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
```
There are a few more options available (see the Keras documentation). Let's quickly go over this code:
* `rotation_range` is a range in degrees (0-180) within which to randomly rotate pictures.
* `width_shift_range` and `height_shift_range` are ranges (as a fraction of total width or height) within which to randomly translate pictures horizontally or vertically.
* `shear_range` is a range within which to randomly apply shearing transformations.
* `zoom_range` is a range within which to randomly zoom inside pictures.
* `horizontal_flip` randomly flips images horizontally; use it when horizontal symmetry can be assumed (for example, landscape or portrait photos).
* `fill_mode` is the strategy used for filling in the pixels newly created by a rotation or a width/height shift.
Let's look at some augmented image samples:
```
# Module with image preprocessing utilities
from keras.preprocessing import image
fnames = sorted([os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)])
# Choose one image to augment
img_path = fnames[3]
# Read the image and resize it
img = image.load_img(img_path, target_size=(150, 150))
# Convert it to a NumPy array of shape (150, 150, 3)
x = image.img_to_array(img)
# Reshape it to (1, 150, 150, 3)
x = x.reshape((1,) + x.shape)
# The flow() method generates batches of randomly transformed images.
# It loops indefinitely, so we need to break at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(image.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
```
If we train a new network using this data-augmentation configuration, the network will never see the same input twice. However, the inputs it sees are still heavily intercorrelated, since they come from a small number of original images: we cannot produce new information, we can only remix existing information. As such, this may not be enough to completely get rid of overfitting. To further fight overfitting, we will also add a `Dropout` layer right before the densely connected classifier:
```
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
```
Let's train our network using data augmentation and dropout:
```
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        # Target directory
train_dir,
        # Resize all images to 150 × 150
target_size=(150, 150),
batch_size=32,
        # Because we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
```
Let's save our model; we will use it in the next section.
```
model.save('cats_and_dogs_small_2.h5')
```
And let's plot the results again:
```
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
See figures 5-12 and 5-13 in the book. Thanks to data augmentation and dropout, we are no longer overfitting: the training curves closely track the validation curves. We reach an accuracy of about 82% on the validation data, roughly a 15% improvement over the non-regularized model.
By using more regularization techniques and by tuning the network's parameters (such as the number of filters per convolution layer or the number of layers in the network), we may be able to reach an even higher accuracy, around 86-87%. But it would be hard to go much higher by training our own convnet from scratch, simply because we have so little data to work with. The next step to improve accuracy on this problem is to use a pretrained model, which is the focus of the next two sections.
|
github_jupyter
|
import keras
keras.__version__
import os, shutil
# 원본 데이터셋을 압축 해제한 디렉터리 경로
original_dataset_dir = './datasets/cats_and_dogs/train'
# 소규모 데이터셋을 저장할 디렉터리
base_dir = './datasets/cats_and_dogs_small'
if os.path.exists(base_dir): # 반복적인 실행을 위해 디렉토리를 삭제합니다.
shutil.rmtree(base_dir) # 이 코드는 책에 포함되어 있지 않습니다.
os.mkdir(base_dir)
# 훈련, 검증, 테스트 분할을 위한 디렉터리
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
# 훈련용 고양이 사진 디렉터리
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
# 훈련용 강아지 사진 디렉터리
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
# 검증용 고양이 사진 디렉터리
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
# 검증용 강아지 사진 디렉터리
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
# 테스트용 고양이 사진 디렉터리
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
# 테스트용 강아지 사진 디렉터리
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
# 처음 1,000개의 고양이 이미지를 train_cats_dir에 복사합니다
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
# 다음 500개 고양이 이미지를 validation_cats_dir에 복사합니다
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
# 다음 500개 고양이 이미지를 test_cats_dir에 복사합니다
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
# 처음 1,000개의 강아지 이미지를 train_dogs_dir에 복사합니다
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# 다음 500개 강아지 이미지를 validation_dogs_dir에 복사합니다
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# 다음 500개 강아지 이미지를 test_dogs_dir에 복사합니다
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
print('훈련용 고양이 이미지 전체 개수:', len(os.listdir(train_cats_dir)))
print('훈련용 강아지 이미지 전체 개수:', len(os.listdir(train_dogs_dir)))
print('검증용 고양이 이미지 전체 개수:', len(os.listdir(validation_cats_dir)))
print('검증용 강아지 이미지 전체 개수:', len(os.listdir(validation_dogs_dir)))
print('테스트용 고양이 이미지 전체 개수:', len(os.listdir(test_cats_dir)))
print('테스트용 강아지 이미지 전체 개수:', len(os.listdir(test_dogs_dir)))
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
from keras.preprocessing.image import ImageDataGenerator
# 모든 이미지를 1/255로 스케일을 조정합니다
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# 타깃 디렉터리
train_dir,
# 모든 이미지를 150 × 150 크기로 바꿉니다
target_size=(150, 150),
batch_size=20,
# binary_crossentropy 손실을 사용하기 때문에 이진 레이블이 필요합니다
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
for data_batch, labels_batch in train_generator:
print('배치 데이터 크기:', data_batch.shape)
print('배치 레이블 크기:', labels_batch.shape)
break
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50)
model.save('cats_and_dogs_small_1.h5')
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# 이미지 전처리 유틸리티 모듈
from keras.preprocessing import image
fnames = sorted([os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)])
# 증식할 이미지 선택합니다
img_path = fnames[3]
# 이미지를 읽고 크기를 변경합니다
img = image.load_img(img_path, target_size=(150, 150))
# (150, 150, 3) 크기의 넘파이 배열로 변환합니다
x = image.img_to_array(img)
# (1, 150, 150, 3) 크기로 변환합니다
x = x.reshape((1,) + x.shape)
# flow() 메서드는 랜덤하게 변환된 이미지의 배치를 생성합니다.
# 무한 반복되기 때문에 어느 지점에서 중지해야 합니다!
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(image.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
# The validation data must not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        # the target directory
train_dir,
        # resize all images to 150 × 150
target_size=(150, 150),
batch_size=32,
        # binary labels are needed because we use the binary_crossentropy loss
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
model.save('cats_and_dogs_small_2.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# TensorFlow Semi-supervised Object Detection Architecture (TSODA)
Welcome to this project. This notebook walks through the steps needed to run the application and train your own semi-supervised object detection model.
If you need more context, here is the original tutorial: https://medium.com/p/757b9c88f270/edit.
Also check the GitHub repository: https://github.com/AlvaroCavalcante/tf-models.
# Initial configurations
Here you can find more models if you don't want to use SSD: [Tensorflow detection model zoo: COCO-trained models](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md#coco-trained-models), and here the respective configuration files: [object_detection/samples/configs/](https://github.com/tensorflow/models/tree/master/research/object_detection/samples/configs).
```
repo_url = 'https://github.com/AlvaroCavalcante/tf-models' # replace with your repo
MODEL = 'ssd_inception_v2_coco_2018_01_28' # replace with the model you want to use
pipeline_file = 'ssd_inception_v2_coco.config'
# Model hyperparameters
num_steps = 2500
num_eval_steps = 50
batch_size = 16
```
## Clone your repository which has your images and scripts!
```
import os
%cd /content
repo_dir_path = os.path.abspath(os.path.join('.', os.path.basename(repo_url)))
!git clone {repo_url}
%cd {repo_dir_path}
!git pull
```
## Installing the requirements
```
%cd /content
!apt-get install -qq protobuf-compiler python-pil python-lxml python-tk
!pip install -q Cython contextlib2 pillow lxml matplotlib
!pip install -q pycocotools
!pip install gast==0.3.3
!pip install tf_slim
!pip install virtualenv
%cd /content/tf-models/research
!protoc object_detection/protos/*.proto --python_out=.
import shutil
import os
os.environ['PYTHONPATH'] += ':/content/tf-models/research/:/content/tf-models/research/slim/'
!python object_detection/builders/model_builder_test.py
!pip install tensorflow-object-detection-api
```
# Creating the necessary methods
These methods are used in each iteration of the semi-supervised training loop.
## Preparing the **tfrecord** files.
The **generate_tfrecord()** method is responsible for converting the JPG + XML files to CSV and then to TFRecord. Before doing so, it checks whether the record files already exist and deletes them if they do.
This is necessary because the method is called in every iteration, generating a fresh version of the TFRecord files that includes the newly labeled images and avoiding stale-file errors.
```
def delete_files():
path = '/content/tf-models/research/object_detection/'
files = ['train.record', 'test.record', 'train_labels.csv', 'test_labels.csv']
for current_file in files:
os.remove(path + current_file)
print('Deleted files')
def generate_tfrecord(repo_dir_path):
%cd {repo_dir_path}
if os.path.isfile('/content/tf-models/research/object_detection/train.record'):
delete_files()
!python research/object_detection/xml_to_csv.py -i research/object_detection/train_images -o research/object_detection/train_labels.csv -l research/object_detection/label_map
!python research/object_detection/xml_to_csv.py -i research/object_detection/test_images -o research/object_detection/test_labels.csv
!python research/object_detection/generate_tfrecord.py --csv_input=research/object_detection/train_labels.csv --output_path=research/object_detection/train.record --img_path=research/object_detection/train_images --label_map research/object_detection/label_map/label_map.pbtxt
!python research/object_detection/generate_tfrecord.py --csv_input=research/object_detection/test_labels.csv --output_path=research/object_detection/test.record --img_path=research/object_detection/test_images --label_map research/object_detection/label_map/label_map.pbtxt
```
Call the method for the first time and define the paths.
```
generate_tfrecord(repo_dir_path)
test_record_fname = '/content/tf-models/research/object_detection/test.record'
train_record_fname = '/content/tf-models/research/object_detection/train.record'
label_map_pbtxt_fname = '/content/tf-models/research/object_detection/label_map/label_map.pbtxt'
tf_log_path = '/content/tf-models/research/object_detection/events.out.tfevents.1576020673.0083b462c1a8'
```
## Download base model for transfer learning
Transfer learning is used, starting from a model pre-trained on the COCO dataset.
```
%cd /content/tf-models/research
import os
import shutil
import glob
import urllib.request
import tarfile
MODEL_FILE = MODEL + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
DEST_DIR = '/content/tf-models/research/pretrained_model' #saved here
if not (os.path.exists(MODEL_FILE)):
urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar = tarfile.open(MODEL_FILE)
tar.extractall()
tar.close()
os.remove(MODEL_FILE)
if (os.path.exists(DEST_DIR)):
shutil.rmtree(DEST_DIR)
os.rename(MODEL, DEST_DIR)
```
## Configuring the training pipeline
The **change_pipeline()** method is called to update the parameters of the model configuration file. In practice, we need to update the total number of training steps and the checkpoint path: only in the first iteration do we start from the downloaded model, while in the following iterations our own trained checkpoint is used to continue training.
```
import os
pipeline_fname = os.path.join('/content/tf-models/research/object_detection/samples/configs/', pipeline_file)
fine_tune_checkpoint = os.path.join(DEST_DIR, "model.ckpt")
assert os.path.isfile(pipeline_fname), '`{}` does not exist'.format(pipeline_fname)
def get_num_classes(pbtxt_fname):
from object_detection.utils import label_map_util
label_map = label_map_util.load_labelmap(pbtxt_fname)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=90, use_display_name=True)
print(categories)
category_index = label_map_util.create_category_index(categories)
print(category_index)
return len(category_index.keys())
def change_pipeline():
import re
num_classes = get_num_classes(label_map_pbtxt_fname)
with open(pipeline_fname) as f:
s = f.read()
with open(pipeline_fname, 'w') as f:
# fine_tune_checkpoint
s = re.sub('fine_tune_checkpoint: ".*?"',
'fine_tune_checkpoint: "{}"'.format(fine_tune_checkpoint), s)
# tfrecord files train and test.
s = re.sub(
'(input_path: ".*?)(train.record)(.*?")', 'input_path: "{}"'.format(train_record_fname), s)
s = re.sub(
'(input_path: ".*?)(val.record)(.*?")', 'input_path: "{}"'.format(test_record_fname), s)
# label_map_path
s = re.sub(
'label_map_path: ".*?"', 'label_map_path: "{}"'.format(label_map_pbtxt_fname), s)
# Set training batch_size.
s = re.sub('batch_size: [0-9]+',
'batch_size: {}'.format(batch_size), s)
# Set training steps, num_steps
s = re.sub('num_steps: [0-9]+',
'num_steps: {}'.format(num_steps), s)
# Set number of classes num_classes.
s = re.sub('num_classes: [0-9]+',
'num_classes: {}'.format(num_classes), s)
f.write(s)
change_pipeline()
!cat {pipeline_fname} # this is your final result pipeline config
```
## Restarting the training directory
In every iteration we remove the model directory to avoid errors; the checkpoint is then restored so training can continue.
```
def start_new_training_dir():
model_dir = 'training/'
!rm -rf {model_dir}
os.makedirs(model_dir, exist_ok=True)
model_dir = 'training/'
start_new_training_dir()
```
# Training process
The following steps are the core of the semi-supervised iteration.
## Exporting model
The **export_model()** method is called right after training finishes, saving a checkpoint and the saved_model.pb file that is used to run inference on new images.
One possible improvement would be to load the checkpoints directly instead of using the saved_model.pb for inference, making each iteration even faster.
**Note:** We are accepting collaborators for the project :)
```
def export_model():
import re
import numpy as np
output_directory = './fine_tuned_model'
lst = os.listdir(model_dir)
lst = [l for l in lst if 'model.ckpt-' in l and '.meta' in l]
  steps = np.array([int(re.findall(r'\d+', l)[0]) for l in lst])
last_model = lst[steps.argmax()].replace('.meta', '')
last_model_path = os.path.join(model_dir, last_model)
print(last_model_path)
!source tf1/bin/activate; python /content/tf-models/research/object_detection/export_inference_graph.py \
--input_type=image_tensor \
--pipeline_config_path={pipeline_fname} \
--output_directory={output_directory} \
--trained_checkpoint_prefix={last_model_path}
```
## Training the model
To train the model we use the **train_model()** method, which simply runs the training script with the previously specified parameters.
The command starts by activating the virtual environment, which is explained below.
```
def train_model():
!source tf1/bin/activate; python /content/tf-models/research/object_detection/model_main.py \
--pipeline_config_path={pipeline_fname} \
--model_dir={model_dir} \
--alsologtostderr \
--num_train_steps={num_steps} \
--num_eval_steps={num_eval_steps}
```
## Inferences!
Model inference is handled by the following method, which simply changes into the research folder and runs inference_from_model.py.
```
def inference_from_model():
%cd /content/tf-models/research
!python /content/tf-models/research/object_detection/inference_from_model.py
```
## TF 2.0 drawback (Virtual env)
I know this looks odd: a virtual environment inside Google Colab? Yes, unfortunately. This is necessary because the default Colab version of TensorFlow is 2.x, but the training must run on version 1.x for compatibility reasons. I tried to adapt the training to TF 2.x, but without success.
This will surely change in a future version, but for now, this works!
Here we are creating a new venv and installing all the requirements, including TF 1.x.
**Note:** Simply switching Colab's TF version to 1.x will not work, because the inference script is written for version 2.x; the best option is to wait until a fully compatible version is released. You can also try to adapt the training script to 2.x, but expect to spend some time on it.
```
def create_venv():
!virtualenv tf1
!source tf1/bin/activate; pip install tensorflow==1.15
!source tf1/bin/activate; pip install -q Cython contextlib2 pillow lxml matplotlib
!source tf1/bin/activate; pip install -q pycocotools
!source tf1/bin/activate; pip install gast==0.3.3
!source tf1/bin/activate; pip install tf_slim
!source tf1/bin/activate; pip install scipy
%cd /content/tf-models/research
!source tf1/bin/activate; protoc object_detection/protos/*.proto --python_out=.
!source tf1/bin/activate; python object_detection/builders/model_builder_test.py
!source tf1/bin/activate; pip install tensorflow-object-detection-api
```
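After `create_venv()` has run at least once, you can sanity-check which TensorFlow version each interpreter sees. This is just a sketch for debugging purposes, not part of the original scripts:
```
!python -c "import tensorflow as tf; print('Colab default TF:', tf.__version__)"
!source tf1/bin/activate; python -c "import tensorflow as tf; print('tf1 venv TF:', tf.__version__)"
```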
# Iteration process
This is where the whole iteration happens: the methods we created above are called and do their job. The explanations are in the code comments.
```
import os
import sys
train_path = repo_dir_path + '/research/object_detection' # base directory
unlabel_data = len(os.listdir(train_path + '/unlabeled_data')) # number of remaining unlabeled images
train_count = 0
while train_count != 5: # my stop criterion is 5 iterations, you are free to change it!
create_venv() # In the next iterations, this will run faster, as everything will be cached.
train_model()
print('MODEL TRAINED')
if train_count > 0: # Delete the checkpoint folder after the first iteration, where it's created. Necessary to avoid errors
fine_tune_model_path = repo_dir_path + '/research/fine_tuned_model'
shutil.rmtree(fine_tune_model_path)
fine_tune_model_name = 'fine_tuned_model' # define the checkpoint path for pipeline config
export_model()
print('MODEL EXPORTED')
inference_from_model()
print('INFERENCE DONE')
generate_tfrecord(repo_dir_path) # new TF records!
print('NEW TF RECORDs')
unlabel_data = len(os.listdir(train_path + '/unlabeled_data'))
print('REMAINING IMAGES', unlabel_data) # Check how many images are not labeled yet
checkpoint_path = os.path.join('/content/tf-models/research', fine_tune_model_name)
fine_tune_checkpoint = os.path.join(checkpoint_path, "model.ckpt")
num_steps += 1500 if unlabel_data > 0 else 2000 # this is important, let's talk more about this below.
change_pipeline() # new parameters in pipe config
print('PIPELINE CHANGED')
start_new_training_dir()
print('NEW TRAINING DIR')
train_count += 1
```
As you saw in my Medium article, the step schedule is a really important parameter. Depending on your problem you may want to change this number to give your model more time to learn. In this case, once there are no unlabeled images left, I increase the number of steps per iteration so the model has more time to generalize the features.
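If you want to experiment with the schedule, one option is to factor it into a small helper function. The sketch below is illustrative only; the function name and the increments are mine, mirroring the `num_steps += 1500 if unlabel_data > 0 else 2000` line in the loop above:
```
def next_num_steps(current_steps, remaining_unlabeled,
                   labeling_increment=1500, refinement_increment=2000):
    """Return the training step budget for the next iteration.

    While unlabeled images remain, add a smaller increment (each round mainly
    exists to label more data); once everything is labeled, add a larger
    increment so the model has more time to generalize.
    """
    increment = labeling_increment if remaining_unlabeled > 0 else refinement_increment
    return current_steps + increment

# Example: 2500 -> 4000 while unlabeled images remain, then 4000 -> 6000 afterwards.
print(next_num_steps(2500, remaining_unlabeled=120))  # 4000
print(next_num_steps(4000, remaining_unlabeled=0))    # 6000
```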
## Downloading the model!
If you want to deploy this model or test it in the field, you will need to download it to your machine.
```
output_directory = repo_dir_path + '/research/fine_tuned_model'
!ls {model_dir}
!ls {output_directory}
import os
pb_fname = os.path.join(os.path.abspath(output_directory), "frozen_inference_graph.pb")
assert os.path.isfile(pb_fname), '`{}` does not exist'.format(pb_fname)
checkpoint_meta = os.path.join(os.path.abspath(output_directory), "model.ckpt.meta")
checkpoint_index = os.path.join(os.path.abspath(output_directory), "model.ckpt.index")
checkpoint_data = os.path.join(os.path.abspath(output_directory), "model.ckpt.data-00000-of-00001")
checkpoint = os.path.join(os.path.abspath(output_directory), "checkpoint")
saved_model = os.path.join(os.path.abspath(output_directory), "saved_model/saved_model.pb")
# uncomment this line to download logs too
#tf_log = os.path.join(os.path.abspath('/content/models/research/training/eval_0'), "events.out.tfevents.1576020673.0083b462c1a8")
from google.colab import files
files.download(pb_fname)
files.download(label_map_pbtxt_fname)
files.download(pipeline_fname)
files.download(checkpoint_meta)
files.download(checkpoint_index)
files.download(checkpoint_data)
files.download(saved_model)
files.download(checkpoint)
```
## Check some automatically created labels!
You can download a few images and XML files to check how your model is labeling your data. You may actually want to download the whole folder to get all of your brand-new labeled images; a sketch for doing that in one shot follows the code below.
```
from google.colab import files
files.download('/content/tf-models/research/object_detection/train_images/dog.154.jpg') # my dog image and xml :)
files.download('/content/tf-models/research/object_detection/train_images/dog.xml.jpg')
```
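If you prefer to grab every labeled image and XML file at once rather than downloading them one by one, a simple option is to zip the training folder first. This is a sketch that assumes the repository layout used above:
```
# Zip the whole train_images folder (images + generated XML annotations)
# and download the archive in a single call.
import shutil
from google.colab import files

archive_path = shutil.make_archive(
    '/content/labeled_train_images',  # archive name, '.zip' is appended
    'zip',
    '/content/tf-models/research/object_detection/train_images')
files.download(archive_path)
```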
# Some results
These are some images that were labeled automatically. Please share this project if it helped you, and contribute if you want :)

```
import nltk
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
import numpy as np
#nltk.download_shell()
#messages = [line.rstrip() for line in open('SMSSpamCollection')]
messages = pd.read_csv("SMSSpamCollection", sep="\t", names=["label","message"])
messages.head()
messages.describe()
messages.groupby('label').describe()
messages["length"] = messages.message.apply(len)
messages
plt.figure(figsize=(12,9))
plt.xticks(np.arange(0,1000,50))
sns.histplot(data=messages, x="length", bins=50,hue="label")
messages.groupby('label').describe()
import string
from nltk.corpus import stopwords
string.punctuation
stopwords.words("english")
def text_process(msg):
# remove '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
nopunc=[char for char in msg if char not in string.punctuation]
nopunc="".join(nopunc)
# remove stopwords and return
    return [word for word in nopunc.split() if word.lower() not in stopwords.words("english")]
messages["message"].head().apply(text_process)
from sklearn.feature_extraction.text import CountVectorizer
# Might take awhile...
bow_transformer = CountVectorizer(analyzer=text_process).fit(messages['message'])
# Print total number of vocab words
print(len(bow_transformer.vocabulary_))
message4 = messages['message'][3]
print(message4)
print(bow_transformer.get_feature_names()[4073])
print(bow_transformer.get_feature_names()[9570])
messages_bow = bow_transformer.transform(messages['message'])
print('Shape of Sparse Matrix: ', messages_bow.shape)
print('Amount of Non-Zero occurences: ', messages_bow.nnz)
sparsity = (100.0 * messages_bow.nnz / (messages_bow.shape[0] * messages_bow.shape[1]))
print('sparsity: {}'.format(round(sparsity)))
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer().fit(messages_bow)
messages_tfidf = tfidf_transformer.transform(messages_bow)
print(messages_tfidf.shape)
from sklearn.naive_bayes import MultinomialNB
spam_detect_model = MultinomialNB().fit(messages_tfidf, messages['label'])
all_predictions = spam_detect_model.predict(messages_tfidf)
print(all_predictions)
from sklearn.metrics import classification_report
print (classification_report(messages['label'], all_predictions))
from sklearn.model_selection import train_test_split
msg_train, msg_test, label_train, label_test = \
train_test_split(messages['message'], messages['label'], test_size=0.2)
print(len(msg_train), len(msg_test), len(msg_train) + len(msg_test))
from sklearn.pipeline import Pipeline
pipeline = Pipeline([
('bow', CountVectorizer(analyzer=text_process)), # strings to token integer counts
('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
('classifier', MultinomialNB()), # train on TF-IDF vectors w/ Naive Bayes classifier
])
pipeline.fit(msg_train,label_train)
predictions = pipeline.predict(msg_test)
print(classification_report(predictions,label_test))
```
# Optimal Ensemble Learning with the [`sl3`](https://jeremyrcoyle.github.io/sl3/) R package
## Author: [Nima Hejazi](https://nimahejazi.org)
## Date: 14 February 2018
### _Attribution:_ based on materials by David Benkeser, Jeremy Coyle, Ivana Malenica, and Oleg Sofrygin
## Introduction
In this demonstration, we will illustrate the basic functionality of the `sl3` R package. Specifically, we will walk through the concept of machine learning pipelines, the construction of ensemble models, and simple optimality properties of stacked regression. After this introduction we will be well prepared to discuss more advanced topics in ensemble learning, such as optimal kernel density estimation.
## Resources
* The `sl3` R package homepage: https://jeremyrcoyle.github.io/sl3/
* The `sl3` R package repository: https://github.com/jeremyrcoyle/sl3
## Setup
First, we'll load the packages required for this exercise and load a simple data set (`cpp_imputed` below) that we'll use for demonstration purposes:
```
set.seed(49753)
# packages we'll be using
library(data.table)
library(SuperLearner)
library(origami)
library(sl3)
# load example data set
data(cpp_imputed)
# take a peek at the data
head(cpp_imputed)
```
To use this data set with `sl3`, the object must be wrapped in a customized `sl3` container, an __`sl3` "Task"__ object. A _task_ is an idiom for all of the elements of a prediction problem other than the learning algorithms and prediction approach itself -- that is, a task delineates the structure of the data set of interest and any potential metadata (e.g., observation-level weights).
```
# here are the covariates we are interested in and, of course, the outcome
covars <- c("apgar1", "apgar5", "parity", "gagebrth", "mage", "meducyrs",
"sexn")
outcome <- "haz"
# create the sl3 task and take a look at it
task <- make_sl3_Task(data = cpp_imputed, covariates = covars,
outcome = outcome, outcome_type = "continuous")
# let's take a look at the sl3 task
task
```
## Interlude: Object Oriented Programming in `R`
`sl3` is designed using basic OOP principles and the `R6` OOP framework. While we’ve tried to make it easy to use `sl3` without worrying much about OOP, it is helpful to have some intuition about how `sl3` is structured. In this section, we briefly outline some key concepts from OOP. Readers familiar with OOP basics are invited to skip this section. The key concept of OOP is that of an object, a collection of data and functions that corresponds to some conceptual unit. Objects have two main types of elements: (1) _fields_, which can be thought of as nouns, are information about an object, and (2) _methods_, which can be thought of as verbs, are actions an object can perform. Objects are members of classes, which define what those specific fields and methods are. Classes can inherit elements from other classes (sometimes called base classes) – accordingly, classes that are similar, but not exactly the same, can share some parts of their definitions.
Many different implementations of OOP exist, with variations in how these concepts are implemented and used. R has several different implementations, including `S3`, `S4`, reference classes, and `R6`. `sl3` uses the `R6` implementation. In `R6`, methods and fields of a class object are accessed using the `$` operator. The next section explains how these concepts are used in `sl3` to model machine learning problems and algorithms.
## `sl3` Learners
`Lrnr_base` is the base class for defining machine learning algorithms, as well as fits for those algorithms to particular `sl3_Tasks`. Different machine learning algorithms are defined in classes that inherit from `Lrnr_base`. For instance, the `Lrnr_glm` class inherits from `Lrnr_base`, and defines a learner that fits generalized linear models. We will use the term learners to refer to the family of classes that inherit from `Lrnr_base`. Learner objects can be constructed from their class definitions using the `make_learner` function:
```
# make learner object
lrnr_glm <- make_learner(Lrnr_glm)
```
Because all learners inherit from `Lrnr_base`, they have many features in common, and can be used interchangeably. All learners define three main methods: `train`, `predict`, and `chain`. The first, `train`, takes an `sl3_task` object, and returns a `learner_fit`, which has the same class as the learner that was trained:
```
# fit learner to task data
lrnr_glm_fit <- lrnr_glm$train(task)
# verify that the learner is fit
lrnr_glm_fit$is_trained
```
Here, we fit the learner to the CPP task we defined above. Both `lrnr_glm` and `lrnr_glm_fit` are objects of class `Lrnr_glm`, although the former defines a learner and the latter defines a fit of that learner. We can distinguish between learners and learner fits using the `is_trained` field, which is true for fits but not for learners.
Now that we’ve fit a learner, we can generate predictions using the predict method:
```
# get learner predictions
preds <- lrnr_glm_fit$predict()
head(preds)
```
Here, we did not pass a task to `predict`, so it defaulted to the task provided to `train` (called the training task). Alternatively, we could have provided a different task for which we want to generate predictions.
The final important learner method, chain, will be discussed below, in the section on learner composition. As with `sl3_Task`, learners have a variety of fields and methods we haven’t discussed here. More information on these is available in the help for `Lrnr_base`.
## Pipelines
A pipeline is a set of learners to be fit sequentially, where the fit from one learner is used to define the task for the next learner. There are many ways in which a learner can define the task for the downstream learner. The chain method defined by learners defines how this will work. Let’s look at the example of pre-screening variables. For now, we’ll rely on a screener from the `SuperLearner` package, although native `sl3` screening algorithms will be implemented soon.
Below, we generate a screener object based on the `SuperLearner` function `screen.corP` and fit it to our task. Inspecting the fit, we see that it selected a subset of covariates:
```
screen_cor <- Lrnr_pkg_SuperLearner_screener$new("screen.corP")
screen_fit <- screen_cor$train(task)
print(screen_fit)
```
The `Pipeline` class automates this process. It takes an arbitrary number of learners and fits them sequentially, training and chaining each one in turn. Since `Pipeline` is a learner like any other, it shares the same interface. We can define a pipeline using `make_learner`, and use `train` and `predict` just as we did before:
```
sg_pipeline <- make_learner(Pipeline, screen_cor, lrnr_glm)
sg_pipeline_fit <- sg_pipeline$train(task)
sg_pipeline_preds <- sg_pipeline_fit$predict()
head(sg_pipeline_preds)
```
## Stacks
Like `Pipelines`, `Stacks` combine multiple learners. Stacks train learners simultaneously, so that their predictions can be either combined or compared. Again, `Stack` is just a special learner and so has the same interface as all other learners:
```
stack <- make_learner(Stack, lrnr_glm, sg_pipeline)
stack_fit <- stack$train(task)
stack_preds <- stack_fit$predict()
head(stack_preds)
```
Above, we’ve defined and fit a stack made up of a simple `glm` learner as well as a pipeline that combines a screening algorithm with that same learner. We could have included any arbitrary set of learners and pipelines, the latter of which are themselves just learners. We can see that the predict method now returns a matrix, with a column for each learner included in the stack.
## The Super Learner Algorithm
Having defined a stack, we might want to compare the performance of learners in the stack, which we may do using cross-validation. The `Lrnr_cv` learner wraps another learner and performs training and prediction in a cross-validated fashion, using separate training and validation splits as defined by `task$folds`.
Below, we define a new `Lrnr_cv` object based on the previously defined stack and train it and generate predictions on the validation set:
```
cv_stack <- Lrnr_cv$new(stack)
cv_fit <- cv_stack$train(task)
cv_preds <- cv_fit$predict()
risks <- cv_fit$cv_risk(loss_squared_error)
print(risks)
```
We can combine all of the above elements, `Pipelines`, `Stacks`, and cross-validation using `Lrnr_cv`, to easily define a Super Learner. The Super Learner algorithm works by fitting a “meta-learner”, which combines predictions from multiple stacked learners. It does this while avoiding overfitting by training the meta-learner on validation-set predictions in a manner that is cross-validated. Using some of the objects we defined in the above examples, this becomes a very simple operation:
```
metalearner <- make_learner(Lrnr_nnls)
cv_task <- cv_fit$chain()
ml_fit <- metalearner$train(cv_task)
```
Here, we used a special learner, `Lrnr_nnls`, for the meta-learning step. This fits a non-negative least squares meta-learner. It is important to note that any learner can be used as a meta-learner.
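To make the meta-learning step concrete, here is the standard formulation of what the NNLS meta-learner solves (a textbook statement of the Super Learner objective, not a quote from the `sl3` documentation). With $V$ cross-validation folds, candidate learners $\hat{\psi}_1, \ldots, \hat{\psi}_K$, and squared-error loss, the meta-learner finds non-negative weights

$$\hat{\alpha} = \arg\min_{\alpha \geq 0} \sum_{v=1}^{V} \sum_{i \in \text{Val}(v)} \left( Y_i - \sum_{k=1}^{K} \alpha_k \, \hat{\psi}_{k,-v}(X_i) \right)^2,$$

where $\hat{\psi}_{k,-v}$ denotes learner $k$ trained on all folds except fold $v$, so the weights are chosen using only validation-set predictions.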
The Super Learner finally produced is defined as a pipeline with the learner stack trained on the full data and the meta-learner trained on the validation-set predictions. Below, we use a special behavior of pipelines: if all objects passed to a pipeline are learner fits (i.e., `learner$is_trained` is `TRUE`), the result will also be a fit:
```
sl_pipeline <- make_learner(Pipeline, stack_fit, ml_fit)
sl_preds <- sl_pipeline$predict()
head(sl_preds)
```
An optimal stacked regression model (or Super Learner) may be fit in a more streamlined manner using the `Lrnr_sl` learner. For simplicity, we will use the same set of learners and meta-learning algorithm as we did before:
```
sl <- Lrnr_sl$new(learners = stack,
metalearner = metalearner)
sl_fit <- sl$train(task)
lrnr_sl_preds <- sl_fit$predict()
head(lrnr_sl_preds)
```
We can see that this generates the same predictions as the more hands-on definition above.
## Exercise
* Construct a Super Learner using $5$ (or more) learning algorithms, fit it on the training data given below (`task_train`), and obtain predictions on the held-out set (`task_valid`).
* At least $2$ of the learners that you choose should be variations of a single learner, differentiated from one another solely by the use of different values for $1$ (or more) hyperparameters.
* After fitting the Super Learner, identify the "discrete Super Learner".
```
# let's split the data into training and validation sets
train_cpp_imputed <- as.data.table(cpp_imputed[sample(nrow(cpp_imputed), 0.75 * nrow(cpp_imputed)), ])
valid_cpp_imputed <- as.data.table(cpp_imputed[!(seq_len(nrow(cpp_imputed)) %in% rownames(train_cpp_imputed)), ])
# create the sl3 task and take a look at it
task_train <- make_sl3_Task(data = train_cpp_imputed, covariates = covars,
outcome = outcome, outcome_type = "continuous")
task_train
# we'll also create an sl3 task for the holdout set
task_valid <- make_sl3_Task(data = valid_cpp_imputed, covariates = covars,
outcome = outcome, outcome_type = "continuous")
```
# ML-Agents Open a UnityEnvironment
<img src="https://github.com/Unity-Technologies/ml-agents/blob/release_18_docs/docs/images/image-banner.png?raw=true" align="middle" width="435"/>
## Setup
```
#@title Install Rendering Dependencies { display-mode: "form" }
#@markdown (You only need to run this code when using Colab's hosted runtime)
import os
from IPython.display import HTML, display
def progress(value, max=100):
return HTML("""
<progress
value='{value}'
max='{max}',
style='width: 100%'
>
{value}
</progress>
""".format(value=value, max=max))
pro_bar = display(progress(0, 100), display_id=True)
try:
import google.colab
INSTALL_XVFB = True
except ImportError:
INSTALL_XVFB = 'COLAB_ALWAYS_INSTALL_XVFB' in os.environ
if INSTALL_XVFB:
with open('frame-buffer', 'w') as writefile:
writefile.write("""#taken from https://gist.github.com/jterrace/2911875
XVFB=/usr/bin/Xvfb
XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
PIDFILE=./frame-buffer.pid
case "$1" in
start)
echo -n "Starting virtual X frame buffer: Xvfb"
/sbin/start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
echo "."
;;
stop)
echo -n "Stopping virtual X frame buffer: Xvfb"
/sbin/start-stop-daemon --stop --quiet --pidfile $PIDFILE
rm $PIDFILE
echo "."
;;
restart)
$0 stop
$0 start
;;
*)
echo "Usage: /etc/init.d/xvfb {start|stop|restart}"
exit 1
esac
exit 0
""")
pro_bar.update(progress(5, 100))
!apt-get install daemon >/dev/null 2>&1
pro_bar.update(progress(10, 100))
!apt-get install wget >/dev/null 2>&1
pro_bar.update(progress(20, 100))
!wget http://security.ubuntu.com/ubuntu/pool/main/libx/libxfont/libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(30, 100))
!wget --output-document xvfb.deb http://security.ubuntu.com/ubuntu/pool/universe/x/xorg-server/xvfb_1.18.4-0ubuntu0.12_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(40, 100))
!dpkg -i libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(50, 100))
!dpkg -i xvfb.deb >/dev/null 2>&1
pro_bar.update(progress(70, 100))
!rm libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb
pro_bar.update(progress(80, 100))
!rm xvfb.deb
pro_bar.update(progress(90, 100))
!bash frame-buffer start
os.environ["DISPLAY"] = ":1"
pro_bar.update(progress(100, 100))
```
### Installing ml-agents
```
try:
import mlagents
print("ml-agents already installed")
except ImportError:
!python -m pip install -q mlagents==0.27.0
print("Installed ml-agents")
```
## Run the Environment
```
#@title Select Environment { display-mode: "form" }
env_id = "GridWorld" #@param ['Basic', '3DBall', '3DBallHard', 'GridWorld', 'Hallway', 'VisualHallway', 'CrawlerDynamicTarget', 'CrawlerStaticTarget', 'Bouncer', 'SoccerTwos', 'PushBlock', 'VisualPushBlock', 'WallJump', 'Tennis', 'Reacher', 'Pyramids', 'VisualPyramids', 'Walker', 'FoodCollector', 'VisualFoodCollector', 'StrikersVsGoalie', 'WormStaticTarget', 'WormDynamicTarget']
```
### Start Environment from the registry
```
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
env = default_registry[env_id].make()
```
### Reset the environment
To reset the environment, simply call `env.reset()`. This method takes no arguments and returns nothing, but it sends a signal to the simulation to reset.
```
env.reset()
```
### Behavior Specs
#### Get the Behavior Specs from the Environment
```
# We will only consider the first Behavior
behavior_name = list(env.behavior_specs)[0]
print(f"Name of the behavior : {behavior_name}")
spec = env.behavior_specs[behavior_name]
```
#### Get the Observation Space from the Behavior Specs
```
# Examine the number of observations per Agent
print("Number of observations : ", len(spec.observation_specs))
# Is there a visual observation ?
# Visual observation have 3 dimensions: Height, Width and number of channels
vis_obs = any(len(spec.shape) == 3 for spec in spec.observation_specs)
print("Is there a visual observation ?", vis_obs)
```
#### Get the Action Space from the Behavior Specs
```
# Is the Action continuous or multi-discrete ?
if spec.action_spec.continuous_size > 0:
print(f"There are {spec.action_spec.continuous_size} continuous actions")
if spec.action_spec.is_discrete():
print(f"There are {spec.action_spec.discrete_size} discrete actions")
# How many actions are possible ?
#print(f"There are {spec.action_size} action(s)")
# For discrete actions only : How many different options does each action has ?
if spec.action_spec.discrete_size > 0:
for action, branch_size in enumerate(spec.action_spec.discrete_branches):
print(f"Action number {action} has {branch_size} different options")
```
### Stepping the environment
#### Get the steps from the Environment
You can do this with the `env.get_steps(behavior_name)` method. If there are multiple Behaviors in the Environment, you can call this method with each Behavior's name.
_Note:_ This will not move the simulation forward.
```
decision_steps, terminal_steps = env.get_steps(behavior_name)
```
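If the Environment exposed several Behaviors, you could collect the steps for each of them in one pass. The snippet below is a sketch; the registry environments used here typically expose a single Behavior:
```
# Gather (DecisionSteps, TerminalSteps) for every behavior in the environment.
all_steps = {name: env.get_steps(name) for name in env.behavior_specs}
for name, (decision, terminal) in all_steps.items():
    print(f"{name}: {len(decision)} agent(s) requesting a decision, "
          f"{len(terminal)} agent(s) terminated")
```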
#### Set actions for each behavior
You can set the actions for the Agents of a Behavior by calling `env.set_actions()`. You will need to specify the behavior name and pass a tensor of dimension 2; the first dimension must be equal to the number of Agents that requested a decision during the step.
```
env.set_actions(behavior_name, spec.action_spec.empty_action(len(decision_steps)))
```
#### Move the simulation forward
Call `env.step()` to move the simulation forward. The simulation will progress until an Agent requests a decision or terminates.
```
env.step()
```
### Observations
#### Show the observations for one of the Agents
`DecisionSteps.obs` is a tuple containing all of the observations for all of the Agents with the provided Behavior name.
Each value in the tuple is an observation tensor containing the observation data for all of the agents.
```
import matplotlib.pyplot as plt
%matplotlib inline
for index, obs_spec in enumerate(spec.observation_specs):
if len(obs_spec.shape) == 3:
print("Here is the first visual observation")
plt.imshow(decision_steps.obs[index][0,:,:,:])
plt.show()
for index, obs_spec in enumerate(spec.observation_specs):
if len(obs_spec.shape) == 1:
print("First vector observations : ", decision_steps.obs[index][0,:])
```
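A quick way to verify that the leading dimension of each observation tensor matches the number of Agents that requested a decision (a sketch reusing the `decision_steps` obtained above):
```
print("Agents requesting a decision:", len(decision_steps))
for index, obs in enumerate(decision_steps.obs):
    # Each entry of the tuple is a batch of observations, one row per agent.
    print(f"Observation {index} has shape {obs.shape}")
```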
### Run the Environment for a few episodes
```
for episode in range(3):
env.reset()
decision_steps, terminal_steps = env.get_steps(behavior_name)
tracked_agent = -1 # -1 indicates not yet tracking
done = False # For the tracked_agent
episode_rewards = 0 # For the tracked_agent
while not done:
# Track the first agent we see if not tracking
# Note : len(decision_steps) = [number of agents that requested a decision]
if tracked_agent == -1 and len(decision_steps) >= 1:
tracked_agent = decision_steps.agent_id[0]
# Generate an action for all agents
action = spec.action_spec.random_action(len(decision_steps))
# Set the actions
env.set_actions(behavior_name, action)
# Move the simulation forward
env.step()
# Get the new simulation results
decision_steps, terminal_steps = env.get_steps(behavior_name)
if tracked_agent in decision_steps: # The agent requested a decision
episode_rewards += decision_steps[tracked_agent].reward
if tracked_agent in terminal_steps: # The agent terminated its episode
episode_rewards += terminal_steps[tracked_agent].reward
done = True
print(f"Total rewards for episode {episode} is {episode_rewards}")
```
### Close the Environment to free the port it is using
```
env.close()
print("Closed environment")
```
|
github_jupyter
|
#@title Install Rendering Dependencies { display-mode: "form" }
#@markdown (You only need to run this code when using Colab's hosted runtime)
import os
from IPython.display import HTML, display
def progress(value, max=100):
return HTML("""
<progress
value='{value}'
max='{max}',
style='width: 100%'
>
{value}
</progress>
""".format(value=value, max=max))
pro_bar = display(progress(0, 100), display_id=True)
try:
import google.colab
INSTALL_XVFB = True
except ImportError:
INSTALL_XVFB = 'COLAB_ALWAYS_INSTALL_XVFB' in os.environ
if INSTALL_XVFB:
with open('frame-buffer', 'w') as writefile:
writefile.write("""#taken from https://gist.github.com/jterrace/2911875
XVFB=/usr/bin/Xvfb
XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
PIDFILE=./frame-buffer.pid
case "$1" in
start)
echo -n "Starting virtual X frame buffer: Xvfb"
/sbin/start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
echo "."
;;
stop)
echo -n "Stopping virtual X frame buffer: Xvfb"
/sbin/start-stop-daemon --stop --quiet --pidfile $PIDFILE
rm $PIDFILE
echo "."
;;
restart)
$0 stop
$0 start
;;
*)
echo "Usage: /etc/init.d/xvfb {start|stop|restart}"
exit 1
esac
exit 0
""")
pro_bar.update(progress(5, 100))
!apt-get install daemon >/dev/null 2>&1
pro_bar.update(progress(10, 100))
!apt-get install wget >/dev/null 2>&1
pro_bar.update(progress(20, 100))
!wget http://security.ubuntu.com/ubuntu/pool/main/libx/libxfont/libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(30, 100))
!wget --output-document xvfb.deb http://security.ubuntu.com/ubuntu/pool/universe/x/xorg-server/xvfb_1.18.4-0ubuntu0.12_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(40, 100))
!dpkg -i libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1
pro_bar.update(progress(50, 100))
!dpkg -i xvfb.deb >/dev/null 2>&1
pro_bar.update(progress(70, 100))
!rm libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb
pro_bar.update(progress(80, 100))
!rm xvfb.deb
pro_bar.update(progress(90, 100))
!bash frame-buffer start
os.environ["DISPLAY"] = ":1"
pro_bar.update(progress(100, 100))
try:
import mlagents
print("ml-agents already installed")
except ImportError:
!python -m pip install -q mlagents==0.27.0
print("Installed ml-agents")
#@title Select Environment { display-mode: "form" }
env_id = "GridWorld" #@param ['Basic', '3DBall', '3DBallHard', 'GridWorld', 'Hallway', 'VisualHallway', 'CrawlerDynamicTarget', 'CrawlerStaticTarget', 'Bouncer', 'SoccerTwos', 'PushBlock', 'VisualPushBlock', 'WallJump', 'Tennis', 'Reacher', 'Pyramids', 'VisualPyramids', 'Walker', 'FoodCollector', 'VisualFoodCollector', 'StrikersVsGoalie', 'WormStaticTarget', 'WormDynamicTarget']
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
env = default_registry[env_id].make()
env.reset()
# We will only consider the first Behavior
behavior_name = list(env.behavior_specs)[0]
print(f"Name of the behavior : {behavior_name}")
spec = env.behavior_specs[behavior_name]
# Examine the number of observations per Agent
print("Number of observations : ", len(spec.observation_specs))
# Is there a visual observation ?
# Visual observation have 3 dimensions: Height, Width and number of channels
vis_obs = any(len(spec.shape) == 3 for spec in spec.observation_specs)
print("Is there a visual observation ?", vis_obs)
# Is the Action continuous or multi-discrete ?
if spec.action_spec.continuous_size > 0:
print(f"There are {spec.action_spec.continuous_size} continuous actions")
if spec.action_spec.is_discrete():
print(f"There are {spec.action_spec.discrete_size} discrete actions")
# How many actions are possible ?
#print(f"There are {spec.action_size} action(s)")
# For discrete actions only : How many different options does each action has ?
if spec.action_spec.discrete_size > 0:
for action, branch_size in enumerate(spec.action_spec.discrete_branches):
print(f"Action number {action} has {branch_size} different options")
decision_steps, terminal_steps = env.get_steps(behavior_name)
env.set_actions(behavior_name, spec.action_spec.empty_action(len(decision_steps)))
env.step()
import matplotlib.pyplot as plt
%matplotlib inline
for index, obs_spec in enumerate(spec.observation_specs):
if len(obs_spec.shape) == 3:
print("Here is the first visual observation")
plt.imshow(decision_steps.obs[index][0,:,:,:])
plt.show()
for index, obs_spec in enumerate(spec.observation_specs):
if len(obs_spec.shape) == 1:
print("First vector observations : ", decision_steps.obs[index][0,:])
for episode in range(3):
env.reset()
decision_steps, terminal_steps = env.get_steps(behavior_name)
tracked_agent = -1 # -1 indicates not yet tracking
done = False # For the tracked_agent
episode_rewards = 0 # For the tracked_agent
while not done:
# Track the first agent we see if not tracking
# Note : len(decision_steps) = [number of agents that requested a decision]
if tracked_agent == -1 and len(decision_steps) >= 1:
tracked_agent = decision_steps.agent_id[0]
# Generate an action for all agents
action = spec.action_spec.random_action(len(decision_steps))
# Set the actions
env.set_actions(behavior_name, action)
# Move the simulation forward
env.step()
# Get the new simulation results
decision_steps, terminal_steps = env.get_steps(behavior_name)
if tracked_agent in decision_steps: # The agent requested a decision
episode_rewards += decision_steps[tracked_agent].reward
if tracked_agent in terminal_steps: # The agent terminated its episode
episode_rewards += terminal_steps[tracked_agent].reward
done = True
print(f"Total rewards for episode {episode} is {episode_rewards}")
env.close()
print("Closed environment")
Risk Off Strategy
=================
```
# If you would like to refresh your data, set DOWNLOAD = True and execute the code below.
import pandas as pd
import numpy as np
from datetime import datetime
from tqdm import tqdm
import matplotlib.pyplot as plt
from mypo import Loader
DOWNLOAD = False
if DOWNLOAD:
tickers = pd.read_csv("/app/docs/tutorial/tickers.csv")
loader = Loader()
    for row in tqdm(tickers.to_dict('records')):
loader.get(row['Ticker'], expense_ratio=row['ExpenseRatio'] * 0.01)
loader.save('/app/docs/tutorial/all.bin')
loader = Loader.load('/app/docs/tutorial/all.bin')
loader = loader.since(datetime(2007, 1, 1))
market = loader.get_market()
selected_tickers = ['SPY', 'QQQ', 'TLT', 'IEF', 'GLD']
from mypo import split_k_folds
from mypo.optimizer import NoOptimizer, RiskParityOptimizer
from mypo.rebalancer import MonthlyRebalancer
from mypo.evacuator import CovarianceEvacuator, MovingAverageEvacuator, CalendarEvacuator, FixedEvacuator
from mypo import Runner
folds = split_k_folds(market, 1, 200)
fold = folds[0].filter(selected_tickers)
```
Reference Performance (SPY)
----------------------------------------
```
optimizer = NoOptimizer([1, 0, 0, 0, 0])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
RiskParityOptimizer without filter
--------------------
```
optimizer = RiskParityOptimizer(risk_target=[0.2, 0.25, 0.2, 0.25, 0.1])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history_assets().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cost().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cash_vs_assets().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
RiskParityOptimizer with cash filter (Covariance)
----------------------------------------
```
optimizer = RiskParityOptimizer(risk_target=[0.2, 0.25, 0.2, 0.25, 0.1])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer, evacuator=CovarianceEvacuator(long_span=250, short_span=20, factor=0.3)),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history_assets().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cost().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cash_vs_assets().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
RiskParityOptimizer with cash filter (Moving Average)
----------------------------------------
```
optimizer = RiskParityOptimizer(risk_target=[0.2, 0.25, 0.2, 0.25, 0.1])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer, evacuator=MovingAverageEvacuator(span=150, risk_off=0.9)),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history_assets().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cost().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cash_vs_assets().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
RiskParityOptimizer with cash filter (Fixed Level)
----------------------------------------
```
optimizer = RiskParityOptimizer(risk_target=[0.2, 0.25, 0.2, 0.25, 0.1])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer, evacuator=FixedEvacuator(level=0.05)),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history_assets().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cost().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cash_vs_assets().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
RiskParityOptimizer with cash filter (Calendar)
----------------------------------------
```
optimizer = RiskParityOptimizer(risk_target=[0.2, 0.25, 0.2, 0.25, 0.1])
runner = Runner(
rebalancer=MonthlyRebalancer(optimizer=optimizer, evacuator=CalendarEvacuator(months=[8], risk_off=0.9)),
)
runner.run(
fold=fold,
verbose=True
)
report = runner.report()
display(report.summary())
display(report.annual_summary())
ax = report.history_assets().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_weights().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cost().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = report.history_cash_vs_assets().plot.area(stacked=True)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
plt.show()
```
```
from IPython.core.interactiveshell import InteractiveShell
import os
import sys
import time
from functools import partial
import pickle
import multiprocessing
import pixiedust as pxdb
import PIL
from matplotlib import pyplot as plt
import seaborn as sns
from collections import OrderedDict as ODict
import numpy as np
import pandas as pd
# PYTORCH
import torch as tc
import torchvision as tcvis
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torch import nn, optim
from torch.nn import functional as fu
# TORCH HELPERS
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
import hiddenlayer as hl
from torchsummary import summary
InteractiveShell.ast_node_interactivity = 'all'
# %pixie_debugger
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
sns.set(style='white', context='notebook', palette='tab10')
%config IPCompleter.greedy = True
%config IPCompleter.use_jedi = True
basepath = '/home/alk/Documents/Git/Kaggles/MNIST'
try:
os.chdir(basepath)
print(os.getcwd())
except (FileNotFoundError, NotADirectoryError):
pass
datadir = os.getcwd() + '/data'
filenames = ['train.csv', 'test.csv']
datadict = ODict()
for files in filenames:
try:
with open(datadir + '/' + files, mode='r') as csvfile:
datadict[files] = np.loadtxt(csvfile, delimiter=",", skiprows=1)
csvfile.close()
print('found file: {}'.format(files))
except FileNotFoundError:
print('skipping file ./{}'.format(files))
datadict.keys(), filenames
traindata = datadict[filenames[0]]
testdata = datadict[filenames[-1]]
trainlabels = traindata[:, 0].reshape(-1, 1)
traindata = traindata[:, 1:].reshape(-1, 28, 28)
testdata = testdata.reshape(-1, 28, 28)
print(traindata.shape, trainlabels.shape, testdata.shape)
fig, ax = plt.subplots(1, 2, sharex=True, squeeze=True)
ax[0].imshow(traindata[-1, :, :], cmap='gray')
ax[1].imshow(testdata[0, :, :], cmap='gray')
class NpDataset(Dataset):
def __init__(self, x=traindata, y=trainlabels,
transforms=None):
super().__init__()
self.x = x
self.y = y
self.transform = transforms
def __len__(self):
return self.x.shape[0]
def __getitem__(self, index):
if self.y is not None:
image, label = self.x[index], self.y[index]
label = tc.from_numpy(label).type(tc.LongTensor)
else:
image, label = self.x[index], None
# HxWxC, UINT8
image = image.astype(np.uint8).reshape(28, 28, 1)
if self.transform is not None:
image = self.transform(image)
return image, label
# test
test_set = NpDataset()
print(f'target: {test_set.__getitem__(0)[1]}')
plt.imshow(test_set.__getitem__(0)[0].reshape(28, 28), cmap='gray')
MNIST_train_transform = transforms.Compose([transforms.ToPILImage(),
transforms.RandomVerticalFlip(0.25),
transforms.RandomAffine(30, (0.15,0.15),
(0.75,1.25), 30,
PIL.Image.BICUBIC,0),
transforms.ToTensor(),
transforms.Normalize((0.1307,),
(0.3081,))])
MNIST_test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,),
(0.3081,))])
# test
test_set = NpDataset(transforms=MNIST_train_transform)
fig, axes = plt.subplots(5, 5, sharex=True, sharey=True, squeeze=True)
_plots = None
for axs in axes:
for ax in axs:
_plots = ax.imshow(test_set.__getitem__(np.random.randint(0, \
traindata.shape[0]))[0].numpy().reshape(28, 28),
cmap='gray');
_plots = plt.yticks([], [])
_plots = plt.xticks([], [])
_plots = plt.axis('off')
plt.show()
NN_trainloader = DataLoader(NpDataset(transforms=MNIST_train_transform),
batch_size=128,
shuffle=True,
num_workers=8,
pin_memory=True)
NN_testloader = DataLoader(NpDataset(x=testdata, y=None,
transforms=MNIST_test_transform),
batch_size=128,
shuffle=True,
num_workers=8,
pin_memory=True)
class MNIST_CNN(nn.Module):
def __init__(self,
dropout_fcp=0.0,
wcuda=True):
super(MNIST_CNN, self).__init__()
self.dropout_p = dropout_fcp
self.wcuda = wcuda
self.conv1a = nn.Conv2d(1, 20, 5, 1) #24
self.conv2a = nn.Conv2d(20, 50, 5, 1) #20
self.conv1b = nn.Conv2d(1, 20, 5, 1)
self.conv2b = nn.Conv2d(20, 50, 5, 1)
self.fc1a = nn.Linear(4*4*50, 500)
self.fc1b = nn.Linear(5*5*50, 500)
self.fcf1 = nn.Linear(1000,100)
self.fcf2 = nn.Linear(100,10)
def forward(self, x):
x1 = fu.relu(self.conv1a(x))
x1 = fu.max_pool2d(x1, 2, 2)
x1 = fu.relu(self.conv2a(x1))
x1 = fu.max_pool2d(x1, 2, 2)
x1 = x1.view(-1, 4*4*50)
x1 = fu.relu(self.fc1a(x1))
x1 = fu.dropout(x1, p=self.dropout_p,
training=self.training)
        x2 = fu.relu(self.conv1b(x))  # the second branch uses its own conv layers
        x2 = fu.relu(self.conv2b(x2))
x2 = fu.max_pool2d(x2, 4, 4)
x2 = x2.view(-1, 5*5*50)
x2 = fu.relu(self.fc1b(x2))
x2 = fu.dropout(x2, p=self.dropout_p,
training=self.training)
x = tc.cat((x1, x2), 1)
x = fu.relu(self.fcf1(x))
x = fu.dropout(x, p=self.dropout_p,
training=self.training)
x = self.fcf2(x)
x = fu.log_softmax(x, dim=1)
if self.wcuda:
return x.cuda()
else:
return x
# test
test_cnn = MNIST_CNN().cuda()
print(summary(test_cnn.cuda(), (1,28,28), device='cuda'))
hl.build_graph(model=test_cnn.cuda(), args=tc.randn(1,1,28,28).cuda())
def train(model, device, train_loader, optimizer, epoch, log_interval):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
#output = tc.argmax(output, 1).view(-1,1).type(tc.float16)
#print(output.shape, target.shape)
#print(output.type(), target.type())
loss = fu.nll_loss(output, target.view(-1))
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
    with tc.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
            test_loss += fu.nll_loss(output, target.view(-1), reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
MNISTmodel = MNIST_CNN(dropout_fcp=0.25).cuda()  # move the model to the GPU before building the optimizer
optimizer = optim.Adam(MNISTmodel.parameters(), lr=0.001)
device = tc.device('cuda')
epochs = 100
for epoch in range(1, epochs+1):
train(MNISTmodel, device, NN_trainloader, optimizer, epoch, 250)
```
## Numerical Differentiation
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
```
Applications:
1. Derivative difficult to compute analytically
2. Rate of change in a dataset
- You have position data but you want to know velocity
3. Finding extrema
- Important for fitting models to data (**ASTR 3800**)
- Maximum likelihood methods
- Topology: finding peaks and valleys (place where slope is zero)
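As a quick illustration of application 2 above (turning position data into velocity), here is a minimal sketch; the position and time arrays are made up purely for illustration.
```
# Hypothetical, evenly sampled timestamps (s) and positions (m)
t = np.linspace(0, 10, 11)
position = 3.0 * t + 0.5 * t**2
# Finite-difference estimate of the velocity between successive samples
velocity = np.diff(position) / np.diff(t)
print(velocity)   # approximately 3 + t, evaluated at the interval midpoints
```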
### Topology Example: South Pole Aitken Basin (lunar farside)
Interesting:
1. Oldest impact basin in the solar system
- Important for studies of solar system formation
2. Permanently shadowed craters
- High concentration of hydrogen (e.g., LCROSS mission)!
- Good place for an observatory (e.g., the Lunar Radio Array concept)!
```
from IPython.display import Image
Image(url='http://wordlesstech.com/wp-content/uploads/2011/11/New-Map-of-the-Moon-2.jpg')
```
### Question
> Imagine you're planning a mission to the South Pole Aitken Basin and want to explore some permanently shadowed craters. What factors might you consider in planning your rover's landing site and route?
#### Most rovers can tolerate grades up to about 20%. For reference, the grade on I-70 near the Eisenhower Tunnel is about 6%.
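To connect grade to the slopes we will compute below: percent grade is just rise over run times 100, so a 20% grade corresponds to a slope of 0.2. A minimal sketch (the numbers are simply the ones quoted above):
```
def percent_grade(slope):
    """Convert a dimensionless slope (rise/run) into a percent grade."""
    return abs(slope) * 100.0

print(percent_grade(0.20))   # 20% -- roughly the limit quoted for most rovers
print(percent_grade(0.06))   # 6%  -- roughly the I-70 grade near the Eisenhower Tunnel
```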
## Differentiation Review
### Numerical Derivatives on a Grid (Text Appendix B.2)
```
def forwardDifference(f, x, h):
"""
A first order differentiation technique.
Parameters
----------
f : function to be differentiated
x : point of interest
h : step-size to use in approximation
"""
return (f(x + h) - f(x)) / h # From our notes
```
```
def centralDifference(f, x, h):
"""
A second order differentiation technique.
    Also known as the `symmetric difference quotient`.
Parameters
----------
f : function to be differentiated
x : point of interest
h : step-size to use in approximation
"""
return (f(x + h) - f(x - h)) / (2.0 * h) # From our notes
```
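As a quick sanity check on the claimed orders of accuracy (this cell is an addition, using $\sin(x)$ whose derivative we know), the forward-difference error should shrink roughly like $h$ while the central-difference error shrinks roughly like $h^2$:
```
# Compare truncation errors of the two formulas at x = 1.0 as the step size shrinks
x0 = 1.0
exact = np.cos(x0)
for h in [1e-1, 1e-2, 1e-3]:
    err_forward = abs(forwardDifference(np.sin, x0, h) - exact)
    err_central = abs(centralDifference(np.sin, x0, h) - exact)
    print(f"h = {h:.0e}   forward error = {err_forward:.2e}   central error = {err_central:.2e}")
```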
```
np.linspace(1,10,100).shape
def derivative(formula, func, xLower, xUpper, n):
"""
Differentiate func(x) at all points from xLower
to xUpper with n *equally spaced* points.
The differentiation formula is given by
formula(func, x, h).
"""
h = (xUpper - xLower) / float(n) # Calculate the derivative step size
xArray = np.linspace(xLower, xUpper, n) # Create an array of x values
derivArray = np.zeros(n) # Create an empty array for the derivative values
    for index in range(1, n - 1):  # range(start, stop[, step])
derivArray[index] = formula(func, xArray[index], h) # Calculate the derivative for the current
# x value using the formula passed in
return (xArray[1:-1], derivArray[1:-1]) # This returns TWO things:
# x values and the derivative values
```
#### Notice that we don't calculate the derivative at the end points because there are no points beyond them to difference with.
#### Q. So, what would happen without the [1:-1] in the return statement?
```
def derivative2(formula, func, xLower, xUpper, n):
"""
Differentiate func(x) at all points from xLower
    to xUpper with n *equally spaced* points.
The differentiation formula is given by
formula(func, x, h).
"""
h = (xUpper - xLower) / float(n) # Calculate the derivative step size
xArray = np.linspace(xLower, xUpper, n) # Create an array of x values
derivArray = np.zeros(n) # Create an empty array for the derivative values
    for index in range(0, n):  # range(start, stop[, step])
derivArray[index] = formula(func, xArray[index], h) # Calculate the derivative for the current
# x value using the formula passed in
return (xArray, derivArray) # This returns TWO things:
# x values and the derivative values
```
### Example: Differentiate $\sin(x)$
We know the answer:
$$\frac{d}{dx} \left[\sin(x)\right] = \cos(x)$$
```
tau = 2*np.pi
x = np.linspace(0, tau, 100)
# Plot sin and cos
pl.plot(x, np.sin(x), color='k');
pl.plot(x, np.cos(x), color='b');
# Compute derivative using central difference formula
xder, yder = derivative2(centralDifference, np.sin, 0, tau, 10)
# Plot numerical derivative as scatter plot
pl.scatter(xder, yder, color='g', s=100, marker='+', lw=2);
# s controls marker size (experiment with it)
# lw = "linewidth" in pixels
```
#### Notice that the points miss the curve.
#### Q. How can we improve the accuracy of our numerical derivative?
```
# Plot sin and cos
pl.plot(x, np.sin(x), color='k')
pl.plot(x, np.cos(x), color='b')
# Compute derivative using central difference formula
xder, yder = derivative2(centralDifference, np.sin, 0, tau, 100)
# Plot numerical derivative as scatter plot
pl.scatter(xder, yder, color='g', s=100, marker='*', lw=2)
```
### Example: Traversing A 1-D landscape
Gaussian Equation:
$$f(x)=A \, e^{-\frac{(x-\mu)^2}{2\sigma^2}}$$
```
numCraters = 5 # number of craters
widthMax = 1.0 # maximal width of Gaussian crater
heightMin = -1.0 # maximal depth of craters / valleys
heightMax = 2.0 # maximal height of hills / mountains
# 1-D Gaussian
def gaussian(x, A, mu, sigma):
return A * np.exp(-(x - mu)**2 / 2.0 / sigma**2)
# 1-D Gaussian (same thing using lambda)
#gaussian = lambda x, A, mu, sigma: A * np.exp(-(x - mu)**2 / 2. / sigma**2)
# Create an array of linearly spaced x values
xArray = np.linspace(0, 10, 500) # km
# Create an array of initially flat landscape (aka filled with 0's)
yArray = np.zeros_like(xArray)
# Add craters / mountains to landscape
for _ in range(numCraters): # '_' is the so called dummy variable
# Amplitude between heightMin and heightMax
A = np.random.rand() * (heightMax - heightMin) + heightMin
# Center location of the crater
center = np.random.rand() * xArray.max()
# Width of the crater
sigma = np.random.rand() * widthMax
# Add crater to landscape!
yArray += gaussian(xArray, A=A, mu=center, sigma=sigma)
```
```
pl.plot(xArray, yArray, color='k')
pl.xlabel('position [km]')
pl.ylabel('altitude [km]')
```
#### Q. Where should our spacecraft land? What areas seem accessible?
#### Q. How do we find the lowest point? Highest? How could we determine how many "mountains" and "craters" there are?
```
dydx = np.diff(yArray) / np.diff(xArray)
```
#### Q. What do you think "diff" does?
```
arr = np.array([1,4,10, 12,5, 7])
np.diff(arr)
```
#### Q. What type of differentiation scheme does this formula represent? How is this different than our "derivative" function from earlier?
```
pl.plot(xArray[0:-1], dydx, color='r', label='slope')
pl.plot(xArray, yArray, color='k', label='data')
pl.xlabel('position [km]')
pl.ylabel('slope')
pl.plot([xArray.min(), xArray.max()], [0,0], color='k', ls=':')
#pl.ylim(-4, 4)
pl.legend(loc='best')
```
#### Q. How many hills and craters are there?
#### Q. Why did we use x[0:-1] in the above plot instead of x?
```
slopeTolerance = 0.5
```
#### Q. Using the slope, how could we determine which places we could reach and which we couldn't?
```
myArray = np.array([0, 1, 2, 3, 4]) # Create an array
tfArray = np.logical_and(myArray < 2, myArray != 0) # Use boolean logic on array
print (myArray) # Print original array
print (tfArray) # Print the True/False array (from boolean logic)
print (myArray[tfArray]) # Print the original array using True/False array to limit values
```
```
reachable = np.logical_and(dydx < slopeTolerance, dydx > -slopeTolerance)
unreachable = np.logical_not(reachable)
pl.plot(xArray, yArray, color='k')
pl.scatter(xArray[:-1][unreachable], yArray[:-1][unreachable], color='r', label='bad')
pl.scatter(xArray[:-1][reachable], yArray[:-1][reachable], color='g', label='good')
pl.legend(loc='best')
pl.xlabel('position [km]')
pl.ylabel('altitude [km]')
```
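Coming back to the earlier question about the lowest and highest points and the number of hills and craters, here is one possible sketch (an addition, using the `xArray`, `yArray`, and `dydx` arrays defined above): it locates the global extrema and counts sign changes of the slope as a rough tally of peaks and valleys.
```
# Global extrema of the landscape
print("Lowest point:  x = %.2f km, y = %.2f km" % (xArray[np.argmin(yArray)], yArray.min()))
print("Highest point: x = %.2f km, y = %.2f km" % (xArray[np.argmax(yArray)], yArray.max()))

# A slope sign change from + to - marks a peak; from - to + marks a valley
sign = np.sign(dydx)
peaks = np.sum((sign[:-1] > 0) & (sign[1:] < 0))
valleys = np.sum((sign[:-1] < 0) & (sign[1:] > 0))
print("Approximate number of hills:  ", peaks)
print("Approximate number of craters:", valleys)
```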
### Summary
## Basic ML Classification
This notebook is based on `Chapter 3 - Classification` of Hands-On ML, which uses the standard MNIST dataset
```
# common imports
import sys
import sklearn
import numpy as np
import os
import pandas as pd
from pathlib import Path
# Setting seed value
np.random.seed(42)
#figures
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
# Sets defaults/can also be imported from a style file
mpl.rc('axes', labelsize=12)
mpl.rc('xtick', labelsize=10)
mpl.rc('ytick', labelsize=10)
# Saving path to a directory
path = Path('../input/Kannada-MNIST/')
for file in path.iterdir():
print(file)
df = pd.read_csv(path/'train.csv', low_memory=False)
df.head()
# Loading dataset into separate numpy arrays
x = df.iloc[:,1:].values
y = df.iloc[:,0].values
y[:5]
# Number of unique classes in the dataset
np.unique(y)
# Similar to MNIST image shape
x.shape, y.shape
# Viewing the digits in the dataset
# They aren't very clear
digit = x[np.random.randint(0, 100)].reshape(28, 28)
plt.imshow(digit, cmap=mpl.cm.binary, interpolation='nearest')
plt.axis("off")
plt.show()
# Plotting images of each unique class in a row
def plot_digits(images, row_size):
n_rows = (images.shape[0]) // row_size
row_images = []
for row in range(n_rows):
rimages = images[row * row_size : (row + 1) * row_size]
row_images.append(np.concatenate(rimages, axis=1))
image = np.concatenate(row_images, axis=0)
plt.imshow(image, cmap=mpl.cm.binary)
plt.axis("off")
'''
The plot of images shows that the labels are in a sequential order
So we need to randomize the data before passing through a NN model
'''
plt.figure(figsize=(9,9))
example_images = x[:100].reshape(-1, 28, 28)
plot_digits(example_images, 10)
plt.show()
# Shuffling the data
# This creates a random permutation of indices in the dataset
indices = np.random.permutation(x.shape[0])
x = x[indices]
y = y[indices]
# Now the first 100 points have been shuffled
plt.figure(figsize=(9,9))
example_images = x[:100].reshape(-1, 28, 28)
plot_digits(example_images, 10)
plt.show()
# Train-Test Split
# Using a stratified split keeps the same class proportions in both the train and test sets
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.20, random_state=42)
for train_ind, test_ind in ss.split(x, y):
x_train, x_test = x[train_ind], x[test_ind]
y_train, y_test = y[train_ind], y[test_ind]
print("Train Shape:", x_train.shape)
print("Test Shape:", x_test.shape)
'''
After train test split the proportion of the classes remains the same
'''
print("Proportion of classes in original data:", np.unique(y, return_counts=True)[1] / y.shape[0])
print("Proportion of classes in training data:", np.unique(y_train, return_counts=True)[1] / y_train.shape[0])
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
from sklearn.linear_model import SGDClassifier
# Hinge loss, the default loss function, makes SGDClassifier behave like a linear SVM
sgd = SGDClassifier(n_jobs=-1, random_state = 42)
```
### Confusion Matrix for complete dataset
```
# For confusion matrix on complete data
from sklearn.model_selection import cross_val_predict
y_pred = cross_val_predict(sgd, x, y , cv=5)
def plot_confusion_matrix(df, col=plt.cm.gray):
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
cax = ax.matshow(df, cmap=col)
fig.colorbar(cax)
# The SGD classifier works well, as very few samples are misclassified
plot_confusion_matrix(confusion_matrix(y, y_pred), plt.cm.plasma)
plt.show()
```
### Binary classifier for Precision Recall Curve
```
# Setting up a binary classifier, since a precision-recall curve can't be plotted for multiclass
# Following the book, we use the digit 5
y_5 = (y==5)
y_pred_5 = cross_val_predict(sgd, x, y_5 , cv=5, method="decision_function")
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_5, y_pred_5)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
plt.legend(loc="center right", fontsize=16) # Not shown in the book
plt.xlabel("Threshold", fontsize=16) # Not shown
plt.grid(True) # Not shown
plt.axis([-50000, 50000, 0, 1]) # Not shown
recall_90_precision = recalls[np.argmax(precisions >= 0.90)]
threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)]
plt.figure(figsize=(8, 4)) # Not shown
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], "r:") # Not shown
plt.plot([-50000, threshold_90_precision], [0.9, 0.9], "r:") # Not shown
plt.plot([-50000, threshold_90_precision], [recall_90_precision, recall_90_precision], "r:")# Not shown
plt.plot([threshold_90_precision], [0.9], "ro") # Not shown
plt.plot([threshold_90_precision], [recall_90_precision], "ro") # Not shown
plt.show()
# We usually select the point just before the sharp drop, i.e. recall = 0.9 and precision = 0.95
# Our model works well, as the precision-recall curve is steep; and since the binary labels are imbalanced, the precision-recall curve is the more informative metric
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall", fontsize=16)
plt.ylabel("Precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.grid(True)
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
plt.plot([0.90, 0.90], [0., 0.95], "r:")
plt.plot([0.0, 0.9], [0.95, 0.95], "r:")
plt.plot([0.9], [0.95], "ro")
plt.show()
```
### ROC curve
```
from sklearn.metrics import roc_curve
# true positive rate is recall
fpr, tpr, thresholds = roc_curve(y_5, y_pred_5)
# Here we want our curve to be as steep as possible initially
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'k--') # dashed diagonal
plt.axis([0, 1, 0, 1]) # Not shown in the book
plt.xlabel('False Positive Rate (Fall-Out)', fontsize=16) # Not shown
plt.ylabel('True Positive Rate (Recall)', fontsize=16) # Not shown
plt.grid(True) # Not shown
plt.figure(figsize=(8, 6)) # Not shown
plot_roc_curve(fpr, tpr)
plt.plot([4.837e-3, 4.837e-3], [0., 0.4368], "r:") # Not shown
plt.plot([0.0, 4.837e-3], [0.4368, 0.4368], "r:") # Not shown
plt.plot([4.837e-3], [0.4368], "ro") # Not shown
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(y_5, y_pred_5)
```
### Error Analysis for multiclass classification
```
conf_sum = confusion_matrix(y, y_pred)
row_sums = conf_sum.sum(axis=1, keepdims=True)
norm_conf = conf_sum / row_sums
np.fill_diagonal(norm_conf, 0)
plt.matshow(norm_conf, cmap=plt.cm.gray)
plt.show()
cl_3, cl_7 = 3, 7
X_33 = x[(y == cl_3) & (y_pred == cl_3)]
X_37 = x[(y == cl_3) & (y_pred == cl_7)]
X_73 = x[(y == cl_7) & (y_pred == cl_3)]
X_77 = x[(y == cl_7) & (y_pred == cl_7)]
# 3 and 7 look similar: 3 is rounded in the lower part while 7 is rounded in the upper part
# Also great way of using subplots
plt.figure(figsize=(8,8))
plt.subplot(221); plot_digits(X_33[:25].reshape(-1, 28, 28), 5)
plt.subplot(222); plot_digits(X_37[:25].reshape(-1, 28, 28), 5)
plt.subplot(223); plot_digits(X_73[:25].reshape(-1, 28, 28), 5)
plt.subplot(224); plot_digits(X_77[:25].reshape(-1, 28, 28), 5)
plt.show()
```
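The precision, recall, and F1 helpers imported earlier are never actually called above; as a small addition, macro-averaged versions of those scores for the cross-validated predictions could be computed like this:
```
# Macro-averaged scores over all ten classes, using the cross-validated predictions from above
print("Precision (macro):", precision_score(y, y_pred, average='macro'))
print("Recall (macro):   ", recall_score(y, y_pred, average='macro'))
print("F1 (macro):       ", f1_score(y, y_pred, average='macro'))
```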
# Deploy and predict with Keras model on Cloud AI Platform.
**Learning Objectives**
1. Set up the environment
1. Deploy trained Keras model to Cloud AI Platform
1. Online predict from model on Cloud AI Platform
1. Batch predict from model on Cloud AI Platform
## Introduction
**Verify that you have previously trained your Keras model. If not, go back to [train_keras_ai_platform_babyweight.ipynb](../solutions/train_keras_ai_platform_babyweight.ipynb) and create it.**
In this notebook, we'll be deploying our Keras model to Cloud AI Platform and creating predictions.
We will set up the environment, deploy a trained Keras model to Cloud AI Platform, online predict from deployed model on Cloud AI Platform, and batch predict from deployed model on Cloud AI Platform.
Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/deploy_keras_ai_platform_babyweight.ipynb).
## Set up environment variables and load necessary libraries
Import necessary libraries.
```
import os
```
### Lab Task #1: Set environment variables.
Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region.
```
%%bash
PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
# Change these to try this notebook out
PROJECT = "cloud-training-demos" # TODO 1: Replace with your PROJECT
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # TODO 1: Replace with your REGION
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "2.1"
%%bash
gcloud config set compute/region $REGION
gcloud config set ai_platform/region global
```
## Check our trained model files
Let's check the directory structure of the outputs of our trained model in the folder we exported the model to in our last [lab](../solutions/10_train_keras_ai_platform_babyweight.ipynb). We'll want to deploy the saved_model.pb within the timestamped directory as well as the variable values in the variables folder. Therefore, we need the path of the timestamped directory so that everything within it can be found by Cloud AI Platform's model deployment service.
```
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model
%%bash
MODEL_LOCATION=$(gsutil ls -ld -- gs://${BUCKET}/babyweight/trained_model/2* \
| tail -1)
gsutil ls ${MODEL_LOCATION}
```
## Lab Task #2: Deploy trained model.
Deploying the trained model to act as a REST web service is a simple gcloud call. Complete the __#TODO__ by providing the location of the saved_model.pb file to the Cloud AI Platform model deployment service. The deployment will take a few minutes.
```
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=# TODO 2: Add GCS path to saved_model.pb file.
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION"
# gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
# gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions ${REGION}
gcloud ai-platform versions create ${MODEL_VERSION} \
--model=${MODEL_NAME} \
--origin=${MODEL_LOCATION} \
--runtime-version=2.1 \
--python-version=3.7
```
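Version creation can take a few minutes. As a rough sketch for checking on it (assuming the `babyweight` model and `ml_on_gcp` version names used above), you can list and describe the versions until the new one reaches the `READY` state:
```
%%bash
# List the versions of the deployed model; the new version should eventually reach state: READY
gcloud ai-platform versions list --model babyweight
# Inspect the details of the version we just created
gcloud ai-platform versions describe ml_on_gcp --model babyweight
```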
## Lab Task #3: Use model to make online prediction.
Complete __#TODO__s for both the Python and gcloud Shell API methods of calling our deployed model on Cloud AI Platform for online prediction.
### Python API
We can use the Python API to send a JSON request to the endpoint of the service to make it predict a baby's weight. The responses are returned in the same order as the instances.
```
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = # TODO 3a: Add model name
MODEL_VERSION = # TODO 3a: Add model version
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = "https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict" \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {"Authorization": "Bearer " + token }
data = {
"instances": [
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Single(1)",
"gestation_weeks": 39
},
{
"is_male": "False",
"mother_age": 29.0,
"plurality": "Single(1)",
"gestation_weeks": 38
},
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Triplets(3)",
"gestation_weeks": 39
},
# TODO 3a: Create another instance
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
```
The predictions for the four instances were: 5.33, 6.09, 2.50, and 5.86 pounds respectively when I ran it (your results might be different).
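If you want to work with the returned values programmatically rather than just printing the raw bytes, the response body is JSON. A minimal sketch (assuming the service returns the standard `predictions` list, where each entry holds the model's output for one instance):
```
# Parse the raw response body into Python objects (sketch; the exact per-instance
# structure depends on the serving signature of the exported model)
results = json.loads(response.content)
for prediction in results.get("predictions", []):
    print(prediction)
```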
### gcloud shell API
Alternatively, we can use the gcloud shell API. Create a newline-delimited JSON file with one instance per line and submit it using gcloud.
```
%%writefile inputs.json
{"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
```
Now call `gcloud ai-platform predict` using the JSON we just created and point to our deployed `model` and `version`.
```
%%bash
gcloud ai-platform predict \
--model=babyweight \
--json-instances=inputs.json \
--version=# TODO 3b: Add model version
```
## Lab Task #4: Use model to make batch prediction.
Batch prediction is commonly used when you need thousands to millions of predictions. It creates an actual Cloud AI Platform job for prediction. Complete the __#TODO__s so we can call our deployed model on Cloud AI Platform for batch prediction.
```
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT \
--region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight \
--version=# TODO 4: Add model version
```
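The command above only submits the job, so it returns right away. As a sketch (assuming the default output naming used by the prediction service), you can monitor the job and then inspect the files written to the output path once it succeeds:
```
%%bash
# Check the state of recently submitted AI Platform jobs
gcloud ai-platform jobs list --limit=5
# Once the job state is SUCCEEDED, list the batch prediction output files
gsutil ls gs://${BUCKET}/babyweight/batchpred/outputs
```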
## Lab Summary:
In this lab, we set up the environment, deployed a trained Keras model to Cloud AI Platform, made online predictions from the deployed model, and made batch predictions from the deployed model.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
train = pd.read_csv('train_data.csv')
test = pd.read_csv('test_data.csv')
features_name = [i for i in train.columns.values if i not in ['connection_id', 'target']]
target = train['target']
X_train, X_valid, y_train, y_valid = train_test_split(train, target, train_size = 0.8, stratify = target, random_state = 2017)
def modelfit(model, train_data, train_label, predictors,useTrainCV=True, cv_folds=5, early_stopping_rounds=100):
if useTrainCV:
xgb_param = model.get_xgb_params()
params = {
'objective' : xgb_param['objective'],
'num_class':3,
'base_score' : xgb_param['base_score'],
'colsample_bylevel' : xgb_param['colsample_bylevel'],
'colsample_bytree' : xgb_param['colsample_bytree'],
'gamma' : xgb_param['gamma'],
'eta' : xgb_param['learning_rate'],
'max_delta_step' : xgb_param['max_delta_step'],
'max_depth' : xgb_param['max_depth'],
'min_child_weight' : xgb_param['min_child_weight'],
'alpha' : xgb_param['reg_alpha'],
'lambda': xgb_param['reg_lambda'],
'scale_pos_weight' : xgb_param['scale_pos_weight'],
'subsample' : xgb_param['subsample'],
}
dtrain = xgb.DMatrix(data=train_data[predictors], label=train_label)
cvresult = xgb.cv(params , dtrain, num_boost_round=model.get_params()['n_estimators'], stratified=True, nfold=cv_folds, metrics='mlogloss', early_stopping_rounds=early_stopping_rounds)
model.set_params(n_estimators=cvresult.shape[0])
print(cvresult)
print("########### n_estimators = %f" % cvresult.shape[0])
# Fit the algorithm on the data
model.fit(train_data[predictors], train_label,eval_metric='auc')
# Predict training set:
train_predictions = model.predict(train_data[predictors])
train_predprob = model.predict_proba(train_data[predictors])[:,1]
# Predict X_valid set:
valid_predictions = model.predict(X_valid[predictors])
valid_predprob = model.predict_proba(X_valid[predictors])[:,1]
# Print model report:
print("\nModel Report")
print("Accuracy (Train): %.5g" % accuracy_score(train_label, train_predictions))
# print("AUC Score (Train): %f" % roc_auc_score(train_label, train_predprob))
print("Accuracy (Validation): %.5g" % accuracy_score(y_valid, valid_predictions))
# print("AUC Score (Validation): %f" % roc_auc_score(y_valid, valid_predprob))
feat_imp = pd.Series(model.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# Fit model using each importance as a threshold
def find_best_features_threshold(clf, predictors):
print("feature importance values for all features")
print(clf.feature_importances_)
thresholds = np.sort(clf.feature_importances_)
objective = clf.get_xgb_params()['objective']
learning_rate = clf.get_xgb_params()['learning_rate']
max_depth = clf.get_xgb_params()['max_depth']
n_estimators = clf.get_xgb_params()['n_estimators']
subsample = clf.get_xgb_params()['subsample']
colsample_bytree = clf.get_xgb_params()['colsample_bytree']
gamma = clf.get_xgb_params()['gamma']
min_child_weight = clf.get_xgb_params()['min_child_weight']
reg_alpha = clf.get_xgb_params()['reg_alpha']
print("-------------------------------------------------------------------------------------")
for thresh in thresholds:
if thresh > 0.0000000999:
# select features using threshold
selection = SelectFromModel(clf, threshold=thresh, prefit=True)
select_X_train = selection.transform(X_train[predictors])
# train model
selection_model = xgb.XGBClassifier(objective=objective,learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators, subsample=subsample, colsample_bytree=colsample_bytree, gamma=gamma, min_child_weight=min_child_weight, reg_alpha=reg_alpha)
selection_model.fit(select_X_train, y_train, eval_metric='auc')
# eval model
select_X_test = selection.transform(X_valid[predictors])
valid_pred = selection_model.predict(select_X_test)
x_train_pred = selection_model.predict(select_X_train)
# predictions = [round(value) for value in y_pred]
accuracy_valid = accuracy_score(y_valid, valid_pred)
accuracy_x_train = accuracy_score(y_train, x_train_pred )
print("Thresh=%.10f, n=%d, Accuracy (Valid): %.5g, Accuracy (X_train): %.5g" % (thresh, select_X_train.shape[1], accuracy_valid, accuracy_x_train))
def predict_test_from_threshold_all_data(clf, predictors, threshold):
thresholds = np.sort(clf.feature_importances_)
objective = clf.get_xgb_params()['objective']
learning_rate = clf.get_xgb_params()['learning_rate']
max_depth = clf.get_xgb_params()['max_depth']
n_estimators = clf.get_xgb_params()['n_estimators']
subsample = clf.get_xgb_params()['subsample']
colsample_bytree = clf.get_xgb_params()['colsample_bytree']
gamma = clf.get_xgb_params()['gamma']
min_child_weight = clf.get_xgb_params()['min_child_weight']
reg_alpha = clf.get_xgb_params()['reg_alpha']
selection = SelectFromModel(clf, threshold=threshold, prefit=True)
select_X_train = selection.transform(train[predictors])
selection_model = xgb.XGBClassifier(objective=objective,learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators, subsample=subsample, colsample_bytree=colsample_bytree, gamma=gamma, min_child_weight=min_child_weight, reg_alpha=reg_alpha)
selection_model.fit(select_X_train, target, eval_metric='auc')
select_X_test = selection.transform(test[predictors])
y_pred = selection_model.predict(select_X_test)
return y_pred
def new_features_from_threshold(prev_features, threshold):
new_features_name = []
for i in range(len(prev_features)):
if clf2.feature_importances_[i] > threshold:
new_features_name.append(prev_features[i])
return new_features_name
clf1 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=1000, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
modelfit(clf1, train, target, features_name)
```
### Model 2 - 0.78142
objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001
```
clf2 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
clf2.fit(train[features_name], target,eval_metric='auc')
pred2 = clf2.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred2
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb17.csv', index=False)
find_best_features_threshold(clf2, features_name)
new_features_name = new_features_from_threshold(features_name, 0.0019994)
modelfit(clf2, train, target, new_features_name, False)
```
#### CV accuracy improved (possibly overfitting)
```
print(new_features_name)
```
So these new features, with parameters<br>
objective='multi:softmax', learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight=1, reg_alpha=0.001,<br>
give the best local CV accuracy.
### Model 3 - 0.78134
```
clf3 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
clf3.fit(train[new_features_name], target, eval_metric='auc')
pred3 = clf3.predict(test[new_features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred3
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb18.csv', index=False)
clf4 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
modelfit(clf4, X_train, y_train, features_name, False)
```
#### It seems like the model trained on X_train with all features has a lower CV score than the model trained on X_train with only the important features
### Model 4 - 0.78128
```
clf4 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=190, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
clf4.fit(X_train[new_features_name], y_train, eval_metric='auc')
pred4 = clf4.predict(test[new_features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred4
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb19.csv', index=False)
clf4 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=1000, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
modelfit(clf4, train, target, new_features_name)
```
### Model 5 - 0.78134
```
clf5 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=4, n_estimators=257, subsample=0.8, colsample_bytree=0.6, gamma=0.1, min_child_weight= 1, reg_alpha=0.001)
clf5.fit(train[new_features_name], target, eval_metric='auc')
pred5 = clf5.predict(test[new_features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred5
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb20.csv', index=False)
modelfit(clf5, X_train, y_train,new_features_name, False)
clf6 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=5, n_estimators=2000, subsample=0.85, colsample_bytree=0.6, gamma=0.15, min_child_weight= 3, reg_alpha=0.001)
modelfit(clf6, train, target, features_name, early_stopping_rounds = 300)
```
### Model - 7 -- 0.78136
```
clf7 = xgb.XGBClassifier(objective ='multi:softmax',learning_rate=0.1, max_depth=5, n_estimators=157, subsample=0.85, colsample_bytree=0.6, gamma=0.15, min_child_weight= 3, reg_alpha=0.001)
modelfit(clf7, X_train, y_train, features_name, False)
clf7.fit(train[features_name], target, eval_metric= 'auc')
pred7 = clf7.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred7
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb21.csv', index=False)
```
### Bagging
```
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import cross_val_score
bagging = BaggingClassifier(base_estimator=clf2, n_estimators=10, random_state=2017, n_jobs=-1)
kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=2017)
results = cross_val_score(bagging, train[features_name], target, cv=kfold, scoring='accuracy')
print(results.mean())
bagging.get_params()
bagging.fit(train[features_name], target)
valid_pred_bagging = bagging.predict(X_valid[features_name])
print("Accuracy (valid_pred_bagging): %.5g" % accuracy_score(y_valid, valid_pred_bagging))
xtrain_pred_bagging = bagging.predict(X_train[features_name])
print("Accuracy (xtrain_pred_bagging): %.5g" % accuracy_score(y_train, xtrain_pred_bagging))
train_pred_bagging = bagging.predict(train[features_name])
print("Accuracy (train_pred_bagging): %.5g" % accuracy_score(target, train_pred_bagging))
```
#### clf2 trained on the entire train data gives a train accuracy of 0.78166 and an X_valid accuracy of 0.78170
#### Bagging with clf2 trained on the entire data set gives an accuracy of 0.78144 on every data set
### Model 8 - bagging with clf2 0.78131
```
pred8 = bagging.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred8
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb22.csv', index=False)
```
---
```
from sklearn.linear_model import LogisticRegression
# solver = 'sag', multi_class ='multinomial', max_iter = 300, n_jobs = -1, C = 0.5, random_state = 2017
logistic_clf = LogisticRegression(random_state = 2017 )
logistic_clf.fit(train[features_name], target)  # fit the logistic regression (was `lclf1`, which is undefined)
print("Accuracy (logistic valid): %.5g" % accuracy_score(y_valid, logistic_clf.predict(X_valid[features_name])))
lclf2 = LogisticRegression(solver = 'sag', multi_class ='multinomial', max_iter = 5000, n_jobs = -1, C = 0.5, random_state = 2017)
lclf2.fit(train[features_name], target)
lclf2.get_params()
print("Accuracy (lclf2 valid): %.5g" % accuracy_score(y_valid, lclf2.predict(X_valid[features_name])))
from sklearn import svm
lin_svc_clf = svm.LinearSVC()
lin_svc_clf.fit(train[features_name], target)
print("Accuracy (linear SVC valid): %.5g" % accuracy_score(y_valid, lin_svc_clf.predict(X_valid[features_name])))
from sklearn.neighbors import KNeighborsClassifier
neigh_clf = KNeighborsClassifier(n_neighbors=5, n_jobs = -1)
neigh_clf.fit(train[features_name], target)
print("Accuracy (kNN valid): %.5g" % accuracy_score(y_valid, neigh_clf.predict(X_valid[features_name])))
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier(max_depth=8, min_samples_split=7, max_features='sqrt', random_state=2017)
```
### Ensembling 0.78098
```
from sklearn.ensemble import VotingClassifier
estimators = []
model1 = bagging
estimators.append(('baggingXGB', model1))
model2 = logistic_clf
estimators.append(('logistic', model2))
model3 = dt_clf
estimators.append(('cart', model3))
model4 = neigh_clf
estimators.append(('knn', model4))
# create the ensemble model
ensemble = VotingClassifier(estimators=estimators, voting='soft', weights=[0.8, 0.5, 0.6, 0.5], n_jobs=-1)
ensemble.fit(train[features_name], target)
ensemble.get_params()
import pickle
filehandler = open("ensemble.pickle","wb")
pickle.dump(ensemble,filehandler)
filehandler.close()
print("Accuracy (ensemble): %.5g" % accuracy_score(y_valid, ensemble.predict(X_valid[features_name])))
pred_ensemble = ensemble.predict(test[features_name])
pred_ensemble
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred_ensemble
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb23.csv', index=False)
```
### Ensemble - 0.78134
```
estimators1 = []
model1 = bagging
estimators1.append(('baggingXGB', model1))
model2 = logistic_clf
estimators1.append(('logistic', model2))
model3 = clf2
estimators1.append(('xgb', model3))
model4 = neigh_clf
estimators1.append(('knn', model4))
# create the ensemble model
ensemble1 = VotingClassifier(estimators=estimators1, voting='soft', weights=[0.5, 0.2, 1.0 , 0.2], n_jobs=-1)
ensemble1.fit(train[features_name], target)
ensemble1.get_params()
print("Accuracy (ensemble): %.5g" % accuracy_score(y_valid, ensemble1.predict(X_valid[features_name])))
pred_ensemble1 = ensemble1.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred_ensemble1
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb24.csv', index=False)
```
### Ensemble - 0.78114
```
estimators2 = []
model1 = bagging
estimators2.append(('baggingXGB', model1))
model2 = logistic_clf
estimators2.append(('logistic', model2))
model3 = clf2
estimators2.append(('xgb', model3))
model4 = neigh_clf
estimators2.append(('knn', model4))
# create the ensemble model
ensemble2 = VotingClassifier(estimators=estimators2, voting='soft', weights=[1, 1, 1, 1], n_jobs=-1)
ensemble2.fit(train[features_name], target)
pred_ensemble2 = ensemble2.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred_ensemble2
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb25.csv', index=False)
estimators3 = []
model1 = bagging
estimators3.append(('baggingXGB', model1))
model2 = logistic_clf
estimators3.append(('logistic', model2))
model3 = neigh_clf
estimators3.append(('knn', model3))
# create the ensemble model
ensemble3 = VotingClassifier(estimators=estimators3, n_jobs=-1)  # use estimators3 here (estimators2 was passed by mistake)
ensemble3.fit(train[features_name], target)
print("Accuracy (ensemble): %.5g" % accuracy_score(y_valid, ensemble3.predict(X_valid[features_name])))
pred_ensemble3 = ensemble3.predict(test[features_name])
## make submission
sub = pd.read_csv('sample_submission.csv')
sub['target'] = pred_ensemble3
sub['target'] = sub['target'].astype(int)
sub.to_csv('sub_xgb26.csv', index=False)
```
### Ensembling is not working for me
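One way to see why the voting ensembles may not help is to compare the standalone validation accuracy of each fitted base model; if the weaker learners (logistic regression, kNN) sit well below the XGBoost models, soft voting mostly dilutes the strongest one. A rough diagnostic sketch using the classifiers fitted above:
```
# Compare the standalone validation accuracy of each ensemble member (diagnostic sketch)
for name, model in [('bagging XGB', bagging), ('xgb clf2', clf2),
                    ('logistic', logistic_clf), ('knn', neigh_clf)]:
    acc = accuracy_score(y_valid, model.predict(X_valid[features_name]))
    print("%-12s validation accuracy: %.5g" % (name, acc))
```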
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import sys
import copy
import warnings
import _pickle as pickle
from astropy.table import Table, Column, vstack, join
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from asap import io
from asap import smf
from asap import config
from asap import fitting
from asap import ensemble
from asap import plotting
from asap import predictions
from asap.parameters import AsapParams
from asap.likelihood import ln_likelihood, ln_probability
from asap.predictions import (predict_mstar_basic, predict_smf, predict_mhalo,
predict_dsigma_profiles, make_model_predictions)
from dsigma import compute_ds as ds
from dsigma import jackknife as jk
from dsigma import covariance as cov
from dsigma.stack_ds import stack_delta_sigma, batch_delta_sigma
from dsigma.plots import show_delta_sigma_profiles, show_r_delta_sigma, show_single_profile
plt.rc('text', usetex=True)
def prepare_catalog_for_acf(catalog, logm='logm_max', object_id='object_id', ra='ra', dec='dec',
redshift='z_best', min_logm=None):
"""Prepare the HSC catalog for awesome cluster finder."""
# Make a copy of the file
cat_use = copy.deepcopy(catalog)
# Add a Mstar column. Notice that this is not the logM*, but just M*
cat_use.add_column(Column(data=(10.0 ** cat_use[logm]), name='Mstar'))
# Normalize some column names
    if object_id != 'id':
        cat_use.rename_column(object_id, 'id')
    if ra != 'ra':
        cat_use.rename_column(ra, 'ra')
    if dec != 'dec':
        cat_use.rename_column(dec, 'dec')
    if redshift != 'z':
        cat_use.rename_column(redshift, 'z')
# Add index array if necessary
if 'index' not in cat_use.colnames:
cat_use.add_column(Column(data=np.arange(len(cat_use)), name='index'))
# Make a mass cut if necessary
if min_logm:
cat_use = cat_use[cat_use[logm] >= min_logm]
print("# Keep {} galaxies with {} >= {}".format(len(cat_use), logm, min_logm))
# Only keep the useful columns
return cat_use['id', 'ra', 'dec', 'z', 'Mstar'].as_array()
def stack_with_mask(lens_ds, lens_data, mask_use, rand_ds=None, rand_data=None,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight'):
"""Stack the DeltaSigma profiles for the selected lenses."""
lens_ds_use, lens_data_use = lens_ds[mask_use], lens_data[mask_use]
# Get the stacked profile
dsig_all, dsig_boot, dsig_jk = cov.get_bootstrap_samples_dsigma(
lens_ds_use, rand_ds, lens_data_use, rand_data, n_boots=n_boots, n_jobs=n_jobs,
z_bins=z_bins, selection_bias=selection_bias, weight_field=weight_field,
use_boost=False)
# Get the covariance matrix
dsig_cov = np.cov(dsig_boot, rowvar=False)
    # Use the diagonal terms as uncertainties
dsig_err = np.sqrt(np.diag(dsig_cov))
return dsig_all, dsig_err
```
## Predict Weak Lensing DeltaSigma Profiles for HSC Galaxies in a Halo Mass Bin
### Get the S16A lensing sample and random galaxies
* `lens_data` is the `npy` catalog for all the massive galaxies
* `rand_data` is the random catalog with 500k objects.
* `lens_pre` and `rand_pre` are the pre-computed individual lensing pairs.
- `lens_ds`, `rand_ds` are the results from lensing pre-compute.
    - `radial_bins` defines the radial bins of the DSigma profiles.
* `lens_mass` is the lens catalog with a lot of additional information.
* **Notice**: `lens_mass` and `lens_ds` have corresponding items, so don't mess up the order.
```
wl_dir = '/Users/song/data/massive/dr16a/s16a_massive_dsig'
# Lens data
lens_data = np.load(os.path.join(wl_dir, 'lens/s16a_massive_lens_prep_new.npy'))
rand_data = np.load(os.path.join(wl_dir, 'random/s16a_random_500k_prep_new.npy'))
# Pre-compute results
# Medium selection
lens_pre = np.load(os.path.join(wl_dir, 'results/s16a_massive_lens_basic_dsig_pre.npz'))
rand_pre = np.load(os.path.join(wl_dir, 'results/s16a_massive_rand_basic_dsig_pre.npz'))
print(len(lens_data), len(lens_pre['delta_sigma']), len(rand_pre['delta_sigma']))
ds.assert_precompute_catalog_consistent(lens_pre, lens_data)
ds.assert_lens_rand_consistent(lens_pre, rand_pre)
# Number of jackknife fields
njackknife_fields = 31
# Get the pre-compute deltaSigma data, radial bins
lens_ds, radial_bins = lens_pre['delta_sigma'], lens_pre['radial_bins']
rand_ds = rand_pre["delta_sigma"]
lens_ds, rand_ds = jk.add_jackknife_both(lens_ds, rand_ds, njackknife_fields)
# The cross-match is done outside
# The astropy.table.join seems to change the order of the table.
lens_mass = Table.read(os.path.join(wl_dir, 's16a_massive_lens_prep_cog_mask.fits'))
assert np.all(lens_mass['object_id'] == lens_data['object_id'])
# Add an index array, so that we won't mess up with the order
lens_mass.add_column(Column(name='index', data=np.arange(len(lens_mass))))
```
#### A basic quality cut for the massive galaxies
* These empirical cuts make sure the galaxy has a useful 1-D profile
```
quality_mask = ((lens_mass['logm_5'] >= 10.00) & (lens_mass['logm_5'] <= 11.52) &
(lens_mass['logm_10'] >= 10.47) & (lens_mass['logm_10'] <= 11.72) &
lens_mass['clean_photometry'] & lens_mass['cog_mask'])
print(quality_mask.sum(), len(lens_data))
```
#### Try to use a very simple method to exclude satellites
* This is based on the `awesome` cluster finder from Christopher Bradshaw
##### "Largest Central" method
* Under `acf.cluster_finder.largest_centrals()`
* Format is:
```python
input_catalog, memberships = largest_centrals(input_catalog, cylinder_radius, cylinder_half_length)
```
* Both `cylinder_radius` and `cylinder_half_length` are in units of Mpc and depend on the cosmology model.
* The `membership` catalog has three columns: `id`, `cluster_num`, `is_central`.
* For observations, we need to have these columns: `id`, `ra`, `dec`, `z`, `Mstar`.
```
import awesome_cluster_finder as acf
# `acf` now will automatically read in a config file for simulation, we can ignore that.
# Just make sure we are in the observation mode
acf.config.use = 'observation'
lens_acf = prepare_catalog_for_acf(lens_mass, logm='logm_max', object_id='index', redshift='z')
lens_acf_sorted, lens_acf_mem = acf.cluster_finder.largest_centrals(lens_acf, 1.0, 20.)
print("# There are {} central galaxies".format(lens_acf_mem['is_central'].sum()))
lens_acf_result = join(Table(lens_acf_sorted), Table(lens_acf_mem), keys='id', join_type='inner')
central_mask = (lens_acf_result['is_central'] > 0)
# Note: mask_3 and mask_3_cen are defined in later cells; run those cells first before making this plot
plt.scatter(lens_mass['logm_max'][mask_3], lens_mass['logm_10'][mask_3])
plt.scatter(lens_mass['logm_max'][mask_3_cen], lens_mass['logm_10'][mask_3_cen], alpha=0.8)
```
### Assign halo mass to HSC galaxies
#### Using Mhalo-Mmax-M10kpc plane
* `13.6826 + 3.541 * (logm_max - 11.71) - 2.542 * (logm_10 - 11.33)`
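Written out, the plane relation in the bullet above is
$$\log M_{\rm halo} = 13.6826 + 3.541\,(\log M_{\star,\ {\rm max}} - 11.71) - 2.542\,(\log M_{\star,\ 10\,{\rm kpc}} - 11.33),$$
where $\log M_{\star,\ {\rm max}}$ and $\log M_{\star,\ 10\,{\rm kpc}}$ correspond to the `logm_max` and `logm_10` columns (the exact mass definitions are an assumption based on the column names); the next cell evaluates this relation for every lens.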
```
logmh_plane = 13.6826 + 3.541 * (lens_mass['logm_max'] - 11.71) - 2.542 * (lens_mass['logm_10'] - 11.33)
#lens_mass.remove_column('logmh_plane')
lens_mass.add_column(Column(data=logmh_plane, name='logmh_plane'))
```
#### Get the DSigma profiles for all galaxies in the bin
```
mask_1 = ((lens_mass['logmh_plane'] >= 13.00) & (lens_mass['logmh_plane'] < 13.42) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
mask_2 = ((lens_mass['logmh_plane'] >= 13.42) & (lens_mass['logmh_plane'] < 13.83) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
mask_3 = ((lens_mass['logmh_plane'] >= 13.83) & (lens_mass['logmh_plane'] < 14.25) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
print(mask_1.sum(), mask_2.sum(), mask_3.sum())
dsig_1, err_1 = stack_with_mask(
lens_ds, lens_data, mask_1, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_2, err_2 = stack_with_mask(
lens_ds, lens_data, mask_2, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_3, err_3 = stack_with_mask(
lens_ds, lens_data, mask_3, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
np.savez('asap_hsc_mvir_predict_1906_bin1.npz', rmpc=radial_bins,
dsigma=dsig_1, err=err_1)
np.savez('asap_hsc_mvir_predict_1906_bin2.npz', rmpc=radial_bins,
dsigma=dsig_2, err=err_2)
np.savez('asap_hsc_mvir_predict_1906_bin3.npz', rmpc=radial_bins,
dsigma=dsig_3, err=err_3)
```
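As a quick check that the profiles were saved correctly, a minimal sketch that re-loads one of the files just written and lists its contents:
```
# Re-load one of the saved profiles to verify the stored arrays (sketch)
bin1 = np.load('asap_hsc_mvir_predict_1906_bin1.npz')
print(list(bin1.keys()))
print(bin1['rmpc'], bin1['dsigma'], bin1['err'])
```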
#### Get the DSigma for "centrals" only
* TODO: there is something weird
```
mask_1_cen = ((lens_mass['logmh_plane'] >= 13.00) & (lens_mass['logmh_plane'] < 13.42) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
mask_2_cen = ((lens_mass['logmh_plane'] >= 13.42) & (lens_mass['logmh_plane'] < 13.83) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
mask_3_cen = ((lens_mass['logmh_plane'] >= 13.83) & (lens_mass['logmh_plane'] < 14.25) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
print(mask_1_cen.sum(), mask_2_cen.sum(), mask_3_cen.sum())
dsig_1_cen, err_1_cen = stack_with_mask(
lens_ds, lens_data, mask_1_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_2_cen, err_2_cen = stack_with_mask(
lens_ds, lens_data, mask_2_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_3_cen, err_3_cen = stack_with_mask(
lens_ds, lens_data, mask_3_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
fig = plt.figure(figsize=(9, 7))
fig.subplots_adjust(left=0.16, right=0.99, bottom=0.13, top=0.95,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
ax1 = show_single_profile(radial_bins, dsig_1, dsigma_err=err_1,
ax=ax1, s=70, marker='o', c=plt.cm.Accent(0.5),
label=r'$[13.00, 13.42]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_1_cen, dsigma_err=err_1_cen,
ax=ax1, s=80, marker='o', c=plt.cm.Accent(0.4),
label=r'$[13.00, 13.42]\ \rm Cen$')
ax1 = show_single_profile(radial_bins, dsig_2, dsigma_err=err_2,
ax=ax1, s=70, marker='h', c=plt.cm.Accent(0.1),
label=r'$[13.42, 13.83]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_2_cen, dsigma_err=err_2_cen,
ax=ax1, s=80, marker='h', c=plt.cm.Accent(0.2),
label=r'$[13.42, 13.83]\ \rm Cen$')
ax1 = show_single_profile(radial_bins, dsig_3, dsigma_err=err_3,
ax=ax1, s=70, marker='s', c=plt.cm.Accent(0.8),
label=r'$[13.83, 14.25]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_3_cen, dsigma_err=err_3_cen,
ax=ax1, s=80, marker='s', c=plt.cm.Accent(0.9),
label=r'$[13.83, 14.25]\ \rm Cen$')
ax1.legend(loc='upper right', fontsize=17)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(25)
ax1.set_ylim(0.4, 289)
_ = ax1.set_xlabel(r'$R\ [\mathrm{Mpc}]$', fontsize=30)
_ = ax1.set_ylabel(r'$\Delta\Sigma\ (M_{\odot}/\mathrm{pc}^2)$', fontsize=30)
```
|
github_jupyter
|
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import sys
import copy
import warnings
import _pickle as pickle
from astropy.table import Table, Column, vstack, join
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from asap import io
from asap import smf
from asap import config
from asap import fitting
from asap import ensemble
from asap import plotting
from asap import predictions
from asap.parameters import AsapParams
from asap.likelihood import ln_likelihood, ln_probability
from asap.predictions import (predict_mstar_basic, predict_smf, predict_mhalo,
predict_dsigma_profiles, make_model_predictions)
from dsigma import compute_ds as ds
from dsigma import jackknife as jk
from dsigma import covariance as cov
from dsigma.stack_ds import stack_delta_sigma, batch_delta_sigma
from dsigma.plots import show_delta_sigma_profiles, show_r_delta_sigma, show_single_profile
plt.rc('text', usetex=True)
def prepare_catalog_for_acf(catalog, logm='logm_max', object_id='object_id', ra='ra', dec='dec',
redshift='z_best', min_logm=None):
"""Prepare the HSC catalog for awesome cluster finder."""
# Make a copy of the file
cat_use = copy.deepcopy(catalog)
# Add a Mstar column. Notice that this is not the logM*, but just M*
cat_use.add_column(Column(data=(10.0 ** cat_use[logm]), name='Mstar'))
# Normalize some column names
if object_id is not 'id':
cat_use.rename_column(object_id, 'id')
if ra is not 'ra':
cat_use.renmae_column(ra, 'ra')
if dec is not 'dec':
cat_use.renmae_column(dec, 'dec')
if redshift is not 'z':
cat_use.rename_column(redshift, 'z')
# Add index array if necessary
if 'index' not in cat_use.colnames:
cat_use.add_column(Column(data=np.arange(len(cat_use)), name='index'))
# Make a mass cut if necessary
if min_logm:
cat_use = cat_use[cat_use[logm] >= min_logm]
print("# Keep {} galaxies with {} >= {}".format(len(cat_use), logm, min_logm))
# Only keep the useful columns
return cat_use['id', 'ra', 'dec', 'z', 'Mstar'].as_array()
def stack_with_mask(lens_ds, lens_data, mask_use, rand_ds=None, rand_data=None,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight'):
"""Stack the DeltaSigma profiles for the selected lenses."""
lens_ds_use, lens_data_use = lens_ds[mask_use], lens_data[mask_use]
# Get the stacked profile
dsig_all, dsig_boot, dsig_jk = cov.get_bootstrap_samples_dsigma(
lens_ds_use, rand_ds, lens_data_use, rand_data, n_boots=n_boots, n_jobs=n_jobs,
z_bins=z_bins, selection_bias=selection_bias, weight_field=weight_field,
use_boost=False)
# Get the covariance matrix
dsig_cov = np.cov(dsig_boot, rowvar=False)
# Use the diagnoal terms as uncertainties
dsig_err = np.sqrt(np.diag(dsig_cov))
return dsig_all, dsig_err
wl_dir = '/Users/song/data/massive/dr16a/s16a_massive_dsig'
# Lens data
lens_data = np.load(os.path.join(wl_dir, 'lens/s16a_massive_lens_prep_new.npy'))
rand_data = np.load(os.path.join(wl_dir, 'random/s16a_random_500k_prep_new.npy'))
# Pre-compute results
# Medium selection
lens_pre = np.load(os.path.join(wl_dir, 'results/s16a_massive_lens_basic_dsig_pre.npz'))
rand_pre = np.load(os.path.join(wl_dir, 'results/s16a_massive_rand_basic_dsig_pre.npz'))
print(len(lens_data), len(lens_pre['delta_sigma']), len(rand_pre['delta_sigma']))
ds.assert_precompute_catalog_consistent(lens_pre, lens_data)
ds.assert_lens_rand_consistent(lens_pre, rand_pre)
# Number of Jackknife fiedls
njackknife_fields = 31
# Get the pre-compute deltaSigma data, radial bins
lens_ds, radial_bins = lens_pre['delta_sigma'], lens_pre['radial_bins']
rand_ds = rand_pre["delta_sigma"]
lens_ds, rand_ds = jk.add_jackknife_both(lens_ds, rand_ds, njackknife_fields)
# The cross-match is done outside
# The astropy.table.join seems to change the order of the table.
lens_mass = Table.read(os.path.join(wl_dir, 's16a_massive_lens_prep_cog_mask.fits'))
assert np.all(lens_mass['object_id'] == lens_data['object_id'])
# Add an index array, so that we won't mess up with the order
lens_mass.add_column(Column(name='index', data=np.arange(len(lens_mass))))
quality_mask = ((lens_mass['logm_5'] >= 10.00) & (lens_mass['logm_5'] <= 11.52) &
(lens_mass['logm_10'] >= 10.47) & (lens_mass['logm_10'] <= 11.72) &
lens_mass['clean_photometry'] & lens_mass['cog_mask'])
print(quality_mask.sum(), len(lens_data))
input_catalog, memberships = largest_centrals(input_catalog, cylinder_radius, cylinder_half_length)
import awesome_cluster_finder as acf
# `acf` now will automatically read in a config file for simulation, we can ignore that.
# Just make sure we are in the observation mode
acf.config.use = 'observation'
lens_acf = prepare_catalog_for_acf(lens_mass, logm='logm_max', object_id='index', redshift='z')
lens_acf_sorted, lens_acf_mem = acf.cluster_finder.largest_centrals(lens_acf, 1.0, 20.)
print("# There are {} central galaxies".format(lens_acf_mem['is_central'].sum()))
lens_acf_result = join(Table(lens_acf_sorted), Table(lens_acf_mem), keys='id', join_type='inner')
central_mask = (lens_acf_result['is_central'] > 0)
plt.scatter(lens_mass['logm_max'][mask_3], lens_mass['logm_10'][mask_3])
plt.scatter(lens_mass['logm_max'][mask_3_cen], lens_mass['logm_10'][mask_3_cen], alpha=0.8)
logmh_plane = 13.6826 + 3.541 * (lens_mass['logm_max'] - 11.71) - 2.542 * (lens_mass['logm_10'] - 11.33)
#lens_mass.remove_column('logmh_plane')
lens_mass.add_column(Column(data=logmh_plane, name='logmh_plane'))
mask_1 = ((lens_mass['logmh_plane'] >= 13.00) & (lens_mass['logmh_plane'] < 13.42) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
mask_2 = ((lens_mass['logmh_plane'] >= 13.42) & (lens_mass['logmh_plane'] < 13.83) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
mask_3 = ((lens_mass['logmh_plane'] >= 13.83) & (lens_mass['logmh_plane'] < 14.25) &
(lens_mass['logm_max'] >= 11.2) & quality_mask)
print(mask_1.sum(), mask_2.sum(), mask_3.sum())
dsig_1, err_1 = stack_with_mask(
lens_ds, lens_data, mask_1, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_2, err_2 = stack_with_mask(
lens_ds, lens_data, mask_2, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_3, err_3 = stack_with_mask(
lens_ds, lens_data, mask_3, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
np.savez('asap_hsc_mvir_predict_1906_bin1.npz', rmpc=radial_bins,
dsigma=dsig_1, err=err_1)
np.savez('asap_hsc_mvir_predict_1906_bin2.npz', rmpc=radial_bins,
dsigma=dsig_2, err=err_2)
np.savez('asap_hsc_mvir_predict_1906_bin3.npz', rmpc=radial_bins,
dsigma=dsig_3, err=err_3)
mask_1_cen = ((lens_mass['logmh_plane'] >= 13.00) & (lens_mass['logmh_plane'] < 13.42) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
mask_2_cen = ((lens_mass['logmh_plane'] >= 13.42) & (lens_mass['logmh_plane'] < 13.83) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
mask_3_cen = ((lens_mass['logmh_plane'] >= 13.83) & (lens_mass['logmh_plane'] < 14.25) &
(lens_mass['logm_max'] >= 11.2) & quality_mask & ~central_mask)
print(mask_1_cen.sum(), mask_2_cen.sum(), mask_3_cen.sum())
dsig_1_cen, err_1_cen = stack_with_mask(
lens_ds, lens_data, mask_1_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_2_cen, err_2_cen = stack_with_mask(
lens_ds, lens_data, mask_2_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
dsig_3_cen, err_3_cen = stack_with_mask(
lens_ds, lens_data, mask_3_cen, rand_ds=rand_ds, rand_data=rand_data,
n_boots=10000, n_jobs=4, z_bins=8, selection_bias=True,
weight_field='weight')
fig = plt.figure(figsize=(9, 7))
fig.subplots_adjust(left=0.16, right=0.99, bottom=0.13, top=0.95,
wspace=0.00, hspace=0.00)
ax1 = fig.add_subplot(111)
ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
ax1 = show_single_profile(radial_bins, dsig_1, dsigma_err=err_1,
ax=ax1, s=70, marker='o', c=plt.cm.Accent(0.5),
label=r'$[13.00, 13.42]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_1_cen, dsigma_err=err_1_cen,
ax=ax1, s=80, marker='o', c=plt.cm.Accent(0.4),
label=r'$[13.00, 13.42]\ \rm Cen$')
ax1 = show_single_profile(radial_bins, dsig_2, dsigma_err=err_2,
ax=ax1, s=70, marker='h', c=plt.cm.Accent(0.1),
label=r'$[13.42, 13.83]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_2_cen, dsigma_err=err_2_cen,
ax=ax1, s=80, marker='h', c=plt.cm.Accent(0.2),
label=r'$[13.42, 13.83]\ \rm Cen$')
ax1 = show_single_profile(radial_bins, dsig_3, dsigma_err=err_3,
ax=ax1, s=70, marker='s', c=plt.cm.Accent(0.8),
label=r'$[13.83, 14.25]\ \rm All$')
ax1 = show_single_profile(radial_bins, dsig_3_cen, dsigma_err=err_3_cen,
ax=ax1, s=80, marker='s', c=plt.cm.Accent(0.9),
label=r'$[13.83, 14.25]\ \rm Cen$')
ax1.legend(loc='upper right', fontsize=17)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(25)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(25)
ax1.set_ylim(0.4, 289)
_ = ax1.set_xlabel(r'$R\ [\mathrm{Mpc}]$', fontsize=30)
_ = ax1.set_ylabel(r'$\Delta\Sigma\ (M_{\odot}/\mathrm{pc}^2)$', fontsize=30)
```
import numpy as np
# --- centralms ---
from centralms import util as UT
from centralms import abcee as ABC
from centralms import catalog as Cat
from centralms import evolver as Evo
from centralms import observables as Obvs
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
%matplotlib inline
fig = plt.figure(figsize=(5,5))
sub = fig.add_subplot(111)
for z in [0.05, 0.5, 1.]:
m, phi = Obvs.analyticSMF(z, source='li-march')
sub.plot(m, phi, label='z='+str(z))
sub.legend(loc='lower left', fontsize=20)
sub.set_yscale('log')
sub.set_xlim(9., 12.)
sub.set_ylim([1e-6, 1e-1])
censub = Cat.CentralSubhalos()
shcat = censub.Read()
fig = plt.figure()
sub = fig.add_subplot(111)
m, phi = Obvs.getMF(shcat['m.sham'])
sub.plot(m, phi, c='k', label='Snapshot 1')
for i in [2, 5, 10]:
m, phi = Obvs.getMF(shcat['m.sham.snap'+str(i)])
sub.plot(m, phi, label="Snapshot "+str(i))
sub.legend(loc='lower left', fontsize=20)
sub.set_yscale('log')
sub.set_xlim(9., 12.)
sub.set_ylim([1e-6, 1e-1])
fig = plt.figure(figsize=(5,5))
sub = fig.add_subplot(111)
m, phi = Obvs.analyticSMF(0.05, source='li-march')
sub.plot(m, phi, label='z=0.05 analytics')
marr, phi, phierr = Obvs.dataSMF(source='li-white')
sub.plot(marr, phi, label='Li and White')
sub.legend(loc='lower left', fontsize=20)
sub.set_yscale('log')
sub.set_xlim(9., 12.)
sub.set_ylim([1e-6, 1e-1])
fig = plt.figure(figsize=(5,5))
sub = fig.add_subplot(111)
m, phi = Obvs.analyticSMF(UT.z_nsnap(15), source='li-march')
sub.plot(m, phi, label='z='+str(UT.z_nsnap(15))+' analytics')
start = (shcat['nsnap_start'] == 15)
m, phi = Obvs.getMF(shcat['m.star0'][start])
sub.plot(m, phi, label='Snapshot 15')
m, phi = Obvs.getMF(shcat['m.sham.snap15'])
sub.plot(m, phi, ls='--', label='SHAM Snapshot 15')
sub.legend(loc='lower left', fontsize=20)
sub.set_yscale('log')
sub.set_xlim(9., 12.)
sub.set_ylim([1e-6, 1e-1])
fig = plt.figure()
sub = fig.add_subplot(111)
m, phi = Obvs.getMF(shcat['m.sham'])
sub.plot(m, phi, c='k', ls='--', label='SHAM Central SMF')
marr, phi, phierr = Obvs.dataSMF(source='li-white')
phi *= (1. - np.array([Obvs.f_sat(mm, 0.05) for mm in marr])) # satellite fraction
sub.plot(marr, phi, label=r'$f_\mathrm{cen} \times$ Li-White')
m, phi = Obvs.analyticSMF(0.05, source='li-march')
phi *= (1. - np.array([Obvs.f_sat(mm, 0.05) for mm in m])) # satellite fraction
sub.plot(m, phi, label=r'$f_\mathrm{cen} \times$ analytic')
sub.legend(loc='lower left', fontsize=20)
sub.set_yscale('log')
sub.set_xlim(9., 12.)
sub.set_ylim([1e-6, 1e-1])
```
```
%load_ext nb_black
%load_ext autoreload
%autoreload 2
import os
print(os.getcwd())
def update_working_directory():
from pathlib import Path
p = Path(os.getcwd()).parents[0]
os.chdir(p)
print(p)
update_working_directory()
```
# Import
```
import dill
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
from src.models.gradient_boosting import ModelGradientBoosting
import src.models.performance_metrics as performance_metrics
path_dataset_train = "data/raw/20210119/dataset_train.pkl"
path_dataset_valid = "data/raw/20210119/dataset_valid.pkl"
```
# Dataset
```
with open(path_dataset_train, "rb") as input_file:
dataset_train = dill.load(input_file)
with open(path_dataset_valid, "rb") as input_file:
dataset_valid = dill.load(input_file)
```
# Overall
```
model = ModelGradientBoosting()
model.version
dataset_train = model.preprocessing_training(dataset_train)
dataset_valid = model.preprocessing_inference(dataset_valid)
model.train(dataset_train, dataset_valid)
with open(f"models/{model.version}__model.pkl", "wb") as file:
dill.dump(model, file)
```
# Data Transformation
```
vardict.keys()
```
## Target
## Numerical
## Diff time
## Boolean
## Categorical
## Overall
### vardict
```
vardict["all"] = (
vardict["numerical"]
+ vardict["diff_time"]
+ vardict["dummy_boolean"]
+ vardict["dummy_categorical"]
)
```
# 1st model
# Validation results
## Overall
```
with open(path_dataset_valid, "rb") as input_file:
dataset_valid = dill.load(input_file)
model.predict_and_show_results(dataset_valid, save_folder="data/pipeline/20210121")
```
## Details
```
with open(path_dataset_valid, "rb") as input_file:
dataset_valid = dill.load(input_file)
self = model
```
### Predictions
```
y_valid = dataset_valid[self.vardict["target"]].copy()
dataset_valid = self.preprocessing_inference(dataset_valid)
predictions = self.predict(dataset=dataset_valid, target_present=False)
predictions["y_true"] = y_valid.values.tolist()
predictions
```
### Results
```
binary_classification_results = performance_metrics.get_binary_classification_results(
predictions, model_name=f"{model.version}_valid"
)
binary_classification_results
regression_results = performance_metrics.get_regression_results(
predictions, model_name=f"{model.version}_valid"
)
regression_results
performance_metrics.plot_roc_auc_curve(predictions, model_name=f"{model.version}_valid")
performance_metrics.plot_precision_recall_curve(
predictions, binary_classification_results, model_name=f"{model.version}_valid"
)
performance_metrics.plot_predictions(predictions, model_name=f"{model.version}_valid")
def predict_and_show_results(model, dataset_valid, save_folder="data/processed"):
y_valid = dataset_valid[model.vardict["target"]].copy()
dataset_valid = model.preprocessing_inference(dataset_valid)
predictions = model.predict(dataset=dataset_valid, target_present=False)
predictions["y_true"] = y_valid.values.tolist()
show_results(
predictions,
model_name=model.version,
show_plot=model.global_config["show_plot"],
save_plot=model.global_config["save_plot"],
save_folder=save_folder,
)
def show_results(
predictions,
model_name,
show_plot=True,
save_plot=True,
save_folder="data/processed",
):
binary_classification_results = (
performance_metrics.get_binary_classification_results(
predictions, model_name, save_folder
)
)
regression_results = performance_metrics.get_regression_results(
predictions, model_name, save_folder
)
performance_metrics.plot_roc_auc_curve(
predictions, model_name, show_plot, save_plot, save_folder
)
performance_metrics.plot_precision_recall_curve(
predictions,
binary_classification_results,
model_name,
show_plot,
save_plot,
save_folder,
)
performance_metrics.plot_predictions(
predictions, model_name, show_plot, save_plot, save_folder
)
```
# Hyperparameters search
```
path_dataset_train = "data/raw/20210119/dataset_train.pkl"
path_dataset_valid = "data/raw/20210119/dataset_valid.pkl"
```
##### Import
```
import dill
import json
import numpy as np
import optuna
import pandas as pd
pd.set_option("display.max_columns", None)
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
from src.models.gradient_boosting import ModelGradientBoosting
import src.models.performance_metrics as performance_metrics
import src.visualization.visualize_hyperparameter as visualize_hyperparameter
```
##### Dataset
```
with open(path_dataset_train, "rb") as input_file:
dataset_train = dill.load(input_file)
with open(path_dataset_valid, "rb") as input_file:
dataset_valid = dill.load(input_file)
dataset_hyperoptim = dataset_train.append(dataset_valid)
```
##### create different training/valid folds
```
nb_sessions = max(dataset_hyperoptim["id_session"]) + 1
nb_folds = 10
nb_sessions_valid = 1
list_train_dataset, list_valid_dataset = create_folds_for_hyperparameters_tuning(
nb_sessions, nb_folds, nb_sessions_valid
)
```
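The helper `create_folds_for_hyperparameters_tuning` used above is not defined in the captured cells. The following is a minimal sketch of one plausible implementation (leave-`nb_sessions_valid`-sessions-out folds); it assumes, as the call above suggests, that `dataset_hyperoptim` is in scope and carries an `id_session` column. The real implementation may differ.
```
import numpy as np


def create_folds_for_hyperparameters_tuning(nb_sessions, nb_folds, nb_sessions_valid):
    # Sketch only: draw nb_sessions_valid random sessions for validation in each fold,
    # and train on the remaining sessions.
    rng = np.random.default_rng(seed=0)
    list_train_dataset, list_valid_dataset = [], []
    for _ in range(nb_folds):
        valid_sessions = rng.choice(nb_sessions, size=nb_sessions_valid, replace=False)
        is_valid = dataset_hyperoptim["id_session"].isin(valid_sessions)
        list_valid_dataset.append(dataset_hyperoptim[is_valid].copy())
        list_train_dataset.append(dataset_hyperoptim[~is_valid].copy())
    return list_train_dataset, list_valid_dataset
```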
##### functions for hyperparametrization
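The `hyperparameter_objective` and `callback_object` referenced in the next cell are also not captured. Below is a hedged, self-contained sketch of what they could look like. The parameter names mirror the `params_*` columns later read from `all_trials.csv`; the model and data here (scikit-learn's `GradientBoostingClassifier` on synthetic data) are purely illustrative stand-ins for training `ModelGradientBoosting` on the session folds above.
```
import os

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score

X_demo, y_demo = make_classification(n_samples=500, n_features=20, random_state=0)


def hyperparameter_objective(trial):
    # Search space mirroring the params_* columns of all_trials.csv.
    max_depth = trial.suggest_int("max_depth", 3, 12)
    num_leaves = trial.suggest_int("num_leaves", 8, 128)
    bagging_fraction = trial.suggest_float("bagging_fraction", 0.5, 1.0)
    feature_fraction = trial.suggest_float("feature_fraction", 0.5, 1.0)
    learning_rate = trial.suggest_float("learning_rate", 1e-3, 0.3, log=True)

    # Illustrative model only; the LightGBM-style names are mapped to rough analogues.
    model = GradientBoostingClassifier(
        max_depth=max_depth,
        max_leaf_nodes=num_leaves,
        subsample=bagging_fraction,      # rough analogue of bagging_fraction
        max_features=feature_fraction,   # rough analogue of feature_fraction
        learning_rate=learning_rate,
        n_estimators=50,
        random_state=0,
    )
    return cross_val_score(model, X_demo, y_demo, cv=3, scoring="roc_auc").mean()


def _save_all_trials(study, trial):
    # Persist every trial so far to the CSV that is read back further down.
    folder = "data/interim/hyperparameter_tuning/gb_20210123"
    os.makedirs(folder, exist_ok=True)
    study.trials_dataframe().to_csv(os.path.join(folder, "all_trials.csv"), index=False)


callback_object = [_save_all_trials]
```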
##### launch hyperparameter tuning
```
study = optuna.create_study(direction="maximize", study_name="gb_20210123")
study.optimize(func=hyperparameter_objective, n_trials=20, callbacks=callback_object)
study.best_trial.params
study.trials_dataframe()
```
##### study hyperparameter tuning
```
hyperparameters_df = pd.read_csv(
"data/interim/hyperparameter_tuning/gb_20210123/all_trials.csv"
)
hyperparameters_df["n_trial_all"] = list(range(1, len(hyperparameters_df) + 1))
hyperparameters_df
best_hyperparameters = hyperparameters_df.loc[
    hyperparameters_df["value"].idxmax()
]
best_hyperparameters
for hyperparameter_to_plot in [
"params_max_depth",
"params_num_leaves",
"params_bagging_fraction",
"params_feature_fraction",
"params_learning_rate",
]:
visualize_hyperparameter.plot_result_hyperparameter(
hyperparameters_df=hyperparameters_df,
hyperparameter_to_plot=hyperparameter_to_plot,
variable_objective="value",
use_log_scale=(hyperparameter_to_plot in ["params_learning_rate"]),
minimize_objective=False,
folder_save=f"data/interim/hyperparameter_tuning/gb_20210123",
)
```
##### time taken for each trial
```
visualize_hyperparameter.plot_time_hyperparameter(
hyperparameters_df=hyperparameters_df,
folder_save=f"data/interim/hyperparameter_tuning/gb_20210123",
)
```
# Probas & Predictions
## Load Data
```
from data import Images_load
train, validation, test = Images_load.load_data()
train.features.shape
```
# ResNet 50 transfer learning:
```
import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input
def _prepare_data(train, validation, test):
"""
Prepare datasets of images for CNN
:param train:
:param validation:
:param test:
:return:
"""
features, y_train = train.features, train.labels
featuresV, y_val = validation.features, validation.labels
featuresT, y_test = test.features, test.labels
x_train = np.stack(features)
x_val = np.stack(featuresV)
x_test = np.stack(featuresT)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_val = x_val.astype('float32')
x_val /= 255
x_train /= 255
x_test /= 255
return x_train, y_train, x_test, y_test, x_val, y_val
import numpy as np
x_train, y_train, x_test, y_test, x_val, y_val=_prepare_data(train,validation,test)
# Load ResNet50 Trained on imagenet
resnet_model = ResNet50(weights="imagenet")
# We should preprocess the images the same way resnet images were preprocessed
x_train_preprocessed = preprocess_input(x_train)
x_test_preprocessed = preprocess_input(x_test)
x_val_preprocess = preprocess_input(x_val)
# Build a new model that is ResNet50 minus the very last layer
last_layer = resnet_model.get_layer("avg_pool")
resnet_layers = keras.Model(inputs=resnet_model.inputs, outputs=last_layer.output)
# We can directly stitch the models together
ResNet_adapt=Sequential()
ResNet_adapt.add(resnet_layers)
ResNet_adapt.add(Dense(2, activation="sigmoid"))
ResNet_adapt.layers[0].trainable = False  # only the final Dense layer is trained; the weights inside the ResNet model stay frozen
ResNet_adapt.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
ResNet_adapt.summary()
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.callbacks import TensorBoard
my_callbacks = [
EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto', restore_best_weights=True),
ModelCheckpoint(filepath='Resnet_transfer.{epoch:02d}-{val_loss:.2f}.h5',
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max'),
TensorBoard(log_dir="logs", histogram_freq=0, write_graph=True, write_images=True)
]
%%time
history=ResNet_adapt.fit(x_train_preprocessed, y_train, epochs=50, validation_data=(x_val_preprocess, y_val), callbacks=my_callbacks)
from matplotlib import pyplot
# plot train and validation loss
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['val_loss'])
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train', 'validation'], loc='upper right')
pyplot.show()  # note: the gap between train and validation loss shows clear overfitting
from sklearn.metrics import classification_report, confusion_matrix
score = ResNet_adapt.evaluate(x_train_preprocessed, y_train, verbose=2)
print('Train accuracy:', score[1])
```
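The `classification_report` and `confusion_matrix` imports in the last cell are never used in the captured cells. A possible follow-up, sketched below, would evaluate per-class metrics on the held-out test set; it assumes `y_test` is one-hot encoded, as the two-unit output layer suggests.
```
# Sketch: per-class metrics on the test set, reusing objects defined above.
y_pred = ResNet_adapt.predict(x_test_preprocessed)
y_true_cls = np.argmax(y_test, axis=1)  # assumes one-hot labels
y_pred_cls = np.argmax(y_pred, axis=1)
print(classification_report(y_true_cls, y_pred_cls))
print(confusion_matrix(y_true_cls, y_pred_cls))
```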
## Nice Extensions
#### Too long, didn't read:
- Gist-it - Share your notebook in just a few clicks
- Code Folding - Allows you to fold inside of code cells just like a regular IDE
- AutoSaveTime - Auto save your notebook every n minutes
- ExecuteTime - Automatically show how long each cell took to execute. No need for time magics.
- Hinterland - Code autocomplete suggestions for each keypress, not just after tab
- Rubberband - Drag the mouse to select multiple cells at once
- MoveSelectedCells - Add shortcuts to move cells up/down with Alt+up and Alt+down
- Scratchpad - Create a throwaway cell to run code while not cluttering up your nb
- Scrolldown - When a cell has long output, autoscroll down to the latest output
#### Enabling all "Nice Extensions"
```
!jupyter nbextension enable gist_it/main
!jupyter nbextension enable codefolding/main
!jupyter nbextension enable autosavetime/main
!jupyter nbextension enable execute_time/ExecuteTime
!jupyter nbextension enable hinterland/hinterland
!jupyter nbextension enable rubberband/main
!jupyter nbextension enable move_selected_cells/main
!jupyter nbextension enable scratchpad/main
!jupyter nbextension enable scroll_down/main
```
**Don't forget to refresh the page!**
### Gist-it
<strong>The easiest way to share your notebooks</strong>
Gist-it adds a button to publish the current notebook as a [github gist](https://help.github.com/en/github/writing-on-github/creating-gists). Gists are a quick and easy way to share your code with others. Each gist is its own git repository, which means people can clone/fork your work and expand on it if you choose to make it public.

Clicking the github gist button will give you a popup window that will allow you to make a public/private gist in one click.

<strong>Warning: the gists you upload won't include local media like images on your computer. </strong> Possible solutions are to only use online images, or host them online and make the links point there. The best solution is to follow the Tips and Customization section below and link your github account with an auth token, then you'll be able to add images to the repo that's created when you make the gist. Not sure how to add images to the gist repo? [Here's a nice gist that explains it](https://gist.github.com/mroderick/1afdd71aa69f6b29601d335751a1a9be)
#### Tips and Customization

If you want to have the gist be associated with your github account, you'll need to generate and add a personal access token, something that only takes a few short steps.
1. When logged in go to your [Github Personal Access Tokens](https://github.com/settings/tokens)
1. Click Generate new token
1. A big scary list of permissions pops up, you only need one (gist)

1. Copy that personal access token and paste it in the nbconfigurator for gistit where it says "Github personal access token"
That's it, now you're ready to share your code with the world. Googling and finding gists has saved me countless times.
### MoveSelectedCells
<strong>Tired of trying combinations of C/V/Ctrl-C/Ctrl-V to move your cells around? This is an easier way.</strong>
MoveSelectedCells brings a new keyboard shortcut that is standard in every other ide, the ability to slide a line of code up and down without using copy/paste. This gives us the same functionality, but for cells instead of individual lines. Using Alt+Up/Down while in command mode(blue) will move the cell up and down throughout the notebook.
Better still, cells collapsed using the Collapsible Headings extension will move together as one cell using MoveSelectedCells! If you try to copy/paste or cut/paste a collapsed cell, only the header moves and everything breaks. I'm going to use this shortcut right now to reorder this tutorial.
### ExecuteTime
<strong>See how long every code cell takes to execute without having to add/remove timing code.</strong>
This extension borders on essential. It gives you a timestamp and execution time in ms for every code cell, while taking up next to no additional space.

#### Tips and Customization
But can we trust it? The answer is "well, kind of". It appears to add a consistent 4-5ms of its own execution time, so for anything that is important and runs in < 10ms, use %%time for a better estimate.

There are multiple ways to customize the output string, and they are clearly described if you click on ExecuteTime in nbconfigurator.
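For reference, here is a minimal example of the `%%time` cell magic mentioned above:
```
%%time
# Times the whole cell, without the small display overhead that ExecuteTime adds.
total = sum(i * i for i in range(100_000))
```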
### Code Folding
<strong>The question isn't why should you have code folding, it's why shouldn't you?</strong>
Codefolding is like collapsible headers, but for nested code instead of nested headers. Most of us don't have huge blocks of code in our Jupyter notebooks, but there's no reason not to have the option to fold your code, and if you're working through a code-heavy notebook, it will probably be useful.
Here's a real class from a fastai audio library we are developing entirely in Jupyter notebook. It will fail if you try to run it, but it's a fairly good example of code to be folded, so play around with the little arrows on the side.
```
class SignalShifter(RandTransform):
def __init__(self, p=0.5, max_pct= 0.2, max_time=None, direction=0, roll=False):
if direction not in [-1, 0, 1]: raise ValueError("Direction must be -1(left) 0(bidirectional) or 1(right)")
store_attr(self, "max_pct,max_time,direction,roll")
super().__init__(p=p, as_item=True)
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
self.shift_factor = random.uniform(-1, 1)
if self.direction != 0: self.shift_factor = self.direction*abs(self.shift_factor)
def encodes(self, ai:AudioTensor):
if self.max_time is None: s = self.shift_factor*self.max_pct*ai.nsamples
else: s = self.shift_factor*self.max_time*ai.sr
ai.data = shift_signal(ai.data, int(s), self.roll)
return ai
def encodes(self, sg:AudioSpectrogram):
if self.max_time is None: s = self.shift_factor*self.max_pct*sg.width
else: s = self.shift_factor*self.max_time*sg.sr
return shift_signal(sg, int(s), self.roll)
```
If I'm working on the encodes section for AudioSpectrograms, it's easier to work on if it looks like this.

And if I'm working on some other class that is adjacent to `SignalShifter`, life will be easier if it looks like this

#### Tips and Customization
The shortcut for folding is Alt-F but only works if you're on a line that has a triangular folding icon
### AutoSaveTime
<strong>Automatically save your notebook every n minutes</strong>
Autosavetime lets you choose how often to autosave your notebooks. It creates a dropdown in the toolbar that lets you choose the setting. I have it set to two minutes which saves me in case my Google Cloud Server gets preempted while I'm working.

### Hinterland
<strong>VSCode doesn't make me hit tab to see autocomplete options, why should Jupyter?</strong>
The hinterland extension is simple but powerful. Start typing in a code cell and watch the options for autocomplete pop up automatically. Normally in Jupyter, you need to hit tab to show autocomplete options. Whether or not this extension is for you comes down to personal preference, so try it and find out. One annoying caveat is that magics (Jupyter % and %% commands) are included in the autocomplete in contexts where they couldn't possibly appear. Maybe someone can figure out a way to not include magics in the results; if so, contact me and I'll credit you here.
### Rubberband
<strong>Select multiple cells at once by dragging the mouse</strong>
This simple feature should be built into Jupyter already. Hold down shift and click, then drag the mouse and a red dotted selection window appears that selects any cell it comes in contact with. Of course you can achieve multiple cell selection with shift + up/down while in command mode, but it's still a nice addition that comes at no cost.

### Scratchpad
<strong>For the types who write notebooks that require an hour+ of cleanup before sharing</strong>
This extension creates a little gray arrow in the lower right corner of your browser window. Clicking it opens a cell that will allow you to run as many code cells as you need to figure out how to do the thing you're trying to do, all without cluttering up your notebook. You can also show/hide the cell with the shortcut Ctrl-B.

State created in the scratchpad cells will carry over to the main notebook (i.e. x now is set to 1512 in the main notebook) so be extra cautious of any reproducibility issues you may accidentally introduce.
### Scrolldown
<strong>Okay, things are getting pretty marginal at this point, last one!</strong>
Scrolldown is simple: once the output window overflows and a scrollbar is created, it automatically scrolls down and shows you the latest output.
```
#this should demonstrate what we're talking about
import time
for i in range(1000):
print(i)
time.sleep(0.01)
```
### Disabling "Nice Extensions"
Choose what you want and please move on
```
# !jupyter nbextension enable gist_it/main
# !jupyter nbextension disable codefolding/main
# !jupyter nbextension disable autosavetime/main
# !jupyter nbextension disable execute_time/ExecuteTime
# !jupyter nbextension disable hinterland/hinterland
# !jupyter nbextension disable rubberband/main
# !jupyter nbextension disable move_selected_cells/main
# !jupyter nbextension disable scratchpad/main
# !jupyter nbextension disable scroll_down/main
```
```
class Solution:
    def permuteUnique(self, nums):
        # Naive approach: generate all permutations and skip ones already collected.
        # (A list is used here; the original set() cannot hold unhashable list paths.)
        self.res = []
        self.dfs(nums, [])
        return self.res
    def dfs(self, nums, path):
        if not nums and path not in self.res:
            self.res.append(list(path))
            return
        for i in range(len(nums)):
            path.append(nums[i])
            self.dfs(nums[:i] + nums[i+1:], path)
            path.pop()
from collections import Counter
class Solution:
def permuteUnique(self, nums):
res = []
def dfs(counter, path):
if len(path) == len(nums):
res.append(list(path))
return
for x in counter:
if counter[x]:
counter[x] -= 1
path.append(x)
dfs(counter, path)
path.pop()
counter[x] += 1
dfs(Counter(nums), [])
return res
class Solution:
def permuteUnique(self, nums):
self.res = []
self.seen = [False] * len(nums)
nums.sort()
self.dfs([], nums)
return self.res
def dfs(self, temp, nums):
if len(nums) == len(temp):
self.res.append(list(temp))
return
for i in range(len(nums)):
            if self.seen[i] is False:  # not used yet
if i > 0 and self.seen[i-1] is False and nums[i] == nums[i-1]:
continue
self.seen[i] = True
temp.append(nums[i])
self.dfs(temp, nums)
temp.pop()
self.seen[i] = False
class Solution:
def permuteUnique(self, nums):
res = []
nums.sort()
self.dfs(nums, [], res)
return res
def dfs(self, nums, path, res):
if not nums:
res.append(path)
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i-1]:
continue
self.dfs(nums[:i]+nums[i+1:], path+[nums[i]], res)
class Solution:
def permuteUnique(self, nums):
self.res = []
nums.sort()
self.dfs([], nums)
return self.res
def dfs(self, temp, nums):
if not nums:
self.res.append(list(temp))
return
for i in range(len(nums)):
            if i > 0 and nums[i] == nums[i - 1]:  # skip duplicates
continue
temp.append(nums[i])
self.dfs(temp, nums[:i]+nums[i+1:])
temp.pop()
solution = Solution()
solution.permuteUnique([1,1,2])
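# Cross-check (illustrative): the result should match a brute-force reference
# built with itertools.permutations and de-duplicated with a set.
from itertools import permutations
reference = sorted(map(list, set(permutations([1, 1, 2]))))
assert sorted(solution.permuteUnique([1, 1, 2])) == reference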
```
```
import pandas as pd
from matplotlib import pyplot as plt
import itertools
sample_data = pd.read_csv('../sample_data_wk5.csv')
real_data = pd.read_csv('../real_week5.csv')
sample_data[0:1000:15]
real_data
nfl_marg_d={}
homecount = len(set(sample_data.hometeam))
for i,home_t in enumerate(set(sample_data.hometeam)):
home_all_marg = list((sample_data[sample_data.hometeam == home_t]).margin)
home_avg_marg = sum(home_all_marg)/len(home_all_marg)
nfl_marg_d[i] = (home_t,home_avg_marg)
for j, away_t in enumerate(set(sample_data.awayteam)):
away_all_marg = list((sample_data[sample_data.awayteam == away_t]).margin)
away_avg_marg = sum(away_all_marg)/len(away_all_marg)
nfl_marg_d[j+homecount] = (away_t, -away_avg_marg)
dict(itertools.islice(nfl_marg_d.items(), 5))
from itertools import count
nfl_comp_marg_d={}
num_games = len(real_data)
for i,team in enumerate(zip(count(step=2),real_data.hometeam,real_data.awayteam)):
r_home_team = team [1]
r_home_marg = int(real_data.margin[real_data.hometeam == r_home_team])
r_away_team = team [2]
r_away_marg = -r_home_marg
nfl_comp_marg_d[i] = (r_home_team,r_home_marg)
nfl_comp_marg_d[i+num_games] = (r_away_team,r_away_marg)
from itertools import count
d_cmp={}
num_games = len(real_data)
for i,team in enumerate(zip(count(step=2),real_data.hometeam,real_data.awayteam)):
r_home_team = team [1]
r_away_team = team [2]
r_home_marg = int(real_data.margin[real_data.hometeam == r_home_team])
r_away_marg = -r_home_marg
p_home_all_marg = list((sample_data[sample_data.hometeam == r_home_team]).margin)
p_home_marg = sum(p_home_all_marg)/len(p_home_all_marg)
#p_away_all_marg = list((sample_data[sample_data.away == r_away_team]).margin)
#p_away_marg = sum(p_away_all_marg)/len(p_away_all_marg)
p_away_marg = -p_home_marg
d_home_marg = r_home_marg - p_home_marg
d_away_marg = r_away_marg - p_away_marg
d_cmp[i] = (r_home_team,d_home_marg)
d_cmp[i+num_games] = (r_away_team,d_away_marg)
d_cmp
df_marg = pd.DataFrame.from_dict(nfl_marg_d, orient='index',columns = ['team','margin'])
df_marg_team = df_marg.sort_values(by=['team'],ignore_index=True,ascending = False)
df_marg_top = df_marg.sort_values(by=['margin'],ignore_index=True,ascending = False)
df_marg_bot = df_marg.sort_values(by=['margin'],ignore_index=True,ascending = True)
#display(df_marg_top[:5])
#df_marg_bot[:5]
df_marg_team[:5]
df_comp_marg = pd.DataFrame.from_dict(nfl_comp_marg_d, orient='index',columns = ['team','margin'])
df_comp_marg_team = df_comp_marg.sort_values(by=['team'],ignore_index=True,ascending = False)
df_comp_marg_top = df_comp_marg.sort_values(by=['margin'],ignore_index=True,ascending = False)
df_comp_marg_bot = df_comp_marg.sort_values(by=['margin'],ignore_index=True,ascending = True)
#df_comp_marg_top[:5])
#df_comp_marg_bot[:5]
df_comp_marg_team[:5]
df_cmp = pd.DataFrame.from_dict(d_cmp, orient='index',columns = ['team','diff_margin'])
df_cmp = df_cmp.sort_values(by=['team'],ignore_index=True,ascending = False)
df_cmp['r_margin'] = df_comp_marg_team['margin']
df_cmp['p_margin'] = df_marg_team['margin']
df_cmp_marg_srt = df_cmp.sort_values(by=['diff_margin'],ignore_index=True,ascending = False)
df_cmp_marg_srt
from matplotlib.pyplot import figure
plt.figure(figsize = (18,10))
plt.plot(df_cmp_marg_srt.team,df_cmp_marg_srt.diff_margin,'x')
plt.plot(df_cmp_marg_srt.team,df_cmp_marg_srt.diff_margin)
plt.xticks(rotation=90)
plt.xlabel('Team')
plt.ylabel('Real Pnt Margin - Predicted Pnt Margin')
plt.title('Difference in Actual vs Predicted Point Spread (NFL Week 5)')
plt.show()
from matplotlib.pyplot import figure
plt.figure(figsize = (18,9))
plt.plot(df_cmp_marg_srt.team,df_cmp_marg_srt.diff_margin)
plt.plot(df_cmp_marg_srt.team,df_cmp_marg_srt.r_margin,'x')
plt.plot(df_cmp_marg_srt.team,df_cmp_marg_srt.p_margin,'x')
plt.legend(['diff', 'real', 'predicted'])
plt.xticks(rotation=90)
plt.xlabel('Team')
plt.ylabel('Real Pnt Margin - Predicted Pnt Margin')
plt.title('Difference in Actual vs Predicted Point Spread (NFL Week 5)')
plt.show()
```
# ServerSim Overview and Tutorial
## Introduction
This is an overview and tutorial about ***ServerSim***, a framework for the creation of discrete event simulation models to analyze the performance, throughput, and scalability of services deployed on computer servers.
Following the overview of ServerSim, we will provide an example and tutorial of its use for the comparison of two major service deployment patterns.
This document is a Jupyter notebook. See http://jupyter.org/ for more information on Jupyter notebooks.
## ServerSim Core Concepts
***ServerSim*** is a small framework based on ***SimPy***, a well-known discrete-event simulation framework written in the ***Python*** language. The reader should have at least a cursory familiarity with Python and SimPy (https://simpy.readthedocs.io/en/latest/contents.html) in order to make the most of this document.
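For readers who have not used SimPy before, the following minimal, self-contained example (plain SimPy, nothing ServerSim-specific) illustrates the basic discrete-event pattern of processes yielding timeout events to the environment:
```
import simpy

def clock(env, name, tick):
    # A SimPy process: repeatedly yields timeout events back to the environment.
    while True:
        print(name, "at simulated time", env.now)
        yield env.timeout(tick)

env = simpy.Environment()
env.process(clock(env, "fast", 0.5))
env.process(clock(env, "slow", 1.0))
env.run(until=2)  # advance simulated time to t = 2
```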
Python is well-suited to this kind of application due to its rapid development dynamic language characteristics and the availability of powerful libraries relevant for this kind of work. In addition to SimPy, we will use portions of ***SciPy***, a powerful set of libraries for efficient data analysis and visualization that includes *Matplotlib*, which will be used for plotting graphs in our tutorial.
ServerSim consists of several classes and utilities. The main classes are described below.
### class Server
Represents a server -- physical, VM, or container, with a predetermined computation capacity. A server can execute arbitrary service request types. The computation capacity of a server is represented in terms of a number of hardware threads and a total speed number (computation units processed per unit of time). The total speed is equally apportioned among the hardware threads, to give the speed per hardware thread. A server also has a number of associated software threads (which must be no smaller than the number of hardware threads). Software threads are relevant for blocking computations only.
The simulations in this document assume non-blocking services, so the software threads will not be of consequence in the tutorial example.
Attributes:
- *env*: SimPy Environment, used to start and end simulations, and used internally by SimPy to control simulation events.
- *max_concurrency*: The maximum number of hardware threads for the server.
- *num_threads*: The maximum number of software threads for the server.
- *speed*: Aggregate server speed across all hardware threads.
- *name*: The server's name.
- *hw_svc_req_log*: If not None, a list where hardware
service requests will be logged. Each log entry is a
triple ("hw", name, svc_req), where name is this server's
name and svc_req is the current service request asking for
hardware resources.
- *sw_svc_req_log*: If not None, a list where software thread
service requests will be logged. Each log entry is a
triple ("sw", name, svc_req), where name is this server's
name and svc_req is the current service request asking for a
software thread.
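To make the capacity model concrete, here is a small worked example (illustrative numbers only) of how the total speed is apportioned among hardware threads:
```
# Illustrative values, not taken from the tutorial simulations below.
speed = 100.0           # total compute units processed per unit of time
max_concurrency = 4     # hardware threads
speed_per_thread = speed / max_concurrency   # 25.0 compute units per unit of time

compute_units = 50.0    # demand of a single service request
print(compute_units / speed_per_thread)      # -> 2.0 time units on one hardware thread
```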
### class SvcRequest
A request for execution of computation units on one or more servers.
A service request submission is implemented as a SimPy Process.
A service request can be a composite of sub-requests.
A service request that is part of a composite has an attribute that
is a reference to its parent service request. In addition,
such a composition is implemented through the *gen* attribute of the
service request, which is a generator. That generator can yield service
request submissions for other service requests.
By default, a service request is non-blocking, i.e., a thread is
held on the target server only while the service request itself
is executing; the thread is relinquished when the request
finishes executing and it passes control to its sub-requests.
However, blocking service requests can be modeled as well (see the
*Blkg* class).
Attributes:
- *env*: The SimPy Environment.
- *parent*: The immediately containing service request, in case this
is part of a composite service request. None otherwise.
- *svc_name*: Name of the service this request is associated with.
- *gen*: Generator which defines the behavior of this request. The
generator produces an iterator which yields simpy.Event
instances. The submit() method wraps the iterator in a
simpy.Process object to schedule the request for execution
by SimPy.
- *server*: The target server. May be None for composite service
requests, i.e., those not produced by CoreSvcRequester.
- *in_val*: Optional input value of the request.
- *in_blocking_call*: Indicates whether this request is
in the scope of a blocking call. When this parameter
is true, the service request will hold a software
thread on the target server while the service
request itself and any of its sub-requests (calls
to other servers) are executing. Otherwise, the
call is non-blocking, so a thread is held on the
target server only while the service request itself
is executing; the thread is relinquished when
this request finishes executing and it passes control
to its sub-requests.
- *out_val*: Output value produced from in_val by the service
execution. None by default.
- *id*: The unique numerical ID of this request.
- *time_log*: List of tag-time pairs
representing significant occurrences for this request.
- *time_dict*: Dictionary with contents of *time_log*,
for easier access to information.
### class SvcRequester
Base class of service requesters.
A service requester represents a service. In this framework,
a service requester is a factory for service requests. "Deploying"
a service on a server is modeled by having service requests
produced by the service requester sent to the target server.
A service requester can be a composite of sub-requesters, thus
representing a composite service.
Attributes:
- *env*: The SimPy Environment.
- *svc_name*: Name of the service.
- *log*: Optional list to collect all service request objects
produced by this service requester.
### class UserGroup
Represents a set of identical users or clients that submit
service requests.
Each user repeatedly submits service requests produced
by service requesters randomly selected from the set
of service requesters specified for the group.
Attributes:
- *env*: The Simpy Environment.
- *num_users*: Number of users in group. This can be either a
positive integer or a sequence of (float, int), where
the floats are monotonically increasing. In this case,
the sequence represents a step function of time, where each pair
represents a step whose range of *x* values extend from the
first component of the pair (inclusive) to the first
component of the next pair (exclusive), and whose *y* value
is the second component of the pair. The first pair in
the sequence must have 0 as its first component.
If the num_users argument is an int, it is transformed
into the list [(0, num_users)].
- *name*: This user group's name.
- *weighted_svcs*: List of pairs of
SvcRequester instances and positive numbers
representing the different service request types issued by
the users in the group and their weights. The weights are
the relative frequencies with which the service requesters
will be executed (the weights do not need to add up to 1,
as they are normalized by this class).
- *min_think_time*: The minimum think time between service
requests from a user. Think time will be uniformly
distributed between min_think_time and max_think_time.
- *max_think_time*: The maximum think time between service
requests from a user. Think time will be uniformly
distributed between min_think_time and max_think_time.
- *quantiles*: List of quantiles to be tallied. It
defaults to [0.5, 0.95, 0.99] if not provided.
- *svc_req_log*: If not None, a sequence where service requests will
be logged. Each log entry is a pair (name, svc_req), where
name is this group's name and svc_req is the current
service request generated by this group.
- *svcs*: The first components of *weighted_svcs*.
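The *num_users* step-function convention is easiest to see with a concrete, illustrative value:
```
# 10 users from time 0 (inclusive) up to time 100 (exclusive), 20 users afterwards.
num_users = [(0, 10), (100, 20)]
# A plain integer such as num_users = 10 is equivalent to [(0, 10)].
```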
### class CoreSvcRequester(SvcRequester)
This is the core service requester implementation that
interacts with servers to utilize server resources.
All service requesters are either instances of this class or
composites of such instances created using the various
service requester combinators in this module
Attributes:
- *env*: See base class.
- *svc_name*: See base class.
- *fcompunits*: A (possibly random) function that
generates the number of compute units required to execute a
service request instance produced by this object.
- *fserver*: Function that produces a server (possibly round-robin,
random, or based on server load information) when given a
service request name. Models a load-balancer.
- *log*: See base class.
- *f*: An optional function that is applied to a service request's
in_val to produce its out_val. If f is None, the constant
function that always returns None is used.
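To show how the pieces above fit together, here is a speculative end-to-end sketch wiring a Server, a CoreSvcRequester, and a UserGroup. The `serversim` import path, the constructor argument names and order, and the commented-out activation call are assumptions inferred from the attribute lists above, not confirmed API; the tutorial code later in this document is authoritative.
```
import random

import simpy
# Import path is an assumption; adjust to wherever the ServerSim classes live.
from serversim import Server, CoreSvcRequester, UserGroup

env = simpy.Environment()

# One server: 4 hardware threads sharing 20 compute units per unit of time.
server = Server(env, max_concurrency=4, num_threads=20, speed=20, name="server_1")

def fserver(svc_req_name):
    # Trivial "load balancer": every request goes to the same server.
    return server

def fcompunits():
    # Random compute demand per request, with a mean of 2 compute units.
    return random.expovariate(1.0 / 2.0)

svc = CoreSvcRequester(env, "svc_1", fcompunits, fserver)

# 10 users issuing only svc_1, with think times uniform between 2 and 10.
users = UserGroup(env, 10, "users", [(svc, 1.0)],
                  min_think_time=2.0, max_think_time=10.0)

# users.activate_users()  # hypothetical activation call; the real API may differ
# env.run(until=1000)     # run the simulation for 1000 simulated time units
```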
### *Other service requester classes*
*Following are other service requester classes (subclasses of SvcRequester), in addition to CoreSvcRequester, that can be used to define more complex services, including blocking services, asynchronous fire-and-forget services, sequentially dependent services, parallel service calls, and service continuations. These additional classes are not used in the simulations in this document.*
### class Async(SvcRequester)
Wraps a service requester to produce asynchronous fire-and-forget
service requests.
An asynchronous service request completes and returns immediately
to the parent request, while the underlying (child) service request is
scheduled for execution on its target server.
Attributes:
- *env*: See base class.
- *svc_requester*: The underlying service requester that is wrapped
by this one.
- *log*: See base class.
### class Blkg(SvcRequester)
Wraps a service requester to produce blocking service requests.
A blocking service request will hold a software thread on the
target server until the service request itself and all of its
non-asynchronous sub-requests complete.
Attributes:
- *env*: See base class.
- *svc_requester*: The underlying service requester that is wrapped
by this one.
- *log*: See base class.
### class Seq(SvcRequester)
Combines a non-empty list of service requesters to yield a
sequential composite service requester.
This service requester produces composite service requests.
A composite service request produced by this service
requester consists of a service request from each of the
provided service requesters. Each of the service requests is
submitted in sequence, i.e., each service request is
submitted when the previous one completes.
Attributes:
- *env*: See base class.
- *svc_name*: See base class.
- *svc_requesters*: A composite service request produced by this
service requester consists of a service request from each of
the provided service requesters.
- *cont*: If true, the sequence executes as continuations
of the first request, all on the same server. Otherwise,
each request can execute on a different server.
- *log*: See base class.
### class Par(SvcRequester)
Combines a non-empty list of service requesters to yield a
parallel composite service requester.
This service requester produces composite service requests.
A composite service request produced by this service
requester consists of a service request from each of the
provided service requesters. All of the service requests are
submitted concurrently.
When the attribute cont is True, this represents multi-threaded
execution of requests on the same server. Otherwise, each
service request can execute on a different server.
Attributes:
- *env*: See base class.
- *svc_name*: See base class.
- *svc_requesters*: See class docstring.
- *f*: Optional function that takes the outputs of all the component
service requests and produces the overall output
for the composite. If None then the constant function
that always produces None is used.
- *cont*: If true, all the requests execute on the same server.
Otherwise, each request can execute on a different server.
When cont is True, the server is the container service
request's server if not None, otherwise the server is
picked from the first service request in the list of
generated service requests.
- *log*: See base class.
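The combinator classes above are not used in the simulations below, but a rough sketch of how they might be composed is shown here. The constructor signatures are assumed to take the documented attributes in the order listed, with optional arguments passed as keywords; check the ServerSim source before relying on this.
```
# Assumption: each combinator's constructor takes (env, ...) in the order of
# the attributes documented above; svc_1 and svc_2 are CoreSvcRequester
# instances as in the earlier sketch. Verify the exact signatures against the
# ServerSim source -- this block only illustrates the composition concepts.
from serversim import Async, Blkg, Seq, Par

fire_and_forget = Async(env, svc_2)           # returns to the parent immediately
blocking = Blkg(env, svc_1)                   # holds a software thread until done
pipeline = Seq(env, "svc_1_then_svc_2", [svc_1, svc_2], cont=True)
fan_out = Par(env, "svc_1_and_svc_2", [svc_1, svc_2], f=None, cont=False)
```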
## Tutorial Example: Comparison of Two Service Deployment Patterns
Below we compare two major service deployment patterns using discrete-event simulations. Ideally, the reader will have had some prior exposure to the Python language in order to follow all the details. However, the concepts and conclusions should be understandable to readers with a software architecture or engineering background, even if they are not familiar with Python.
We assume an application made up of multiple multi-threaded services and consider two deployment patterns:
- **Cookie-cutter deployment**, where all services making up an application are deployed together on each VM or container. This is typical for "monolithic" applications but can also be used for micro-services. See [Fowler](https://martinfowler.com/bliki/MicroservicePremium.html) and [Hammant](https://paulhammant.com/2011/11/29/cookie-cutter-scaling/).
- **Individualized deployment**, where each of the services is deployed on its own VM or (more likely) its own container.
In the simulations below, the application is made up of just two services, to simplify the model and the analysis, but without loss of generality in terms of the main conclusions.
### Environment set-up
The code used in these simulations should be compatible with both Python 2.7 and Python 3.x.
Python and the following Python packages need to be installed in your computer:
- jupyter-notebook
- simpy
- matplotlib
- LiveStats
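These can typically be installed with pip; note that the exact PyPI package names may differ slightly from the names above (for example, the Jupyter Notebook package is usually installed as `notebook`), so adjust as needed for your environment.
```
pip install notebook simpy matplotlib livestats
```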
The model in this document should be run from the parent directory of the `serversim` package directory, which contains the source files for the ServerSim framework.
### The core simulation function
Following is the core function used in the simulations. This function will be called with different arguments to simulate different scenarios.
This function sets up a simulation with the following givens:
- Simulation duration of 200 time units (e.g., seconds).
- A set of servers. Each server has 10 hardware threads, 20 software threads, and a speed of 20 compute units per unit of time. The number of servers is fixed by the server_range1 and server_range2 parameters described below.
- Two services:
- svc_1, which consumes a random number of compute units per request, with a range from 0.2 to 3.8, averaging 2.0 compute units per request
- svc_2, which consumes a random number of compute units per request, with a range from 0.1 to 1.9, averaging 1.0 compute units per request
- A user group, with a number of users determined by the num_users parameter described below. The user group generates service requests from the two services, with probabilities proportional to the parameters weight1 and weight2 described below. The think time for users in the user group ranges from 2.0 to 10.0 time units.
#### Parameters:
- *num_users*: the number of users being simulated. This parameter can be either a positive integer or a list of pairs. In the second case, the list of pairs represents a number of users that varies over time as a step function. The first elements of the pairs in the list must be strictly monotonically increasing and each pair in the list represents a step in the step function. Each step starts (inclusive) at the time represented by the first component of the corresponding pair and ends (exclusive) at the time represented by the first component of the next pair in the list.
- *weight1*: the relative frequency of service requests for the first service.
- *weight2*: the relative frequency of service requests for the second service.
- *server_range1*: a Python range representing the numeric server IDs of the servers on which the first service can be deployed.
- *server_range2*: a Python range representing the numeric server IDs of the servers on which the second service can be deployed. This and the above range can be overlapping. In case they are overlapping, the servers in the intersection of the ranges will host both the first and the second service.
#### Imports
We import the required libraries, as well as the `__future__` import for compatibility between Python 2.7 and Python 3.x.
```
# %load simulate_deployment_scenario.py
from __future__ import print_function
from typing import List, Tuple, Sequence
from collections import namedtuple
import random
import simpy
from serversim import *
def simulate_deployment_scenario(num_users, weight1, weight2, server_range1,
server_range2):
# type: (int, float, float, Sequence[int], Sequence[int]) -> Result
Result = namedtuple("Result", ["num_users", "weight1", "weight2", "server_range1",
"server_range2", "servers", "grp"])
def cug(mid, delta):
"""Computation units generator"""
def f():
return random.uniform(mid - delta, mid + delta)
return f
def ld_bal(svc_name):
"""Application server load-balancer."""
if svc_name == "svc_1":
svr = random.choice(servers1)
elif svc_name == "svc_2":
svr = random.choice(servers2)
else:
assert False, "Invalid service type."
return svr
simtime = 200
hw_threads = 10
sw_threads = 20
speed = 20
svc_1_comp_units = 2.0
svc_2_comp_units = 1.0
quantiles = (0.5, 0.95, 0.99)
env = simpy.Environment()
n_servers = max(server_range1[-1] + 1, server_range2[-1] + 1)
servers = [Server(env, hw_threads, sw_threads, speed, "AppServer_%s" % i)
for i in range(n_servers)]
servers1 = [servers[i] for i in server_range1]
servers2 = [servers[i] for i in server_range2]
svc_1 = CoreSvcRequester(env, "svc_1", cug(svc_1_comp_units,
svc_1_comp_units*.9), ld_bal)
svc_2 = CoreSvcRequester(env, "svc_2", cug(svc_2_comp_units,
svc_2_comp_units*.9), ld_bal)
weighted_txns = [(svc_1, weight1),
(svc_2, weight2)
]
min_think_time = 2.0 # .5 # 4
max_think_time = 10.0 # 1.5 # 20
svc_req_log = [] # type: List[Tuple[str, SvcRequest]]
grp = UserGroup(env, num_users, "UserTypeX", weighted_txns, min_think_time,
max_think_time, quantiles, svc_req_log)
grp.activate_users()
env.run(until=simtime)
return Result(num_users=num_users, weight1=weight1, weight2=weight2,
server_range1=server_range1, server_range2=server_range2,
servers=servers, grp=grp)
```
### Printing the simulation results
The following function prints the outputs from the above core simulation function.
```
# %load print_results.py
from __future__ import print_function
from typing import Sequence, Any, IO
from serversim import *
def print_results(num_users=None, weight1=None, weight2=None, server_range1=None,
server_range2=None, servers=None, grp=None, fi=None):
# type: (int, float, float, Sequence[int], Sequence[int], Sequence[Server], UserGroup, IO[str]) -> None
if fi is None:
import sys
fi = sys.stdout
print("\n\n***** Start Simulation --", num_users, ",", weight1, ",", weight2, ", [", server_range1[0], ",", server_range1[-1] + 1,
") , [", server_range2[0], ",", server_range2[-1] + 1, ") *****", file=fi)
print("Simulation: num_users =", num_users, file=fi)
print("<< ServerExample >>\n", file=fi)
indent = " " * 4
print("\n" + "Servers:", file=fi)
for svr in servers:
print(indent*1 + "Server:", svr.name, file=fi)
print(indent * 2 + "max_concurrency =", svr.max_concurrency, file=fi)
print(indent * 2 + "num_threads =", svr.num_threads, file=fi)
print(indent*2 + "speed =", svr.speed, file=fi)
print(indent * 2 + "avg_process_time =", svr.avg_process_time, file=fi)
print(indent * 2 + "avg_hw_queue_time =", svr.avg_hw_queue_time, file=fi)
print(indent * 2 + "avg_thread_queue_time =", svr.avg_thread_queue_time, file=fi)
print(indent * 2 + "avg_service_time =", svr.avg_service_time, file=fi)
print(indent * 2 + "avg_hw_queue_length =", svr.avg_hw_queue_length, file=fi)
print(indent * 2 + "avg_thread_queue_length =", svr.avg_thread_queue_length, file=fi)
print(indent * 2 + "hw_queue_length =", svr.hw_queue_length, file=fi)
print(indent * 2 + "hw_in_process_count =", svr.hw_in_process_count, file=fi)
print(indent * 2 + "thread_queue_length =", svr.thread_queue_length, file=fi)
print(indent * 2 + "thread_in_use_count =", svr.thread_in_use_count, file=fi)
print(indent*2 + "utilization =", svr.utilization, file=fi)
print(indent*2 + "throughput =", svr.throughput, file=fi)
print(indent*1 + "Group:", grp.name, file=fi)
print(indent*2 + "num_users =", grp.num_users, file=fi)
print(indent*2 + "min_think_time =", grp.min_think_time, file=fi)
print(indent*2 + "max_think_time =", grp.max_think_time, file=fi)
print(indent * 2 + "responded_request_count =", grp.responded_request_count(None), file=fi)
print(indent * 2 + "unresponded_request_count =", grp.unresponded_request_count(None), file=fi)
print(indent * 2 + "avg_response_time =", grp.avg_response_time(), file=fi)
print(indent * 2 + "std_dev_response_time =", grp.std_dev_response_time(None), file=fi)
print(indent*2 + "throughput =", grp.throughput(None), file=fi)
for svc in grp.svcs:
print(indent*2 + svc.svc_name + ":", file=fi)
print(indent * 3 + "responded_request_count =", grp.responded_request_count(svc), file=fi)
print(indent * 3 + "unresponded_request_count =", grp.unresponded_request_count(svc), file=fi)
print(indent * 3 + "avg_response_time =", grp.avg_response_time(svc), file=fi)
print(indent * 3 + "std_dev_response_time =", grp.std_dev_response_time(svc), file=fi)
print(indent*3 + "throughput =", grp.throughput(svc), file=fi)
```
### Mini-batching, plotting, and comparison of results
The following three functions handle mini-batching, plotting, and comparison of results.
- ***minibatch_resp_times*** -- This function takes the user group from the results of the deployment_example function, scans the service request log of the user group, and produces mini-batch statistics for every time_resolution time units. For example, with a simulation of 200 time units and a time_resolution of 5 time units, we end up with 40 mini-batches. The statistics produced are the x values corresponding to each mini-batch, and the counts, means, medians, 95th percentile, and 99th percentile in each mini-batch.
- ***plot_counts_means_q95*** -- Plots superimposed counts, means, and 95th percentiles for two mini-batch sets coming from two simulations.
- ***compare_scenarios*** -- Combines the above two functions to produce comparison plots from two simulations.
```
# %load report_resp_times.py
from typing import TYPE_CHECKING, Sequence, Tuple
import functools as ft
from collections import OrderedDict
import matplotlib.pyplot as plt
from livestats import livestats
if TYPE_CHECKING:
from serversim import UserGroup
def minibatch_resp_times(time_resolution, grp):
# type: (float, UserGroup) -> Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]]
quantiles = [0.5, 0.95, 0.99]
xys = [(int(svc_req.time_dict["submitted"]/time_resolution),
svc_req.time_dict["completed"] - svc_req.time_dict["submitted"])
for (_, svc_req) in grp.svc_req_log
if svc_req.is_completed]
def ffold(map_, p):
x, y = p
if x not in map_:
map_[x] = livestats.LiveStats(quantiles)
map_[x].add(y)
return map_
xlvs = ft.reduce(ffold, xys, dict())
xs = sorted(xlvs.keys())  # sorted mini-batch indices (works in both Python 2 and 3)
counts = [xlvs[x].count for x in xs]
means = [xlvs[x].average for x in xs]
q_50 = [xlvs[x].quantiles()[0] for x in xs]
q_95 = [xlvs[x].quantiles()[1] for x in xs]
q_99 = [xlvs[x].quantiles()[2] for x in xs]
return xs, counts, means, q_50, q_95, q_99
def plot_counts_means_q95(quantiles1, quantiles2):
x = quantiles1[0] # should be same as quantiles2[0]
counts1 = quantiles1[1]
counts2 = quantiles2[1]
means1 = quantiles1[2]
means2 = quantiles2[2]
q1_95 = quantiles1[4]
q2_95 = quantiles2[4]
# Plot counts
plt.plot(x, counts1, color='b', label="Counts 1")
plt.plot(x, counts2, color='r', label="Counts 2")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel("Time buckets")
plt.ylabel("Throughput")
plt.show()
# Plot averages and 95th percentiles
plt.plot(x, means1, color='b', label="Means 1")
plt.plot(x, q1_95, color='c', label="95th Percentile 1")
plt.plot(x, means2, color='r', label="Means 2")
plt.plot(x, q2_95, color='m', label="95th Percentile 2")
# Hack to avoid duplicated labels (https://stackoverflow.com/questions/13588920/stop-matplotlib-repeating-labels-in-legend)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.05, 1),
loc=2, borderaxespad=0.)
plt.xlabel("Time buckets")
plt.ylabel("Response times")
plt.show()
def compare_scenarios(sc1, sc2):
grp1 = sc1.grp
grp2 = sc2.grp
quantiles1 = minibatch_resp_times(5, grp1)
quantiles2 = minibatch_resp_times(5, grp2)
plot_counts_means_q95(quantiles1, quantiles2)
```
### Random number generator seed
We set the random number generator seed to a known value to produce repeatable simulations. Comment-out this line to have a different system-generated seed every time the simulations are executed.
```
random.seed(123456)
```
### Simulations
Several simulation scenarios are executed below. See the descriptions of the parameters and hard-coded given values of the core simulation function above.
With 10 servers and weight_1 = 2 and weight_2 = 1, this configuration supports 720 users with average response times close to the minimum possible. How did we arrive at that number? For svc_1, the heavier of the two services, the minimum possible average response time is 1 time unit (= 20 server compute units / 10 hardware threads / 2 average service compute units). One server can handle 10 concurrent svc_1 users without think time, or 60 concurrent svc_1 users with average think time of 6 time units. Thus, 10 servers can handle 600 concurrent svc_1 users. Doing the math for both services and taking their respective probabilities into account, the number of users is 720. For full details, see the spreadsheet [CapacityPlanning.xlsx](https://github.com/pvillela/ServerSim/blob/master/CapacityPlanning.xlsx). Of course, due to randomness, there will be queuing and the average response times will be greater than the minimum possible. With these numbers, the servers will be running *hot* as there is no planned slack capacity.
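The following back-of-the-envelope check reproduces the 720 figure under the simplifying assumption that each user issues roughly one request per average think time (6 time units), with queueing and the service time itself ignored; the linked spreadsheet may organize the calculation differently.
```
# Rough capacity check for the 720-user figure above (simplified: one request
# per user per average think time; queueing and service time ignored).
n_servers = 10
speed = 20.0                         # compute units per time unit per server
w1, w2 = 2, 1                        # request weights for svc_1 and svc_2
cu1, cu2 = 2.0, 1.0                  # average compute units per request
avg_think = (2.0 + 10.0) / 2         # 6 time units

avg_cu_per_request = (w1 * cu1 + w2 * cu2) / (w1 + w2)   # 5/3 compute units
demand_per_user = avg_cu_per_request / avg_think          # compute units per time unit
capacity = n_servers * speed                               # 200 compute units per time unit
print(capacity / demand_per_user)                          # -> 720.0 users
```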
#### Simulation 0
This is a simulation of one scenario (not a comparison) and printing out of its results. It illustrates the use of the print_results function. The scenario here is the same as the first scenario for Simulation 1 below.
```
sc1 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
print_results(**sc1.__dict__)
```
#### Simulation 1
In the first scenario, there are 10 servers which are shared by both services. In the second scenario, there are 10 servers, of which 8 are allocated to the first service and 2 are allocated to the second service. This allocation is proportional to their respective loads.
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
random.setstate(rand_state)
sc2 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1,
server_range1=range(0, 8), server_range2=range(8, 10))
compare_scenarios(sc1, sc2)
```
Repeating above comparison to illustrate variability of results.
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
random.setstate(rand_state)
sc2 = simulate_deployment_scenario(num_users=720, weight1=2, weight2=1,
server_range1=range(0, 8), server_range2=range(8, 10))
compare_scenarios(sc1, sc2)
```
***Conclusions:*** The results of the two deployment strategies are similar in terms of throughput, mean response times, and 95th percentile response times. This is as would be expected, since the capacities allocated under the individualized deployment strategy are proportional to the respective service loads.
#### Simulation 2
Now, we change the weights of the different services, significantly increasing the weight of svc_1 from 2 to 5.
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=720, weight1=5, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
random.setstate(rand_state)
sc2 = simulate_deployment_scenario(num_users=720, weight1=5, weight2=1,
server_range1=range(0, 8), server_range2=range(8, 10))
compare_scenarios(sc1, sc2)
```
***Conclusions:*** The cookie-cutter deployment strategy was able to absorb the change in load mix, while the individualized strategy was not, with visibly lower throughput and higher mean and 95th percentile response times.
#### Simulation 3
For this simulation, we also change the weights of the two services, but now in the opposite direction -- we change the weight of svc_1 from 2 to 1.
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=720, weight1=1, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
random.setstate(rand_state)
sc2 = simulate_deployment_scenario(num_users=720, weight1=1, weight2=1,
server_range1=range(0, 8), server_range2=range(8, 10))
compare_scenarios(sc1, sc2)
```
***Conclusions:*** Again the cookie-cutter deployment strategy was able to absorb the change in load mix, while the individualized strategy was not, with visibly lower throughput and higher mean and 95th percentile response times. Notice that due to the changed load mix, the total load was lower than before and, with the same number of servers, the cookie-cutter configuration had excess capacity while the individualized configuration had excess capacity for svc_1 and insufficient capacity for svc_2.
#### Simulation 4
We now continue with the weights used in Simulation 3, but adjust server capacity to account for the lower aggregate load and different load mix.
Below we have three scenarios:
- Scenario 1 (cookie-cutter) removes one server
- Scenario 2a (individualized) removes one server from the pool allocated to svc_1
- Scenario 2b (individualized) removes one server and reassigns one server from the svc_1 pool to the svc_2 pool.
Run the three scenarios:
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=720, weight1=1, weight2=1,
server_range1=range(0, 9), server_range2=range(0, 9))
random.setstate(rand_state)
sc2a = simulate_deployment_scenario(num_users=720, weight1=1, weight2=1,
server_range1=range(0, 7), server_range2=range(7, 9))
random.setstate(rand_state)
sc2b = simulate_deployment_scenario(num_users=720, weight1=1, weight2=1,
server_range1=range(0, 6), server_range2=range(6, 9))
```
Compare the results of scenarios 1 and 2a:
```
compare_scenarios(sc1, sc2a)
```
Compare the results of scenarios 1 and 2b:
```
compare_scenarios(sc1, sc2b)
```
***Conclusions:*** Scenario 1 performs significantly better than Scenario 2a and comparably to Scenario 2b. This simulation shows again that the cookie-cutter strategy is comparable in performance and throughput to a tuned individualized configuration, and beats hands-down an individualized configuration that is not perfectly tuned for the load mix.
#### Vary the number of users over time
The simulations below will vary the load over time by varying the number of users over time. The list below defines a step function that represents the number of users varying over time. In this case, the number of users changes every 50 time units.
```
users_curve = [(0, 900), (50, 540), (100, 900), (150, 540)]
```
#### Simulation 5
This simulation is similar to Simulation 1, the difference being the users curve instead of a constant 720 users.
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=users_curve, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(0, 10))
random.setstate(rand_state)
sc2 = simulate_deployment_scenario(num_users=users_curve, weight1=2, weight2=1,
server_range1=range(0, 8), server_range2=range(8, 10))
compare_scenarios(sc1, sc2)
```
***Conclusions:*** The cookie-cutter and individualized strategies produced similar results.
#### Simulation 6
We now run a simulation similar to Simulation 4, with the difference that the number of users varies over time. This combines load variability over time as well as a change in load mix. As in Simulation 4, we adjust server capacity to account for the lower aggregate load and different load mix.
Below we have three scenarios:
- Scenario 1 (cookie-cutter) removes one server
- Scenario 2a (individualized) removes one server from the pool allocated to svc_1
- Scenario 2b (individualized) removes one server and reassigns one server from the svc_1 pool to the svc_2 pool.
Run the three scenarios:
```
rand_state = random.getstate()
sc1 = simulate_deployment_scenario(num_users=users_curve, weight1=1, weight2=1,
server_range1=range(0, 9), server_range2=range(0, 9))
random.setstate(rand_state)
sc2a = simulate_deployment_scenario(num_users=users_curve, weight1=1, weight2=1,
server_range1=range(0, 7), server_range2=range(7, 9))
random.setstate(rand_state)
sc2b = simulate_deployment_scenario(num_users=users_curve, weight1=1, weight2=1,
server_range1=range(0, 6), server_range2=range(6, 9))
```
Compare the results of scenarios 1 and 2a:
```
compare_scenarios(sc1, sc2a)
```
Compare the results of scenarios 1 and 2b:
```
compare_scenarios(sc1, sc2b)
```
***Conclusions:*** Scenario 1 performs significantly better than Scenario 2a and comparably to Scenario 2b. This simulation shows again that the cookie-cutter strategy is comparable in performance and throughput to a tuned individualized configuration, and beats an individualized configuration that is not perfectly tuned for the load mix.
#### Simulation 7
This final simulation is similar to Simulation 1, with the difference that the number of users is 864 instead of 720. In this scenario, the total number of servers required for best capacity utilization can be calculated to be 12 (see [CapacityPlanning.xlsx](https://github.com/pvillela/ServerSim/blob/master/CapacityPlanning.xlsx)). Under the individualized deployment strategy, the ideal number of servers allocated to svc_1 and svc_2 would be 9.6 and 2.4, respectively. Since the number of servers needs to be an integer, we will run simulations with server allocations to svc_1 and svc_2, respectively, of 10 and 2, 9 and 3, and 10 and 3.
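The 12-server and 9.6/2.4 figures follow from the same simplified arithmetic used earlier (one request per user per 6-time-unit average think time), as the short check below shows; the linked spreadsheet contains the full calculation.
```
# Simplified check of the Simulation 7 capacity figures.
avg_cu_per_request = (2 * 2.0 + 1 * 1.0) / 3        # 5/3 compute units per request
demand = 864 * avg_cu_per_request / 6.0              # total compute units per time unit
print(demand / 20)                                   # -> 12.0 servers in total
print(12 * (2 * 2.0) / (2 * 2.0 + 1 * 1.0))          # -> 9.6 servers for svc_1
print(12 * (1 * 1.0) / (2 * 2.0 + 1 * 1.0))          # -> 2.4 servers for svc_2
```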
Thus, we have five scenarios:
- Scenario 1a (cookie-cutter) with 12 servers
- Scenario 2a1 (individualized) with 9 servers for svc_1 and 3 servers for svc_2
- Scenario 2a2 (individualized) with 10 servers for svc_1 and 2 servers for svc_2
- Scenario 1b (cookie-cutter) with 13 servers
- Scenario 2b (individualized) with 10 servers for svc_1 and 3 servers for svc_2
Run the scenarios:
```
rand_state = random.getstate()
sc1a = simulate_deployment_scenario(num_users=864, weight1=2, weight2=1,
server_range1=range(0, 12), server_range2=range(0, 12))
random.setstate(rand_state)
sc2a1 = simulate_deployment_scenario(num_users=864, weight1=2, weight2=1,
server_range1=range(0, 9), server_range2=range(9, 12))
random.setstate(rand_state)
sc2a2 = simulate_deployment_scenario(num_users=864, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(10, 12))
random.setstate(rand_state)
sc1b = simulate_deployment_scenario(num_users=864, weight1=2, weight2=1,
server_range1=range(0, 13), server_range2=range(0, 13))
random.setstate(rand_state)
sc2b = simulate_deployment_scenario(num_users=864, weight1=2, weight2=1,
server_range1=range(0, 10), server_range2=range(10, 13))
```
Compare the results of scenarios 1a and 2a1:
```
compare_scenarios(sc1a, sc2a1)
```
Compare the results of scenarios 1a and 2a2:
```
compare_scenarios(sc1a, sc2a2)
```
Compare the results of scenarios 1b and 2b:
```
compare_scenarios(sc1b, sc2b)
```
***Conclusions:*** Scenario 1a has comparable throughput but somewhat better response times than Scenario 2a1. Scenario 1a has somewhat better throughput and response times than Scenario 2a2. Scenario 1b has comparable throughput and a bit less extreme response times than Scenario 2b. In all three comparisons, the cookie-cutter strategy performs better than or comparably to the individualized strategy.
#### Overall Conclusions
The various simulations show consistently that the cookie-cutter strategy is comparable in performance and throughput (and therefore hardware utilization) to a tuned individualized configuration, and beats an individualized configuration that is not well-tuned for the load mix. Cookie-cutter thus proves to be a more robust and stable deployment strategy in many realistic situations, in the face of likely load mix fluctuations, mismatches between forecast average load mixes and actual average load mixes, and mismatches between forecast load mixes and allocated server capacities. However, although not highlighted on the simulation graphs presented, it is a fact (that can be observed in the simulation logs) that response times for svc_2 are better under a well-tuned individualized configuration because then svc_2 requests don't have to share a queue with longer-running svc_1 requests. When that's an important consideration, an individualized deployment strategy could be a more appropriate choice.
# Edit polygon
This notebook implements a polygon editor which illustrates combining mouse event modalities with reference frames.
Click to start the polygon.
Type "." to drop a new vertex.
Click again to close the polygon.
Press the reset button to play again.
```
from jp_doodle import dual_canvas
from IPython.display import display
poly_edit = dual_canvas.SnapshotCanvas("editted polygon.png", width=320, height=320)
poly_edit.display_all()
poly_edit.js_init("""
// Add a light backdrop
var background = element.rect({name: "background", x:-15, y:-15, w:370, h:370, color:"#def"})
// Edit in cartesian area [-1:1] x [-1:1]
let frame = element.frame_region(20, 20, 300, 300,
-1, -1, 1, 1);
// Show axes for reference
frame.lower_left_axes({
x_anchor: 0,
y_anchor: 0,
tick_line_config: {color: "green"},
tick_text_config: {color: "blue"},
max_tick_count: 5
})
// Helpful informative text.
var info_div = $("<div>Useful information should show up here eventually.</div>").appendTo(element);
// Store polygon points here.
var polygon_points = [];
// Convenience to add a point to the polygon
var push_location = function (location) {
polygon_points.push([location.x, location.y]);
}
var poly = null;
var circ = null;
var start_polygon = function () {
debugger;
// wait for a mouse click to define the first point of the polygon.
frame.forget_objects(["poly", "circ"]); // forget graphical objects if they exist.
element.reset_events();
polygon_points = [];
// The graphical polygon to edit
poly = frame.polygon({
name: "poly", points: polygon_points, color:"red",
close:false, fill:false, lineWidth:5, events:false});
// A circle to track the mouse
circ = frame.circle({name: "circ", x:0, y:0, r:5, color:"magenta", hide:true, events:false});
var track_mouse = function(event) {
var frame_location = frame.event_model_location(event);
circ.change({x: frame_location.x, y: frame_location.y, hide: false});
};
background.on("mousemove", track_mouse);
var start_click = function(event) {
debugger
var frame_location = frame.event_model_location(event);
// store the initial point and a mouse tracking point
polygon_points = [];
push_location(frame_location); // first vertex
push_location(frame_location); // mouse tracking vertex
// switch modes to continue polygon after a short delay in case of duplicate mouse clicks
element.reset_events();
setTimeout(continue_polygon, 100);
};
background.on("click", start_click);
info_div.html("<div>Click to start polygon.</div>")
};
var continue_polygon = function () {
// space bar drops a point, mouse click completes the polygon
element.reset_events();
var track_mouse = function(event) {
// track the cursor
var frame_location = frame.event_model_location(event);
circ.change({x: frame_location.x, y: frame_location.y, hide: false});
// change the last polygon point to track the cursor
polygon_points.pop();
push_location(frame_location); // replace mouse tracking vertex
poly.change({points: polygon_points});
// set the focus to the canvas so the canvas can receive keyboard events.
element.focus_canvas();
};
background.on("mousemove", track_mouse);
var dot_drops_point = function (event) {
// When the user hits "." drop a new vertex on the polygon
const dot_key_code = 190;
if (event.keyCode == dot_key_code) {
// 'keydown' events do not have locations. Duplicate the tracking vertex.
var vertex = polygon_points.pop();
polygon_points.push(vertex); // dropped location
polygon_points.push(vertex); // mouse tracking vertex
poly.change({points: polygon_points});
}
}
background.on("keydown", dot_drops_point);
var stop_click = function(event) {
// When the user "clicks" in "continue" mode, complete the polygon.
var frame_location = frame.event_model_location(event);
// switch modes to continue polygon
circ.change({hide: true});
// change the last polygon point to track the cursor
polygon_points.pop(); // remove previous mouse tracking vertex
polygon_points.push([frame_location.x, frame_location.y]);
poly.change({points: polygon_points, close: true});
element.reset_events();
info_div.html("<div>Polygon complete.</div>")
};
background.on("click", stop_click);
info_div.html("<div>Type '.' to add a vertex. Click to complete polygon.</div>");
};
//element.invisible_canvas.show();
start_polygon();
$("<button>Restart</button>")
.appendTo(element)
.click(start_polygon);
""")
```
# Frequentist & Bayesian Statistics With Py4J & PyMC3
-----
__[1. Introduction](#first-bullet)__
__[2. Sampling A Distribution Written In Scala Using Py4J](#second-bullet)__
__[3. The Maximum Likelihood Estimator](#third-bullet)__
__[4. Confidence Intervals From Fisher Information](#fourth-bullet)__
__[5. Bayesian Esimatators & Credible Intervals With PyMC3](#fifth-bullet)__
__[6. Connecting The Two Methods](#sixth-bullet)__
__[7. Conclusions](#seventh-bullet)__
## 1. Introduction <a class="anchor" id="first-bullet"></a>
-------------
In this post I want to go back to the basics of statistics, but with an advanced spin on things. By "advanced spin" I mean advanced both in terms of the mathematics and in terms of the computational techniques. The topic I'll dive into is:
Estimating a single parameter value from a distribution and then quantifying the uncertainty in the estimate.
In general I will take two approaches to quantifying the uncertainty in the estimate: the first is [frequentist](https://en.wikipedia.org/wiki/Frequentist_inference) and the second is [Bayesian](https://en.wikipedia.org/wiki/Bayesian_statistics). I was originally inspired by [Jake VanderPlas' post](http://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/) and admit that I am not very seasoned with Bayesian methods. That's why I'll be sticking to a simple example of estimating the mean rate or 𝜆 in a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution#) from sampled data.
From the computational perspective, I wanted to do something different and decided to write the probability distribution for generating the data in [Scala](https://www.scala-lang.org/), but then use it with Python. Why did I do this? Well, I like Scala and enjoyed the challenge of writing a Poisson distribution using a functional approach. I also wanted to learn more about how to use [Py4J](https://www.py4j.org/) which can be used to work with functions and objects in the [JVM](https://en.wikipedia.org/wiki/Java_virtual_machine) from Python. [Apache Spark](https://spark.apache.org/) actually uses Py4J in PySpark to write Python wrappers for their Scala API. I've used both PySpark and Spark in Scala extensively in the past and doing this project gave me an opportunity to understand how PySpark works better.
The source code for this project can be found [here](https://github.com/mdh266/BayesMLE).
Let's get into how I wrote the Poisson distribution in Scala and used it from Python to sample data.
## 2. Sampling A Distribution Written In Scala Using Py4J <a class="anchor" id="second-bullet"></a>
---------------
I wrote a [Poisson distribution in Scala](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonDistribution.scala) so that I could sample data from it to estimate the mean rate $\lambda$ of that distribution. The Poisson distribution is a probability distribution for a random variable $y \, \in \, \mathbb{Z}^{+}$ that represents some count phenomenon, i.e. a number of non-negative integer occurrences in some fixed time frame. For example, the number of trains passing through a station per day or the number of customers that visit a website per hour can be modeled with a Poisson distribution. The mathematical form of the distribution is,
$$ p(y \, = \, k) \; = \; \frac{\lambda^{k} e^{-\lambda} }{k!} $$
The parameter $\lambda \, \in \, \mathbb{R}^{+}$ is the rate parameter, i.e. the average number of customers that visit the website per hour, and it can be any non-negative real-valued number.
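As a quick, framework-independent sanity check of this formula, the pmf can be evaluated directly in Python; the value for $k=1$, $\lambda=1$ can later be compared with the Scala implementation's `prob` method.
```
# Direct evaluation of the Poisson pmf, independent of the Scala implementation.
from math import exp, factorial

def poisson_pmf(k, lam):
    return lam**k * exp(-lam) / factorial(k)

print(poisson_pmf(1, 1.0))                            # ~0.3679
print(sum(poisson_pmf(k, 3.0) for k in range(50)))    # ~1.0 (probabilities sum to 1)
```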
The first step in this project was to create the Poisson class. I did this in a previous [project](https://github.com/mdh266/PoissonDistributionInScala); however, one key difference is that for Py4J the return value of any public function in Scala/Java needs to be a Java object. Specifically for me the [sample](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/Poisson.scala) method needs to return a Java List of integers ([java.util.List[Int]](https://www.javatpoint.com/java-list)). I originally tried returning a [Scala List](https://www.scala-lang.org/api/current/scala/collection/immutable/List.html) which worked fine in pure Scala, but when returning the list to Python I got a generic "Java Object" and realized Py4J was only able to serialize specific datatypes between Python and the JVM.
In order to use [this class](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonDistribution.scala) from Python with Py4J I needed to do three things:
1. Create a [Gateway Server](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/Main.scala)
2. Create a [class entrypoint](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) to allow for setting the Poisson attributes
3. Package the code as a jar using a build tool such as [Maven](https://maven.apache.org/) or [SBT](https://www.scala-sbt.org/)
The first step is pretty straightforward from the [Py4J Documentation](https://www.py4j.org/getting_started.html) and is in the [Main.scala](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/Main.scala) object. However, in order to accommodate the use of [Docker](https://www.docker.com/) I had to adapt the address for the [GatewayServer](https://www.py4j.org/_static/javadoc/index.html?py4j/GatewayServer.html) based on this [discussion on GitHub](https://github.com/bartdag/py4j/issues/360):
import java.net.InetAddress
import py4j.GatewayServer
object Main {
def main(args: Array[String]) = {
System.setProperty("java.net.preferIPv4Stack", "true");
val addr = InetAddress.getByName("0.0.0.0")
val app = new PoissonEntryPoint()
val builder = new GatewayServer.GatewayServerBuilder(app)
builder.javaAddress(addr);
val server = builder.build();
server.start()
println("Gateway Server Started")
}
}
The [GatewayServer](https://www.py4j.org/_static/javadoc/py4j/GatewayServer.html), in the author's own words, *allows Python programs to communicate with the JVM through a local network socket.* The GatewayServer takes an *entrypoint* as a parameter which can be any object (see [here](https://www.py4j.org/getting_started.html#writing-the-python-program) for more info). However, the entrypoint doesn't really offer a way for us to pass the $\lambda$ value from [Python](https://www.py4j.org/getting_started.html#writing-the-python-program) to the Poisson constructor in Scala. To get around this issue I created a [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) case class:
case class PoissonEntryPoint() {
def Poisson(lambda : Double) : PoissonDistribution = {
new PoissonDistribution(lambda)
}
}
This case class really just acts as a [Singleton](https://docs.scala-lang.org/tour/singleton-objects.html), but is a class instead of an object. The point of the `PoissonEntryPoint` class is simply to be able to create a Poisson class with a specific $\lambda$ value after starting the GatewayServer.
Now let's talk about how the project is structured and how to package it for use. The project structure is:
src/
main/
scala/
Main.scala
PoissonDistribution.scala
PoissonEntryPoint.scala
pom.xml
The `pom.xml` file is called the [project object model](https://maven.apache.org/guides/introduction/introduction-to-the-pom.html) and is a file that contains all the instructions for [Maven](https://maven.apache.org/). I won't go into the details here, but I will say that Maven is a Java build tool to compile and package code and [SBT](https://www.scala-sbt.org/) is the Scala equivalent build tool. Since Scala is a [JVM language](https://en.wikipedia.org/wiki/List_of_JVM_languages) we can use either build tool and I went with Maven since I'm more familiar with it and because it was much easier to find examples with Py4J using Maven than with SBT.
To package the code into a [uber jar](https://stackoverflow.com/questions/11947037/what-is-an-uber-jar), use the command:
mvn package
Then we can start our Py4J gateway server with the command:
java -jar target/poisson-1.0-jar-with-dependencies.jar
We can test that the server is running on the default port 25333 on your local machine with the command,
nc -vz 0.0.0.0 25333
and you should see,
Connection to 0.0.0.0 port 25333 [tcp/*] succeeded!
Now we can start up our Jupyter notebook and connect Python to the JVM with the following code taken directly from [Py4J's](https://www.py4j.org/index.html#) home page. This involves setting up the [JavaGateway](https://www.py4j.org/py4j_java_gateway.html) which is the *main interaction point between a Python VM and a JVM*. When running on your local machine this is simple; however, in order to use my Poisson distribution and Jupyter Lab within [Docker Compose](https://docs.docker.com/compose/) I had to pass the appropriate [GatewayParameters](https://www.py4j.org/py4j_java_gateway.html#py4j.java_gateway.GatewayParameters) which specify the address for the Scala [GatewayServer](https://www.py4j.org/_static/javadoc/py4j/GatewayServer.html) (the `py4jserver` service in Docker compose) and the port it uses. In addition, I had to pass the [CallbackServerParameters](https://www.py4j.org/py4j_java_gateway.html#py4j.java_gateway.CallbackServerParameters) which specify the address for this notebook (the `jupyter` service in Docker compose) as well as the port it uses.
The callback server allows the JVM to call back Python objects as discussed [here](https://www.py4j.org/advanced_topics.html#implementing-java-interfaces-from-python-callback). I definitely had to have a friend that knows DevOps help me figure this one out, but it doesn't add too much complexity to the basic Py4J example:
```
from py4j.java_gateway import JavaGateway, GatewayParameters, CallbackServerParameters
gateway = JavaGateway(
gateway_parameters=GatewayParameters(address='py4jserver', port=25333),
callback_server_parameters=CallbackServerParameters(address='jupyter', port=25334)
)
app = gateway.entry_point
```
The `app` object is now the instantiated [PoissonEntryPoint](https://github.com/mdh266/BayesMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) class. We can see the class type in Python:
```
type(app)
```
As well as looking at the methods for the class:
```
dir(app)
```
We can see the `Poisson` class method! Since PoissonEntryPoint is a [case class](https://docs.scala-lang.org/tour/case-classes.html) it comes with a number of default methods, just like a [data class](https://realpython.com/python-data-classes/) in Python.
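As a rough Python analogy (the `Point` class below is purely illustrative and not part of this project), a data class also generates methods such as `__init__`, `__repr__`, and `__eq__` for you:
```
from dataclasses import dataclass

@dataclass
class Point:
    x: float
    y: float

# __init__, __repr__ and __eq__ are generated automatically,
# much like the copy/equals/toString methods of a Scala case class
print(Point(1.0, 2.0) == Point(1.0, 2.0))  # True
```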
We can then create a Poisson class instance and see that the value of $\lambda$ is 1.0:
```
p1 = app.Poisson(1.0)
```
We can then instantiate another Poisson object:
```
p2 = app.Poisson(3.0)
```
Note that the [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala) class has a function `Poisson` that returns a specific [PoissonDistribution](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala) object initialized with the value $\lambda$. It is important that $\lambda$ is not an attribute of the [PoissonEntryPoint](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonEntryPoint.scala), otherwise we would not get separate values of $\lambda$:
```
p1.getLambda()
p2.getLambda()
```
The really nice thing about Py4J *is that you can treat objects in the JVM as if they are Python objects.* For instance we can see the methods in the object:
```
dir(p1)
```
We can then use the methods of the [PoissonDistribution](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala) object just as they would be used directly in Scala. For instance, we can get the probability of $y=1$ when $\lambda = 1$:
```
p1.prob(1)
```
Now let's generate a random sample from the Poisson object:
```
sample = p1.sample(1000)
sample[:3]
```
Let's check what Py4J hands back to Python, given that the [PoissonDistribution class](https://github.com/mdh266/BayesBootstrapMLE/blob/main/src/main/scala/PoissonDistribution.scala) returns a `java.util.List[Int]`:
```
type(sample)
```
We can then convert it to a plain Python list:
```
type(list(sample))
```
As mentioned previously, Py4J can only serialize specific Java objects back to Python, but I think that's still awesome! This is also why I needed to convert from a Scala `List[Int]` to a `java.util.List[Int]`; without it the returned object would just be a generic Java `Object` and I wouldn't be able to access its contents.
Now let's visualize the Poisson distribution for different values of $\lambda$:
```
import pandas as pd
import seaborn as sns
df = pd.melt(
pd.DataFrame({
'1':list(app.Poisson(1.0).sample(100)),
'2':list(app.Poisson(2.0).sample(100)),
'3':list(app.Poisson(3.0).sample(100)),
'4':list(app.Poisson(4.0).sample(100)),
'5':list(app.Poisson(5.0).sample(100)),
'6':list(app.Poisson(6.0).sample(100))
}),
var_name=["lambda"]
)
sns.displot(df, x="value", hue="lambda", kind='kde', height=5)
```
Note that the negative values are not real, but an artifact of the interpolation used in [Kernel Density Estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation). The same is true of the wiggles in the distribution.
We can verify the former:
```
df.query("value < 0")
```
Now let's get into the Maximum Likelihood Estimator for $\lambda$ using the distribution `p1`.
## 3. The Maximum Likelihood Estimator <a class="anchor" id="third-bullet"></a>
----------
First, what is the difference between a statistic and an estimator? A **statistic** is any function of a sample. An **estimator** is any function of a sample that is used to estimate a population parameter. The **maximum likelihood estimator** is the value of the population parameter $\lambda$ that maximizes the probability of observing the sample.
We can find the MLE from an independent, identically distributed sample $y_1, y_2, \, \ldots \,, y_{n}$ from $f(y \, \vert \, \lambda)$ by defining the **likelihood function**,
$$ L(\lambda \, \vert \, y_1, \, y_2, \, \ldots, \, y_n) \; = \; \prod_{i=1}^{n}f(y_{i} \, \vert \, \lambda) $$
Then we can find the MLE $\widehat{\lambda}$ such that,
$$
\hat{\lambda}_{n} \; = \; \underset{\lambda}{\arg\max} \; L(\lambda \, \vert \, y_1, y_2, \ldots, \, y_n)
$$
From calculus we know that we can find the maximum (or minimum) of any function by solving,
$$
\frac{\partial L(\lambda \, \vert y_1, y_2, \ldots, y_n)}{\partial \lambda} \; = \; 0
$$
for $\lambda$. The MLE has many important properties; the most important in my mind are,
1. It is a consistent estimator.
2. It is invariant, so that if $\widehat{\lambda}$ is the MLE for $\lambda$, then for any function $\tau(\lambda)$, the MLE for $\tau(\lambda)$ is $\tau(\widehat{\lambda})$.
3. The MLE is an asymptotically normal estimator. That is $\widehat{\lambda} \; \sim \; N(\lambda, \, \mathcal{I}^{-1})$.
To explain the first property, we must note that since an estimator is a function of the sample space, it is also a [random variable](https://en.wikipedia.org/wiki/Random_variable). Let $X_1, \, \ldots, \, X_n$ be a sequence of random variables. We say that $X_n$ [converges in probability](https://en.wikipedia.org/wiki/Convergence_of_random_variables) to $X$, written $X_n \; \xrightarrow{\mathcal{P}} \; X$, if
$$\forall \epsilon > 0, \; \lim\limits_{n \rightarrow \infty} P(\vert X_n - X \vert > \epsilon ) \; = \; 0$$
For an estimator this kind of convergence is called **consistency**. Consistency is a basic requirement of any estimator in statistics and signifies that the estimator eventually settles down to a constant (or to some limiting distribution of values).
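As a quick illustration of convergence in probability (using NumPy's own Poisson sampler rather than the Scala class, so this is just a sketch), we can estimate $P(\vert \bar{y}_n - \lambda \vert > \epsilon)$ for growing $n$ and watch it shrink towards zero:
```
import numpy as np

rng = np.random.default_rng(0)
lam, eps = 1.0, 0.1
for n in [10, 100, 1000, 10000]:
    # estimate P(|ybar_n - lam| > eps) from 2,000 replications of size n
    ybars = rng.poisson(lam, size=(2000, n)).mean(axis=1)
    print(n, np.mean(np.abs(ybars - lam) > eps))
```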
The second property of the MLE (together with the fact that the logarithm is monotone) allows us to transform our likelihood function into one that is often easier to work with, namely the log-likelihood function. That is, the MLE $\hat{\lambda}$ will also satisfy,
$$ \frac{\partial \log(L(\lambda \, \vert y_1, \, \ldots, y_n ))}{\partial \lambda} \; = \; 0 $$
The third property of the MLE, asymptotic normality, is helpful in modeling since your standardized residuals are then normal. Hence the sum of squares of the residuals is $\chi^2$ distributed. This allows us to define confidence intervals around our estimates. The term $\mathcal{I}$ is the [Fisher Information](https://en.wikipedia.org/wiki/Fisher_information) and will be discussed in the next section.
For the Poisson distribution the likelihood function is,
$$
\begin{align} L(\lambda \, \vert y_1, \, y_2, \, \ldots, \, y_n)
\; &= \;
\prod_{i=1}^{n} \frac{e^{-\lambda} \, \lambda^{y_i}} {y_i!} \newline
&= \;
e^{-n \, \lambda} \lambda^{n \, \bar{y}_{n}} \frac{1} {\prod_{i=1}^{n} y_i!}
\end{align}
$$
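Explicitly, taking the log of the likelihood gives the log-likelihood,
$$
\log L(\lambda \, \vert \, y_1, \, \ldots, \, y_n) \; = \; -n \, \lambda \, + \, n \, \bar{y}_{n} \log \lambda \, - \, \sum_{i=1}^{n} \log (y_i!)
$$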
Setting the derivative of the log-likelihood with respect to $\lambda$ equal to zero we find
$$
- n \, + \, \frac{n \, \bar{y}_{n}}{\widehat{\lambda}} \, = \, 0
$$
Then solving for $\hat{\lambda}$ we find the MLE is,
$$ \widehat{\lambda} \, = \, \bar{y}_{n} $$
```
from typing import List
def mle(sample: List[int]) -> float:
converted = list(sample)
return sum(converted) / len(converted)
lam = mle(p2.sample(1000))
print(f"lambda = {lam}")
```
Our estimate for $\lambda$ is pretty close to the true value of 3, the rate we used for `p2`!
Now, since the maximum likelihood estimator is the sample mean, we know it satisfies the [Central Limit Theorem](https://en.wikipedia.org/wiki/Central_limit_theorem),
$$ \hat{\lambda} _{n}\, = \, \bar{y}_{n} \; \xrightarrow{\mathcal{D}} \; N(\lambda,\lambda/n) $$
Hence we can repeatedly sample `p2` and compute the distribution of the MLE for various values of sample size $n$ to show how the MLE converges in distribution.
```
import numpy as np
# sample the MLE 100 times for each n = 10, 20, 50, 100, 200, 500, 1000
samples = [ [ mle(p2.sample(n)) for k in range(100)] for n in [10, 20, 50, 100, 200, 500, 1000]]
sample_df = pd.melt(
pd.DataFrame(np.array(samples).T,
columns=['10', '20', '50','100', '200', '500', '1000']),
var_name=["n"]
)
# plot the MLE for various values of the sample size
sns.displot(sample_df, x="value", hue="n", kind="kde", height=5,)
```
As $n \rightarrow \infty$ we see that the distribution of the MLE $\bar{y}_{n}$ becomes more sharply peaked around $3$, which shows that the estimator is converging to the true value!
We have seen that the MLE $\hat{\lambda}_{n}$ converges to the true value of $\lambda$, but for any finite value of $n$ the estimator can be incorrect. How do we measure our confidence in our estimate $\hat{\lambda}_{n}$? The answer is by using [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval).
## 4. Confidence Intervals From Fisher Information <a class="anchor" id="fourth-bullet"></a>
-------------------
Given a distribution $y_{i} \, \sim \, f(y \, \vert \, \alpha)$ for $i \, = \, 1, \ldots , n$ and the likelihood function,
$$ L(\alpha \, \vert \, y_1, y_2, \ldots, y_n) \; = \; \prod_{i=1}^{n} \, f(y_i \, \vert \, \alpha) $$
We define the [score statistic](https://en.wikipedia.org/wiki/Score_(statistics)) $S(\alpha)$ to be,
$$
S(\alpha) \; = \; \frac{d\log \left(L(\alpha \, \vert \, y_1, y_2, \ldots, y_n) \right)}{d\alpha}
$$
For a distribution from the [exponential family](https://en.wikipedia.org/wiki/Exponential_family) the score function satisfies,
$$
E_{y}[S] \; = \; 0
$$
Another important property of the score statistic is that it satisfies,
$$ S(\widehat{\alpha}) \; = \; 0$$
for the MLE $\widehat{\alpha}$. This property can be used to compute the MLE using the so-called [scoring algorithm](https://en.wikipedia.org/wiki/Scoring_algorithm), which is equivalent to the [Newton–Raphson method](https://en.wikipedia.org/wiki/Newton%27s_method). The latter method is more frequently used in calculations as it is a general optimization method and has many efficient implementations.
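To make this concrete, here is a minimal sketch (not part of the original project) of Newton-Raphson applied to the Poisson score equation $S(\lambda) \, = \, -n \, + \, n\bar{y}_{n}/\lambda$; for the Poisson the root is simply $\bar{y}_{n}$, but the same iteration applies to models with no closed-form MLE:
```
from typing import List

def poisson_mle_newton(sample: List[int], lam0: float = 1.0, tol: float = 1e-10) -> float:
    # Newton-Raphson on the score S(lam) = -n + n*ybar/lam, with S'(lam) = -n*ybar/lam**2
    n = len(sample)
    ybar = sum(sample) / n
    lam = lam0
    for _ in range(100):
        score = -n + n * ybar / lam
        score_prime = -n * ybar / lam ** 2
        step = score / score_prime
        lam = lam - step
        if abs(step) < tol:
            break
    return lam

# should agree with the sample mean (here 2.5)
print(poisson_mle_newton([2, 3, 1, 4, 2, 3]))
```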
The [Fisher information](https://en.wikipedia.org/wiki/Fisher_information) is defined as,
\begin{align}
\mathcal{I} \; &= \; \text{Cov}[S] \\
& = \; E_{y}[ S \, S^{T}] \\
& = \; E_y[ - S'] \\
&= \; - E_{y} \left[ \frac{d^{2}\log \left(L(\alpha \, | \, y_1, y_2, \ldots, y_n) \right)}{d\alpha^{2}} \right]
\end{align}
One can show that the standard error for the maximum likelihood estimate $(\widehat{\alpha})$ will then be,
$$ \text{S.E.}(\widehat{\alpha}) \; = \; \mathcal{I}^{-1/2}$$
The curvature of the log-likelihood at the MLE is dictated by the Fisher information. If $L$ is flat at the MLE then $\mathcal{I}$ is small and the MLE is not stable or well-defined. Higher Fisher information at the MLE means the likelihood is sharply peaked there, which implies the MLE is well defined and stable.
As previously mentioned, the MLE is asymptotically normal, which tells us mathematically that,
\begin{equation}
\widehat{\alpha} \; \sim \; N(\alpha, \, \mathcal{I}^{-1})
\end{equation}
These facts can be used to calculate confidence intervals for the MLE,
$$\text{CI}_{\alpha} \, = \, [ \widehat{\alpha} - Z_{\alpha/2} \, \mathcal{I}^{-1/2}/\sqrt{n}, \, \widehat{\alpha} + Z_{\alpha/2} \, \mathcal{I}^{-1/2} /\sqrt{n}]$$
The Fisher information for a Poisson distribution is,
$$ \mathcal{I} \, = \, 1/\lambda $$
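This follows from the definition above applied to a single Poisson observation, since $E_{y}[y] \, = \, \lambda$:
$$
\mathcal{I} \; = \; - E_{y}\left[ \frac{d^{2}}{d\lambda^{2}} \left( y \log \lambda \, - \, \lambda \, - \, \log(y!) \right) \right] \; = \; E_{y}\left[ \frac{y}{\lambda^{2}} \right] \; = \; \frac{1}{\lambda}
$$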
This means for our MLE of the Poisson distribution the confidence interval will be:
$$\text{CI}_{\hat{\lambda}} \, = \, [ \bar{y}_{n} - Z_{\alpha/2} \, \sqrt{\bar{y}_{n} / n}, \, \bar{y}_{n} + Z_{\alpha/2} \, \sqrt{\bar{y}_{n}/ n}]$$
We can then write a function to compute the 94% confidence interval (most people choose 95%, but to be consistent with [PyMC3](https://docs.pymc.io/) defaults we use 94%) for the sample:
```
from typing import Tuple
def ci(sample: List[int]) -> Tuple[float,float]:
"""
Computes the 94% confidence interval for sampled
data from a Poisson distribution
"""
z = 1.88
m = mle(sample)
n = len(sample)
return (m - z*np.sqrt(m/n), m + z*np.sqrt(m/n))
```
We can then get the MLE for the sampled data from `Poisson(1.0)`,
```
mle(sample)
```
The 94% confidence interval is then,
```
ci(sample)
```
We can see that the confidence interval does contain the true $\lambda \, = \, 1$. *Many people think a 94% confidence interval, $\text{CI}_{94}$, can be used to say that there is a 94% probability that the true $\lambda$ is in the confidence interval $\text{CI}_{94}$. This interpretation is wrong: in frequentist methods, the parameter $\lambda$ is assumed to be an unknown fixed value, and one cannot make probability statements about fixed values.*
*The confidence interval is a function of the sample space and therefore a random variable. One can make probability statements about the confidence intervals. Indeed the correct interpretation is that if you are able to repeatedly re-sample the population distribution $f(y \, \vert \, \lambda)$ to form many confidence intervals $\text{CI}_{94}$, 94% of them would contain the true population parameter $\lambda$.*
We can test this by creating a function which returns a boolean indicating whether or not the parameter `lam` for $\lambda$ is contained in the 94% confidence interval from the data `sample`:
```
def in_ci(lam: float, sample: List[int]) -> bool:
interval = ci(sample)
return (lam >= interval[0] and lam <= interval[1])
```
We can then test this function,
```
in_ci(1, sample)
in_ci(3, sample)
```
We can loop over 1,000 confidence intervals to see how many times they capture the true rate parameter,
```
count_in_ci = [1 if in_ci(1, p1.sample(1000)) else 0 for i in range(1000)]
print("Confidence interval captures true rate {}% of times".format(100*sum(count_in_ci)/len(count_in_ci)))
```
This is nearly spot on with what the theory says! Let's now move on to Bayesian methods!
## 5. Bayesian Estimators & Credible Intervals With PyMC3 <a class="anchor" id="fifth-bullet"></a>
-----------
In the frequentist approach the parameter we wish to estimate, $\lambda$, is fixed but unknown. The observed data $\left\{ y_1, y_2, \ldots, y_n \right\}$ are assumed to come from a population $f(y \, \vert \, \lambda)$, and estimates of the value of the population parameter $\lambda$ are obtained using maximum likelihood. As we discussed above, probability statements about the unknown rate constant $\lambda$ don't make sense since it's a fixed value and not a random variable. However, probability statements can be made about a confidence interval for $\lambda$. In the maximum likelihood method, asymptotic normality allows us to use confidence intervals as a way to quantify the uncertainty in our estimator.
In contrast, in Bayesian statistics, $\lambda$ is not a fixed value, but is assumed to take values from a probability distribution called the [prior](https://en.wikipedia.org/wiki/Prior_probability) $P(\lambda)$. This is often subjective and the choice of distribution for the prior often comes from domain knowledge. The observed data $\left\{ y_1, y_2, \ldots, y_n \right\}$ and samples from the prior are used to evaluate the [posterior distribution](https://en.wikipedia.org/wiki/Posterior_distribution), $P(\lambda \, \vert \, y_1, y_2, \ldots, y_n )$. **Now we can formulate estimators for $\lambda$ and quantify the uncertainty in those estimates by directly using the posterior distribution.**
Let $P(y_1, y_2, \ldots, y_n \, \vert \, \lambda )$ be the sampling distribution; then [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem) states,
$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n)
\; = \;
\frac{ P ( y_1, y_2, \ldots, y_n \, \vert \, \lambda) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)}, \quad \text{and} \qquad m(y_1, y_2, \ldots, y_n) \; = \; \int P ( y_1, y_2, \ldots, y_n\, \vert \, \lambda) \, P(\lambda) \, d\lambda
$$
where $m(y_1, y_2, \ldots, y_n)$ is called the marginal distribution and is used for normalization. Another way to rewrite Bayes' formula is in terms of the likelihood function, $L(\lambda \, \vert \, y_1, y_2, \ldots, y_n)$,
$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n)
\; = \;
\frac{ L(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)}
$$
The [Bayesian estimator](https://en.wikipedia.org/wiki/Bayes_estimator) (often called the posterior mean) is taken to be the expected value of $\lambda$ under the posterior distribution,
$$ \widehat{\lambda} \; = \; E_{\lambda \, \vert \, y}(\lambda \, \vert \, y_1, y_2, \ldots, y_n)$$
Until the advent of computers, statisticians were largely restricted to using [conjugate priors](https://en.wikipedia.org/wiki/Conjugate_prior) with Bayesian methods, since these admit analytic solutions for the posterior distribution.
The conjugate prior for a Poisson distribution is a [Gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which for $\alpha, \beta > 0$ takes the form,
$$
P(\lambda \, \vert \, \alpha, \beta ) \; = \; \frac{\beta^{\alpha }}{\Gamma(\alpha) } \, \lambda^{\alpha-1} \, e^{-\beta \lambda}
$$
A depiction of the Gamma distribution for various values of $\alpha$ and $\beta$ can be seen from [PyMC3's website](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Gamma).

Then the posterior is [again a Gamma distribution](https://people.stat.sc.edu/Hitchcock/slides535day5spr2014.pdf), but with $\alpha^{\prime} \, = \, \alpha \, + \, n \, \bar{y}_{n}$ and $\beta^{\prime} \, = \, \beta \, + \, n$. This leads to a posterior mean,
$$\hat{\lambda} \; = \; \frac{\alpha}{\beta \, + \, n}
\, + \, \frac{ \bar{y}_{n}}{1 \, + \, \beta \, / \, n\, }$$
We can see that with little data (small $n$) our estimate is closer to the prior mean ($\alpha / \beta$), while with lots of data (large $n$) we move towards the sample average $\bar{y}_{n}$.
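To see where this update comes from, multiply the Poisson likelihood by the Gamma prior and drop the factors that do not depend on $\lambda$:
$$
P(\lambda \, \vert \, y_1, \ldots, y_n) \; \propto \; e^{-n\lambda} \, \lambda^{n \bar{y}_{n}} \cdot \lambda^{\alpha - 1} \, e^{-\beta \lambda} \; = \; \lambda^{\alpha + n \bar{y}_{n} - 1} \, e^{-(\beta + n)\lambda}
$$
which is the kernel of a Gamma distribution with parameters $\alpha^{\prime} \, = \, \alpha + n\bar{y}_{n}$ and $\beta^{\prime} \, = \, \beta + n$.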
Let's see this convergence for ourselves! We can define the posterior mean function:
```
def posterior_mean(alpha: float, beta: float , sample: List[int]) -> float:
n = len(sample)
m = sum(sample) / n
return alpha / (beta + n) + m / (1 + beta / n)
```
Then draw 1,000 random sample sizes between 2 and 1,000:
```
from random import randint
nums_samples = [randint(2,1000) for i in range(1000)]
nums_samples.sort()
```
We can then calculate and plot the posterior mean for the [Poisson-Gamma model](https://people.stat.sc.edu/Hitchcock/slides535day5spr2014.pdf) using the data and $\alpha \, = \, 3$ and $\beta \, = \, 1$,
```
alpha = 3.0
beta = 1.0
samples = [ posterior_mean(alpha=alpha, beta=beta, sample = p1.sample(n))
for n in nums_samples]
(pd.DataFrame({
"posterior_mean":samples,
"sample_size":nums_samples
}).plot(x="sample_size",
y="posterior_mean",
                   title="Convergence of Posterior Mean",
ylim=(0,1.75)))
```
Now let's talk about the confidence in this estimate. The analogue of the confidence interval in Bayesian estimation is the [credible interval](https://en.wikipedia.org/wiki/Credible_interval), which requires the full posterior. I wrote a function to plot the posterior for the sample from `p1` below,
```
from gamma import posterior_distribution
posterior_distribution(alpha, beta, sample)
```
As we saw, the Bayesian estimator requires the full posterior distribution. Without a conjugate prior, Bayesian methods require a numerical approximation to the posterior, which is computationally expensive. Despite the added complexity, Bayesian methods allow us to handle situations where we might not have much data and can often lead to estimates with smaller variance.
One approach to approximating the posterior distribution is to randomly sample the prior distribution and then evaluate the likelihood of that prior value and the data using Bayes' rule,
$$ P(\lambda \, \vert \, y_1, y_2, \ldots, y_n)
\; = \;
\frac{ L(\lambda \, \vert \, y_1, y_2, \ldots, y_n) \, P(\lambda)}{m(y_1, y_2, \ldots, y_n)}
$$
Repeatedly sampling the prior and evaluating the likelihood gives us a good approximation to the posterior distribution. Once we have the posterior distribution we can evaluate the expected value of $\lambda$. A common method for generating the random samples above is [Markov Chain Monte Carlo (MCMC)](https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo). Bayesian methods with MCMC can be used easily with [PyMC3](https://docs.pymc.io/)!
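As a toy illustration of the idea, here is a hand-rolled random-walk Metropolis sampler for the Poisson-Gamma model (PyMC3 itself uses more sophisticated samplers such as NUTS, so treat this only as a sketch):
```
import math
import random
from typing import List

def log_posterior(lam: float, data: List[int], alpha: float, beta: float) -> float:
    # unnormalized log posterior: Poisson likelihood times Gamma(alpha, beta) prior
    if lam <= 0:
        return float("-inf")
    n, s = len(data), sum(data)
    return s * math.log(lam) - n * lam + (alpha - 1) * math.log(lam) - beta * lam

def metropolis(data: List[int], alpha: float, beta: float,
               steps: int = 5000, scale: float = 0.2) -> List[float]:
    lam, draws = 1.0, []
    for _ in range(steps):
        proposal = lam + random.gauss(0, scale)               # random-walk proposal
        log_ratio = (log_posterior(proposal, data, alpha, beta)
                     - log_posterior(lam, data, alpha, beta))
        if random.random() < math.exp(min(0.0, log_ratio)):   # accept/reject step
            lam = proposal
        draws.append(lam)
    return draws

draws = metropolis([1, 0, 2, 1, 3, 0, 1, 2], alpha=3.0, beta=1.0)
print(sum(draws[1000:]) / len(draws[1000:]))                  # crude posterior mean after burn-in
```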
We begin by importing the library
```
import pymc3 as pm
```
Then define the same Poisson-Gamma model as above, and draw 5,000 samples to approximate the posterior mean:
```
with pm.Model() as model_1:
λ = pm.Gamma('λ', alpha=3, beta=1)
y = pm.Poisson('y', mu=λ, observed=list(sample))
trace = pm.sample(5000, tune=2000, return_inferencedata=True)
```
We can then view the posterior distribution using the [ArviZ](https://arviz-devs.github.io/arviz/) library,
```
import arviz as az
az.plot_posterior(trace)
```
The results are the same as before with the analytical posterior.
*We should note that **Bayesian estimators are ALWAYS biased due to their choice of prior**; however, they can reduce the variance of our estimators.* This will become evident in the next example, which shows another area where Bayesian methods shine: when you have a limited amount of data, but a lot of domain knowledge.
Say we only have 20 sample points; we can still calculate the MLE,
```
mle(sample[-20:])
```
Not too bad! However, the confidence interval is quite large,
```
ci(sample[-20:])
```
Let's define our model to be a Poisson-Exponential model where the prior distribution is an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution) pictured below from [PyMC3's site](https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Exponential):

It's unfortunate that these two distributions both use $\lambda$ for their parameters, but I will do my best to make it clear which $\lambda$ I am referring to. A larger $\lambda$ in the exponential prior concentrates the prior mass near zero, while a smaller $\lambda$ spreads it over a wider range of values. Let's define the prior to be an exponential distribution with rate 1 and draw 500 MCMC samples. We can then plot the posterior and the sample trace using the [plot_trace](https://arviz-devs.github.io/arviz/api/generated/arviz.plot_trace.html) method,
```
with pm.Model() as model_2:
λ = pm.Exponential('λ', 1)
y = pm.Poisson('y', mu=λ, observed=list(sample[-20:-1]))
trace2 = pm.sample(500, tune=2000, cores=2, return_inferencedata=True)
az.plot_trace(trace2, var_names=['λ'])
```
We can then calculate the expected value of the posterior and the credible region,
```
az.summary(trace2, kind="stats")
```
The values are nearly the same as the MLE.
One thing to note is that the posterior isn't very well defined using only 500 MCMC draws. We can also see that there seems to be some [autocorrelation in the sample trace](https://www.coursera.org/lecture/introduction-to-pymc3/autocorrelation-and-effective-sample-size-YSW3x).
```
az.plot_autocorr(trace2)
```
Let's bump the number of samples up to 10,000 to see how the posterior distribution looks,
```
with model_2:
trace3 = pm.sample(10000, tune=2000, cores=2, return_inferencedata=True)
az.plot_trace(trace3, var_names=['λ'])
```
We can see the posterior is now pretty well defined, with a slight right skew. Let's get the posterior mean and credible interval,
```
az.summary(trace3, kind="stats")
```
The Bayesian method in this case isn't much better than the MLE, but the credible interval is narrower than the confidence interval.
We can also see the mode of the posterior distribution is nearly directly over 1.0, which is the correct value for our parameter.
Using the mode of the posterior distribution as an estimator is called [Maximum A-Posteriori (MAP)](https://en.wikipedia.org/wiki/Maximum_a_posteriori_estimation) estimation, and we can see the calculated value below,
```
pm.find_MAP(model=model_2)
```
That's really good for only 20 data points!!
## 6. Connecting The Two Methods <a class="anchor" id="sixth-bullet"></a>
------
One way we can connect Bayesian methods with the MLE is by choosing a constant prior $C$, or a uniform prior ($U(0, \theta)$ so long as $\theta \, \geq \, \lambda$). Then the MAP is the same as the MLE:
$$ \max_{\lambda} \, P(\lambda \, \vert \, y_1, y_2, \ldots, y_n)
\; = \;
\frac{1}{m(y_1, y_2, \ldots, y_n)} \, \max_{\lambda} \, L(\lambda \, \vert \, y_1, y_2, \ldots, y_n)
$$
We can show this with PyMC3 by choosing the prior to be $U(0, 10)$:
```
with pm.Model() as model_3:
λ = pm.Uniform('λ', lower=0, upper=10)
y = pm.Poisson('y', mu=λ, observed=list(sample[-20:-1]))
pm.find_MAP(model=model_3)
```
This is the same value as the MLE!
The [Bernstein–von Mises theorem](https://en.wikipedia.org/wiki/Bernstein%E2%80%93von_Mises_theorem) shows rigorously that in the limit of large data Bayesian estimators and maximum likelihood estimators converge to the same thing.
## 7. Conclusions <a class="anchor" id="seventh-bullet"></a>
In this post I discussed frequentist and Bayesian estimation techniques applied to data from a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) and covered how to quantify the uncertainty in each method. I showed how to sample a probability distribution written in Scala from Python using [Py4J](https://www.py4j.org/). For frequentist methods I covered [maximum likelihood estimation](https://en.wikipedia.org/wiki/Maximum_likelihood_estimation), its convergence, and quantifying uncertainty with confidence intervals using the [Fisher information](https://en.wikipedia.org/wiki/Fisher_information). I also covered Bayesian estimators using [PyMC3](https://docs.pymc.io/) and quantifying their uncertainty with [credible intervals](https://en.wikipedia.org/wiki/Credible_interval) using [ArviZ](https://arviz-devs.github.io/arviz/). Finally, we showed the connection between maximum likelihood estimators and Bayesian estimators by choosing a [flat prior](https://stats.stackexchange.com/questions/124753/what-are-examples-of-flat-priors#:~:text=The%20term%20%22flat%22%20in%20reference,c%20over%20the%20real%20line.).
I learned a lot in creating this post and hope you enjoyed it!
<br>
**<font face="calibri" color="black" size="6">Data Exploration and Prediction of House Price</font>**
<br><br>
**<font face="calibri"size="4" color="black" >July 2017</font>** <br> <br> <br> <br>
**<font face="calibri" color="blue" size="5">Part I Introduction</font>**
<br><br>
**<font face="calibri" size="4" color="black" >Goal</font>**
<br><br><font face="calibri" color="black" size="4">Predict the sale price of houses. </font>
<br><br><br>
**<font face="calibri" size="4"color="black" >Dataset Description</font>**
<br><br><font face="calibri" color="black" size="4">In this dataset, there are 1460 observations with 79 explanatory variables describing (almost) every aspect of residential homes in Ames, Iowa. Among the variables, there are 37 integer variables, such as Id, MSSubClass, LotFrontage, and 43 factor variables, such as MSZoning, Street, LotShape. The descriptive and quantitative analyses will use subsets of these variables depending on the model.</font>
<br><br>
<font face="calibri" color="black" size="4">First part of this report: <b>Descriptive and Exploratory Analysis</b>
<br><br>Second part of this report: <b>Predictive Analysis. </b></font>
<img src="http://sturdyhome.com/wp-content/uploads/2013/09/House-images-2013-3.jpg">
**<font face="calibri" color="blue" size="5">Part II Data Assessment</font>**
**<font face="calibri" color="blue" size="4">2.1 Import Data</font>**
```
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(gplots)
library(repr)
# Change plot size to 9 x 6
options(repr.plot.width=9, repr.plot.height=6)
list.files("../input")
train <- read.csv("../input/train.csv")
```
**<font face="calibri" color="blue" size="4">2.2 Check Missing Data</font>**
<font face="calibri" color="black" size="4">We find that every row in this dataset has at least one missing value, and we will deal with this later. </font>
```
# list rows of data that have missing values
missing_row <- train[!complete.cases(train),]
head(missing_row)
nrow(missing_row)
```
**<font face="calibri" color="blue" size="4">2.3 Select Variables</font>**
<br><br>
<font face="calibri" color="black" size="4"><b>Step 1</b>: select variables that may have a greater impact on house price <br><br> <b>Step 2</b>: build a subset of the train dataset for prediction.
<br><br></font>
```
## show all variable names
var_name <- names(train)
var_name
#Here, we select these important variables by creating a vector that contains variable names
select_var <- c('Id','MSZoning','Utilities', 'Neighborhood','BldgType','HouseStyle',
'OverallQual','OverallCond','YearBuilt', 'ExterQual','ExterCond',
'BsmtQual','BsmtCond','TotalBsmtSF','Heating','HeatingQC',
'CentralAir','Electrical','GrLivArea','BedroomAbvGr','KitchenAbvGr',
'KitchenQual','TotRmsAbvGrd','Functional','Fireplaces','FireplaceQu',
'GarageArea','GarageQual','GarageCond','OpenPorchSF','PoolArea',
'Fence','MoSold','YrSold','SaleType','SaleCondition','SalePrice')
# construct subset of train dataset that is used for prediction
select_train <- train[,select_var]
head(select_train)
summary(select_train)
```
<font face="calibri" color="black" size="4">To get a better understanding of this dataset, I summarized all important variables in terms of minimum, first quartile, median, mean, third quartile and maximum value. </font>
**<font face="calibri" color="blue" size="4">2.4 Descriptive and exploratory analysis of SalePrice</font><br><br>**
<font face="calibri" color="Black" size="4">SalePrice is our target variable and also the dependent variable for prediction. According to the assumptions of linear regression, the residuals should be approximately normally distributed, and a heavily skewed target often violates this. By checking the distribution of SalePrice, we can decide whether we need a non-linear transformation, such as taking the log, to make better predictions. </font>
**<font face="calibri" color="blue" size="4">2.4.1 Summary of Target Variable: SalePrice</font>**
```
# Five number summary
summary(select_train$SalePrice)
# Draw a histogram to figure out the distribution of SalePrice
options(scipen=10000)
ggplot(select_train, aes(x = SalePrice, fill = ..count..)) +
geom_histogram(binwidth = 5000) +
ggtitle("Figure 1 Histogram of SalePrice") +
ylab("Count of houses") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5))
```
<font face="calibri" size="4">From the histogram above, the distribution of our target variable, SalePrice, is skewed to the right. Thus, a log term of SalePrice should be generated for linear regression. Here, we name it lSalePrice. </font>
```
#log term of SalePrice
select_train$lSalePrice <- log(select_train$SalePrice)
# Draw a histogram to figure out the distribution of log SalePrice
ggplot(select_train, aes(x = lSalePrice, fill = ..count..)) +
geom_histogram(binwidth = 0.05) +
ggtitle("Figure 2 Histogram of log SalePrice") +
ylab("Count of houses") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5))
```
<font face="calibri" color="Black" size="4">Great! After the transformation, <b>lSalePrice</b> is approximately normally distributed. We will use this log term of SalePrice later in the models.</font>
**<font face="calibri" color="blue" size="4">2.4.2 Explore the distribution of SalePrice by MSZoning</font>**
<font face="calibri" size="4"> When it comes to housing price, the value of a house is usually related to two types of elements: internal and external. <b>Internal elements</b> are the key features of the house itself, like total area and the number of rooms. As for <b>external elements</b>, the surrounding environment is one of the key factors.
<br><br>First, let's figure out the variable that indicates the housing environment in our dataset. Here, I choose <b>MSZoning</b> as this indicator. It's a categorical variable and the categories are: </font>
<br><br>
<font face="calibri" size="4">MSZoning: Identifies the general zoning classification of the sale.</font>
A Agriculture
C Commercial
FV Floating Village Residential
I Industrial
RH Residential High Density
RL Residential Low Density
RP Residential Low Density Park
RM Residential Medium Density
<font face="calibri" size="4">Therefore, in this section, I will explore the relationship between MSZoning and our target variable SalePrice. </font>
<font face="calibri" size="4">First, let's have a close look at <b>MSZoning</b>. Because it's a categorical variable, I am curious about the total number of houses in each category. </font>
```
# count house by MSZoning
options(repr.plot.width=5, repr.plot.height=4)
ggplot(select_train, aes(x = MSZoning, fill = MSZoning )) +
geom_bar()+
scale_fill_hue(c = 80)+
ggtitle("Figure 3 Distribution of MSZoning")+
theme(plot.title = element_text(hjust = 0.5),legend.position="right", legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))+
geom_text(stat='count',aes(label=..count..),vjust=-0.25)
# Distribution of MSZoning
table(select_train$MSZoning)
```
<font face="calibri" size="4">From the graph and table above, it is obvious that most of the houses in this dataset are built in the Residential Low Density area (1151 houses), followed by Residential Medium Density (218 houses). Few houses are built in the Commercial, Floating Village and Residential High Density areas.
<br><br>
Since a large number of houses fall into the categories of Residential Low Density and Residential Medium Density, these two areas deserve the most attention in the housing price analysis. </font>
<br><br>
<font face="calibri" size="4">On top of this, let's add our target variable into the analysis. What does the housing price look like in each category? Here, I use a boxplot to show the distribution of prices in each MSZoning category. </font>
```
# Change plot size to 9 x 6
options(repr.plot.width=9, repr.plot.height=6)
#boxplot of SalePrice by MSZoning
#add average value of SalePrice as red point
ggplot(select_train, aes(x=MSZoning, y=SalePrice, fill=MSZoning)) +
geom_boxplot(alpha=0.3) +
stat_summary(fun.y=mean, geom="point", shape=20, size=4, color="red", fill="red")+
theme(legend.position="none")+
ggtitle("Figure 4 Boxplot of SalePrice by MSZoning")+
theme(plot.title = element_text(hjust = 0.5))
```
<font face="calibri" size="4">The graph above shows the distribution of SalePrice by MSZoning. The sales in the "Floating Village Residential" area have the highest average sale price, followed by "Residential Low Density", while "Commercial" sales have the lowest average sale price. </font>
<br><br>
<font face="calibri" size="4">It is quite strange that the commercial area has the lowest average sale price while the village area has the highest. One possible explanation could be that SalePrice is also related to the size of the houses. To confirm, let's explore the average size in these areas.</font>
<font face="calibri" size="4">The variable that indicates size in this dataset is called <b>GrLivArea</b>.
<br><br>
<b>Definition</b>: Above grade (ground) living area square feet</font>
<br><br>
```
# Display the average house size in each area
library(plyr)
ddply(train, .(MSZoning), summarize, size=mean(GrLivArea))
```
<font face="calibri" size="4">It is obvious that the average size of houses in the Commercial area is much smaller than in the Floating Village area, which supports our assumption above. </font>
**<font face="calibri" color="blue" size="4">2.4.3 Explore the distribution of SalePrice by BldgType</font>**
<font face="calibri" size="4">Next, we are going to describe SalePrice by the different categories of BldgType. </font>
<br><br>
<font face="calibri" size="4">BldgType: Type of dwelling
1Fam Single-family Detached
2FmCon Two-family Conversion; originally built as one-family dwelling
Duplx Duplex
TwnhsE Townhouse End Unit
TwnhsI Townhouse Inside Unit
<font face="calibri" size="4">To get a quick feel for BldgType, I use a table here to count the houses in each category and also show the maximum and minimum SalePrice. </font >
```
library(plyr)
ddply(train, .(BldgType), summarize,Total = length(BldgType),Max_price=max(SalePrice),Min_price=min(SalePrice))
```
<font face="calibri" size="4">In the previous section I used a boxplot to describe MSZoning, while for BldgType I will use a histogram, since I am more interested in the distribution than in summary numbers. </font>
```
# histogram of housing price by BldgType
ggplot(select_train, aes(SalePrice)) +
geom_histogram(aes(fill = BldgType), position = position_stack(reverse = TRUE), binwidth = 20000) +
coord_flip() + ggtitle("Figure 5 Histogram of SalePrice") +
ylab("Count") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5),legend.position=c(0.9,0.8), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
```
<font face="calibri" size="4">More thoughts about the graph above:
1. For houses of the Single-family Detached type, most prices fall within the range from 50000 to 300000
2. For Two-family Conversion, Duplex, Townhouse End Unit and Townhouse Inside Unit, most house prices range from 75000 to 210000
3. The highest and lowest house prices both belong to the Single-family Detached house type
**<font face="calibri" color="blue" size="4">2.4.4 Explore the distribution of SalePrice by OverallQual</font>**
<font face="calibri" size="4">The last one is OverallQual.
OverallQual: Rates the overall material and finish of the house
10 Very Excellent
9 Excellent
8 Very Good
7 Good
6 Above Average
5 Average
4 Below Average
3 Fair
2 Poor
1 Very Poor
```
ggplot(select_train, aes(x = SalePrice,fill = as.factor(OverallQual))) +
geom_histogram(position = "stack", binwidth = 10000) +
ggtitle("Figure 6 Histogram of SalePrice") +
ylab("Count") +
xlab("Housing Price") +
scale_fill_discrete(name="OverallQual")+
theme(plot.title = element_text(hjust = 0.5), legend.position=c(0.9,0.7), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
```
<font face="calibri" size="4">As we see in the graph above:
1. Most houses have an OverallQual of 4, 5, 6 or 7, equivalent to "Below Average", "Average", "Above Average" and "Good"
2. The higher the rate of overall quality, the higher the house sale price
3. For each rate level of overall quality, the distribution of house price is almost symmetric
**<font face="calibri" color="blue" size="4">2.5 What kind of house will be sold for higher price?</font>**
**<font face="calibri" color="blue" size="4">2.5.1 Correlation Exploreation</font>**
<font face="calibri" size="4">Let's select variables first.
<br><br>Variables for correlation exploration: SalePrice, OverallQual, OverallCond, YearBuilt, ExterCond2, TotalBsmtSF, HeatingQC2, plus the other model variables listed in the code below. </font>
<font face="calibri" size="4">In order to have a clear view of how the key variables relate to SalePrice, I decided to use a correlation heatmap to plot the correlation coefficients.
<br><br>But before plotting the heatmap, one more step is needed: feature engineering. In section I, we learned that some variables are factors; in order to build the heatmap we need to convert them into numerics.
<br><br>Since these factor variables evaluate house quality with ordered levels, such as "Ex", "Fa", "Gd", "TA", and "Po", I map them to the numbers "5", "2", "4", "3", and "1" respectively, so that the higher the quality level, the larger the number. After transforming, all the variables used for the heatmap are numeric. </font>
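<font face="calibri" size="4">For reference, each cell of the heatmap below is the sample Pearson correlation between two variables $x$ and $y$,</font>
$$
r_{xy} \; = \; \frac{\sum_{i=1}^{n}(x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_{i=1}^{n}(x_i - \bar{x})^2} \, \sqrt{\sum_{i=1}^{n}(y_i - \bar{y})^2}}
$$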
```
# convert factor to numeric
select_train$ExterCond2 <- as.numeric(factor(select_train$ExterCond,
levels = c("Ex", "Fa","Gd", "TA","Po"),
labels = c(5,2,4,3,1) ,ordered = TRUE))
select_train$HeatingQC2 <- as.numeric(factor(select_train$HeatingQC,
levels = c("Ex", "Fa","Gd", "TA","Po"),
labels = c(5,2,4,3,1) ,ordered = TRUE))
select_train$CentralAir2 <- as.numeric(factor(select_train$CentralAir,
levels = c("N", "Y"),
labels = c(0,1) ,ordered = TRUE))
#select variables that be used for model buidling and heat map
model_var <- c('SalePrice',
'OverallQual','OverallCond','YearBuilt','ExterCond2',
'TotalBsmtSF','HeatingQC2',
'CentralAir2','GrLivArea','BedroomAbvGr','KitchenAbvGr',
'TotRmsAbvGrd','Fireplaces',
'GarageArea','OpenPorchSF','PoolArea',
'YrSold')
heat <- select_train[,model_var]
```
<font face="calibri" size="4">Next, we use ggplot to plot the correlation heatmap. </font>
```
#plot correlation heatmap for SalePrice
options(repr.plot.width=8, repr.plot.height=6)
library(ggplot2)
library(reshape2)
qplot(x=Var1, y=Var2, data=melt(cor(heat, use="p")), fill=value, geom="tile") +
scale_fill_gradient2(low = "green", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Correlation") +
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 8, hjust = 1))+
coord_fixed()+
ggtitle("Figure 7 Correlation Heatmap") +
theme(plot.title = element_text(hjust = 0.4))
```
<font face="calibri" size="4"> In this graph, <b>red</b> indicates strong positive correlation and <b>green</b> indicates strong negative correlation. As we can see, several variables deserve particular attention: GarageArea, Fireplaces, TotRmsAbvGrd, GrLivArea, HeatingQC, TotalBsmtSF and YearBuilt. </font>
**<font face="calibri" color="blue" size="4">2.5.2 Correlation between SalePrice and some numeric variables</font>**
<font face="calibri" size="4">In this section, I am going to analyze the correlation between SalePrice and several numeric variables, including <b>GrLivArea</b>, <b>TotalBsmtSF</b>, <b>TotRmsAbvGrd</b> and <b>GarageArea</b>. Unlike for the categorical variables, here I will use scatter plots and trend lines to show the relationships. </font>
```
# scatter plot of GrLivArea
# Change plot size to 9 x 6
options(repr.plot.width=9, repr.plot.height=6)
p1 <- ggplot(select_train, aes(x=GrLivArea, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 8 Scatter plot of SalePrice and GrLivArea") +
theme(plot.title = element_text(hjust = 0.4))
# scatter plot of TotalBsmtSF
p2 <- ggplot(select_train, aes(x=TotalBsmtSF, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 9 Scatter plot of SalePrice and TotalBsmtSF") +
theme(plot.title = element_text(hjust = 0.4))
#scatter plot of TotRmsAbvGrd
p3 <- ggplot(select_train, aes(x=TotRmsAbvGrd, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 10 Scatter plot of SalePrice and TotRmsAbvGrd") +
theme(plot.title = element_text(hjust = 0.4))
#scatter plot of GarageArea
p4 <- ggplot(select_train, aes(x=GarageArea, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 11 Scatter plot of SalePrice and GarageArea") +
theme(plot.title = element_text(hjust = 0.4))
library(gridExtra)
grid.arrange(p1, p2,p3,p4)
```
<font face="calibri" size="4"> Some thoughts about these graphs:
1. GrLivArea, TotalBsmtSF, TotRmsAbvGrd, and GarageArea are positively correlated with SalePrice, which means that as GrLivArea, TotalBsmtSF, TotRmsAbvGrd and GarageArea increase, SalePrice also increases.
2. TotalBsmtSF has a more concentrated distribution than the others
**<font face="calibri" color="blue" size="5">Part III Model Fitting</font>**
<font face="calibri" size="4"> After the descriptive analysis, we move into the predictive analysis section. There are three models here:
<br><br><b>Linear Regression Model</b>
<br><br><b>Classification & Regression Trees (CART) Model</b>
<br><br><b>Random Forest Model </b></font >
**<font face="calibri" color="blue" size="4">3.1 Linear Regression Model</font>**
<font face="calibri" size="4">In a linear regression model, the relationship between the dependent and independent variables is expressed by an equation with coefficients, and the coefficients are chosen to minimize the sum of the squared residuals. Here I select 16 predictor variables to fit this model.
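<font face="calibri" size="4">Concretely, writing the selected predictors of house $i$ as $x_{i1}, \ldots, x_{ip}$, the fit chooses coefficients $\beta_0, \ldots, \beta_p$ that minimize</font>
$$
\sum_{i=1}^{n} \Big( \log(\text{SalePrice}_i) \, - \, \beta_0 \, - \, \sum_{j=1}^{p} \beta_j \, x_{ij} \Big)^{2}
$$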
<font face="calibri" size="4">**Variables in this model**:
<br><br>SalePrice, OverallQual, OverallCond, YearBuilt, ExterCond2, TotalBsmtSF, HeatingQC2, CentralAir2, GrLivArea, BedroomAbvGr, KitchenAbvGr, TotRmsAbvGrd, Fireplaces, GarageArea,<br>
OpenPorchSF, PoolArea, YrSold</font>
<font face="calibri" size="4">Step 1: choose variables and add the log of SalePrice
```
#prediction of lm
#build model dataset for linear regression
model_lin <- select_train[, model_var]
model_lin$lSalePrice <- log(model_lin$SalePrice)
```
<font face="calibri" size="4">Step 2: divide the dataset into two parts, training and validation, to prepare for prediction later
```
#partition data
set.seed(10000)
train.index <- sample(c(1:dim(model_lin)[1]), dim(model_lin)[1]*0.8)
model_lin_train = model_lin[train.index,]
model_lin_valid <- model_lin[-train.index,]
```
<font face="calibri" size="4">Step 3: run regression
```
#use lm() to run linear regression of SalePrice on all variables in model dataset
linreg <- lm(lSalePrice~.-SalePrice, data = model_lin_train)
summary(linreg)
```
<font face="calibri" size="4">Step 4: forecast and check for model accuracy
```
library(forecast)
#use predict() to make prediction on a new set
pred1 <- predict(linreg,model_lin_valid,type = "response")
residuals <- model_lin_valid$lSalePrice - pred1
linreg_pred <- data.frame("Predicted" = pred1, "Actual" = model_lin_valid$lSalePrice, "Residual" = residuals)
accuracy(pred1, model_lin_valid$lSalePrice)
```
**<font face="calibri" color="blue" size="4">3.2 CART</font>**
```
# regression tree (CART) on the log sale price
library(rpart)
library(rpart.plot)
class.tree <- rpart(lSalePrice~.-SalePrice,
data = model_lin_train,control = rpart.control(cp = 0.01))
plotcp(class.tree)
printcp(class.tree)
rpart.plot(class.tree,
box.palette="GnBu",
branch.lty=3, shadow.col="gray", nn=TRUE)
```
**<font face="calibri" color="blue" size="4">3.3 Random Forest</font>**
```
#Random Forest
library(randomForest)
RF <- randomForest(lSalePrice ~.-SalePrice, data = model_lin_train,
importance =TRUE,ntree=500,nodesize=7, na.action=na.roughfix)
# variable importance
options(repr.plot.width=9, repr.plot.height=6)
varImpPlot(RF, type=1)
#prediction
rf.pred <- predict(RF, newdata=model_lin_valid )
accuracy(rf.pred, model_lin_valid$lSalePrice)
plot(rf.pred, model_lin_valid$lSalePrice, main = "Figure 12 Predicted vs. Actual log SalePrice")
abline(0,1)
```
|
github_jupyter
|
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(gplots)
library(repr)
# Change plot size to 9 x 6
options(repr.plot.width=9, repr.plot.height=6)
list.files("../input")
train <- read.csv("../input/train.csv")
# list rows of data that have missing values
missing_row <- train[!complete.cases(train),]
head(missing_row)
nrow(missing_row)
## show all variable names
var_name <- names(train)
var_name
#Here, we select these important variables by creating a vector that contains variable names
select_var <- c('Id','MSZoning','Utilities', 'Neighborhood','BldgType','HouseStyle',
'OverallQual','OverallCond','YearBuilt', 'ExterQual','ExterCond',
'BsmtQual','BsmtCond','TotalBsmtSF','Heating','HeatingQC',
'CentralAir','Electrical','GrLivArea','BedroomAbvGr','KitchenAbvGr',
'KitchenQual','TotRmsAbvGrd','Functional','Fireplaces','FireplaceQu',
'GarageArea','GarageQual','GarageCond','OpenPorchSF','PoolArea',
'Fence','MoSold','YrSold','SaleType','SaleCondition','SalePrice')
# construct subset of train dataset that is used for prediction
select_train <- train[,select_var]
head(select_train)
summary(select_train)
# Five number summary
summary(select_train$SalePrice)
# Draw a higtogram to figure out the distribution of SalePrice
options(scipen=10000)
ggplot(select_train, aes(x = SalePrice, fill = ..count..)) +
geom_histogram(binwidth = 5000) +
ggtitle("Figure 1 Histogram of SalePrice") +
ylab("Count of houses") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5))
#log term of SalePrice
select_train$lSalePrice <- log(select_train$SalePrice)
# Draw a higtogram to figure out the distribution of log SalePrice
ggplot(select_train, aes(x = lSalePrice, fill = ..count..)) +
geom_histogram(binwidth = 0.05) +
ggtitle("Figure 2 Histogram of log SalePrice") +
ylab("Count of houses") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5))
# count house by MSZoning
options(repr.plot.width=5, repr.plot.height=4)
ggplot(select_train, aes(x = MSZoning, fill = MSZoning )) +
geom_bar()+
scale_fill_hue(c = 80)+
ggtitle("Figure 3 Distribution of MSZoning")+
theme(plot.title = element_text(hjust = 0.5),legend.position="right", legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))+
geom_text(stat='count',aes(label=..count..),vjust=-0.25)
# Distribution of MSZoning
table(select_train$MSZoning)
# Change plot size to 9 x 6
options(repr.plot.width=9, repr.plot.height=6)
#boxplot of SalePrice by MSZoning
#add average value of SalePrice as red point
ggplot(select_train, aes(x=MSZoning, y=SalePrice, fill=MSZoning)) +
geom_boxplot(alpha=0.3) +
stat_summary(fun.y=mean, geom="point", shape=20, size=4, color="red", fill="red")+
theme(legend.position="none")+
ggtitle("Figure 4 Boxplot of SalePrice by MSZoning")+
theme(plot.title = element_text(hjust = 0.5))
# Display the average hosue size in each area
library(plyr)
ddply(train, .(MSZoning), summarize, size=mean(GrLivArea))
library(plyr)
ddply(train, .(BldgType), summarize,Total = length(BldgType),Max_price=max(SalePrice),Min_price=min(SalePrice))
# historgram of housing price by BldgType
ggplot(select_train, aes(SalePrice)) +
geom_histogram(aes(fill = BldgType), position = position_stack(reverse = TRUE), binwidth = 20000) +
coord_flip() + ggtitle("Figure 5 Histogram of SalePrice") +
ylab("Count") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5),legend.position=c(0.9,0.8), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
ggplot(select_train, aes(x = SalePrice,fill = as.factor(OverallQual))) +
geom_histogram(position = "stack", binwidth = 10000) +
ggtitle("Figure 6 Histogram of SalePrice") +
ylab("Count") +
xlab("Housing Price") +
scale_fill_discrete(name="OverallQual")+
theme(plot.title = element_text(hjust = 0.5), legend.position=c(0.9,0.7), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
# convert factor to numeric
select_train$ExterCond2 <- as.numeric(factor(select_train$ExterCond,
levels = c("Ex", "Fa","Gd", "TA","Po"),
labels = c(5,2,4,3,1) ,ordered = TRUE))
select_train$HeatingQC2 <- as.numeric(factor(select_train$HeatingQC,
levels = c("Ex", "Fa","Gd", "TA","Po"),
labels = c(5,2,4,3,1) ,ordered = TRUE))
select_train$CentralAir2 <- as.numeric(factor(select_train$CentralAir,
levels = c("N", "Y"),
labels = c(0,1) ,ordered = TRUE))
#select variables that be used for model buidling and heat map
model_var <- c('SalePrice',
'OverallQual','OverallCond','YearBuilt','ExterCond2',
'TotalBsmtSF','HeatingQC2',
'CentralAir2','GrLivArea','BedroomAbvGr','KitchenAbvGr',
'TotRmsAbvGrd','Fireplaces',
'GarageArea','OpenPorchSF','PoolArea',
'YrSold')
heat <- select_train[,model_var]
#plot correlation heatmap for SalePrice
options(repr.plot.width=8, repr.plot.height=6)
library(ggplot2)
library(reshape2)
qplot(x=Var1, y=Var2, data=melt(cor(heat, use="p")), fill=value, geom="tile") +
scale_fill_gradient2(low = "green", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Correlation") +
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 8, hjust = 1))+
coord_fixed()+
ggtitle("Figure 7 Correlation Heatmap") +
theme(plot.title = element_text(hjust = 0.4))
# scatter plot of GrLiveArea
# Change plot size to 5 x 4
options(repr.plot.width=9, repr.plot.height=6)
p1 <- ggplot(select_train, aes(x=GrLivArea, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 8 Scatter plot of SalePrice and GrLivArea") +
theme(plot.title = element_text(hjust = 0.4))
# scatter plot of TotalBsmtSF
p2 <- ggplot(select_train, aes(x=TotalBsmtSF, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 9 Scatter plot of SalePrice and TotalBsmtSF") +
theme(plot.title = element_text(hjust = 0.4))
#scatter plot of TotRmsAbvGrd
p3 <- ggplot(select_train, aes(x=TotRmsAbvGrd, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 10 Scatter plot of SalePrice and TotRmsAbvGrd") +
theme(plot.title = element_text(hjust = 0.4))
#scatter plot of GarageArea
p4 <- ggplot(select_train, aes(x=GarageArea, y=SalePrice)) +
geom_point(shape=1) +
geom_smooth(method=lm , color="red", se=FALSE)+
ggtitle("Figure 11 Scatter plot of SalePrice and GarageArea") +
theme(plot.title = element_text(hjust = 0.4))
library(gridExtra)
grid.arrange(p1, p2,p3,p4)
#prediction of lm
#build model dataset for linear regression
model_lin <- select_train[, model_var]
model_lin$lSalePrice <- log(model_lin$SalePrice)
#partition data
set.seed(10000)
train.index <- sample(c(1:dim(model_lin)[1]), dim(model_lin)[1]*0.8)
model_lin_train = model_lin[train.index,]
model_lin_valid <- model_lin[-train.index,]
# use lm() to run a linear regression of log SalePrice on all variables in the model dataset
linreg <- lm(lSalePrice~.-SalePrice, data = model_lin_train)
summary(linreg)
library(forecast)
#use predict() to make prediction on a new set
pred1 <- predict(linreg,model_lin_valid,type = "response")
residuals <- model_lin_valid$lSalePrice - pred1
linreg_pred <- data.frame("Predicted" = pred1, "Actual" = model_lin_valid$lSalePrice, "Residual" = residuals)
accuracy(pred1, model_lin_valid$lSalePrice)
# regression tree
library(rpart)
library(rpart.plot)
class.tree <- rpart(lSalePrice~.-SalePrice,
data = model_lin_train,control = rpart.control(cp = 0.01))
plotcp(class.tree)
printcp(class.tree)
rpart.plot(class.tree,
box.palette="GnBu",
branch.lty=3, shadow.col="gray", nn=TRUE)
#Random Forest
library(randomForest)
RF <- randomForest(lSalePrice ~.-SalePrice, data = model_lin_train,
importance =TRUE,ntree=500,nodesize=7, na.action=na.roughfix)
# variable importance
options(repr.plot.width=9, repr.plot.height=6)
varImpPlot(RF, type=1)
#prediction
rf.pred <- predict(RF, newdata=model_lin_valid )
accuracy(rf.pred, model_lin_valid$lSalePrice)
plot(rf.pred, model_lin_valid$lSalePrice, main = "Figure 12 Predicted vs. Actual log SalePrice")
abline(0,1)
# Getting monthly deaths
## Importing required modules
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
## Loading csv file as pandas dataframe
```
usaDeath = pd.read_csv("assets/main data/florida/covid_deaths_usafacts.csv")
usaDeath.tail()
```
## Cropping and managing date column
```
florida = usaDeath[usaDeath["State"] == "FL"].drop(
columns=["County Name", "State", "StateFIPS", "countyFIPS"]).transpose().diff().reset_index().rename(columns={"index": "date"})
florida['date'] = pd.to_datetime(florida["date"])
florida = florida.sort_values(by=["date"], ascending=True)
florida.tail() # tail shows the last five rows
```
## Modify dataframe and add month_year column
```
florida["month_year"] = florida['date'].dt.strftime("%Om-%Y")
# Changing the order of column
cols = florida.columns.tolist()
if(cols[-1] == "month_year"):
cols.insert(1, cols[-1])
cols.pop()
florida = florida[cols]
florida.tail()
```
## Summation over the columns
```
florida["total_death"] = florida.iloc[:,2:].sum(axis=1)
# Changing the order of column
cols = florida.columns.tolist()
if(cols[-1] == "total_death"):
cols.insert(2, cols[-1])
cols.pop()
florida = florida[cols]
floridaMainData = florida.iloc[:,:3].drop(columns=["date"])
floridaMainData
```
## Group by the month of the year
```
floridaMonthlyDeath = floridaMainData.groupby(by="month_year").sum().reset_index()
floridaMonthlyDeath["month_year"] = pd.to_datetime(floridaMonthlyDeath["month_year"])
floridaMonthlyDeath = floridaMonthlyDeath.sort_values(by=["month_year"])
floridaMonthlyDeath
```
# Analyzing VIIRS average radiance
## Initialize the ee and geemap modules
```
import geemap
import ee
try:
    ee.Initialize()
except Exception:
    ee.Authenticate()
    ee.Initialize()
```
## Drawing Florida on the map
```
floridaGeo = ee.FeatureCollection("users/amirhkiani1998/florida").geometry()
```
## Get VIIRS from Google Earth Engine
```
viirs = ee.ImageCollection(
"NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG").filterDate("2019-09-01", "2021-10-01").select("avg_rad")
newMap = geemap.Map()
newMap.addLayer(viirs.first().clip(floridaGeo))
newMap.centerObject(floridaGeo)
newMap
```
## Combine reducers to get mean, standard deviation, min, max
```
reducers = ee.Reducer.mean().combine(reducer2=ee.Reducer.stdDev(), sharedInputs=True
).combine(reducer2=ee.Reducer.min(), sharedInputs=True
).combine(reducer2=ee.Reducer.max(), sharedInputs=True)
```
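When a combined reducer is applied to a single band, the output dictionary keys are the band name with the reducer name appended, which is where the `avg_rad_mean` column used later comes from. As a quick, optional check, assuming the `viirs`, `floridaGeo`, and `reducers` objects defined above and valid Earth Engine credentials:
```
# Inspect the keys produced by the combined reducer on one clipped image.
# Expect names like 'avg_rad_mean', 'avg_rad_stdDev', 'avg_rad_min', 'avg_rad_max'.
sample_stats = viirs.first().clip(floridaGeo).reduceRegion(reducer=reducers, bestEffort=True)
print(sample_stats.getInfo())
```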
## Map over all images in VIIRS
```
def imageClip(image):
return image.clip(floridaGeo)
viirsClipped = viirs.map(imageClip)
```
## Change type of **clipped viirs image collection** to **list**
```
size = viirsClipped.size().getInfo()
viirsClippedList = viirsClipped.toList(count=viirsClipped.size().getInfo())
myMap = geemap.Map()
sampleImage = ee.Image(viirsClippedList.get(8))
myMap.addLayer(sampleImage)
print(sampleImage.date().format().getInfo())
myMap.centerObject(floridaGeo)
myMap
```
## Downloading data from VIIRS
```
data = []
for i in range(size):
image = ee.Image(viirsClippedList.get(i))
date = image.date().format().getInfo()
stats = image.reduceRegion(reducer=reducers, bestEffort=True)
imageData = stats.getInfo()
imageData["date"] = date
data.append(imageData)
print(date)
```
## Make dataframe from VIIRS data
```
viirsDataframe = pd.DataFrame(data)
viirsDataframe["date"] = pd.to_datetime(viirsDataframe["date"])
viirsDataframe.head(20)
```
## Save **VIIRS data** and **Florida monthly death** as csv files
```
viirsDataframe.to_csv("assets/main data/Florida/VIIRS2019_09__2021_5.csv")
floridaMonthlyDeath.to_csv("assets/main data/Florida/monthly_death.csv")
```
## Load the dataframes (if not already loaded)
```
viirsDataframe = pd.read_csv("assets/main data/Florida/VIIRS2019_09__2021_5.csv")
floridaMonthlyDeath = pd.read_csv("assets/main data/Florida/monthly_death.csv")
viirsDataframe["date"] = pd.to_datetime(viirsDataframe["date"])
floridaMonthlyDeath["month_year"] = pd.to_datetime(floridaMonthlyDeath["month_year"])
```
## Standardizing **total_death** and **avg_rad_mean**
```
from sklearn.preprocessing import StandardScaler
# VIIRS Radian scaling
scalerFloridaRad = StandardScaler()
scalerFloridaRad.fit(np.array(viirsDataframe["avg_rad_mean"].values).reshape(-1, 1))
scaledFloridaRad = scalerFloridaRad.transform(
np.array(viirsDataframe["avg_rad_mean"].values).reshape(-1, 1))
# Florida Monthly Death scaling
scalerTotalDeath = StandardScaler()
scalerTotalDeath.fit(
np.array(floridaMonthlyDeath["total_death"].values).reshape(-1, 1))
scaledTotalDeath = scalerTotalDeath.transform(
np.array(floridaMonthlyDeath["total_death"].values).reshape(-1, 1))
```
## Plot both average radiance and Florida monthly deaths
```
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True, figsize=(18, 8))
ax1.plot(viirsDataframe["date"],
scaledFloridaRad, label="VIIRS Avg. Radian", color="#463842")
ax1.set_ylabel("Scaled Average Radian", fontsize=12)
ax1.set_xlabel("Date", fontsize=20)
ax2.plot(floridaMonthlyDeath["month_year"],
scaledTotalDeath, label="Monthly Death", color="#823842", linewidth=2)
ax2.set_ylabel("Scaled Total Death", fontsize=12)
# Add Grid to plots
ax2.grid(axis="y")
ax2.grid(axis="x")
ax1.grid(axis="y")
ax1.grid(axis="x")
# Rotate the x labels to avoid overlap
fig.autofmt_xdate()
fig.savefig("florida.png", format="png", dpi=600)
floridaMonthlyDeath
```
## Getting Tehran geometry
```
tehranGeo = ee.FeatureCollection("users/amirhkiani1998/teh").geometry()
```
## Making a map function to clip the image collection to Tehran
```
def mapTehran(image):
return image.clip(tehranGeo)
```
## Clipping the VIIRS collection to Tehran
```
viirsTehran = viirs.map(mapTehran)
```
## Changing the collection (viirsTehran) to a list
```
sizeViirsTehran = viirsTehran.size().getInfo()
viirsTehranList = viirsTehran.toList(sizeViirsTehran)
```
## Downloading the data from VIIRS (clipped to Tehran)
```
data = []
for i in range(sizeViirsTehran):
image = ee.Image(viirsTehranList.get(i))
date = image.date().format().getInfo()
stats = image.reduceRegion(reducer=reducers, bestEffort=True)
imageData = stats.getInfo()
imageData["date"] = date
data.append(imageData)
print(date)
```
## Making dataframe from downloaded data
```
tehranDataframe = pd.DataFrame(data)
tehranDataframe["date"] = pd.to_datetime(tehranDataframe["date"])
tehranDataframe = tehranDataframe.sort_values(by=["date"])
# Drop rows where avg_rad_mean is zero (no valid data)
tehranDataframe = tehranDataframe[tehranDataframe["avg_rad_mean"] != 0]
tehranDataframe
```
## Scaling the average radiance in Tehran
```
from sklearn.preprocessing import StandardScaler
# VIIRS Radian scaling
scalerTehranRad = StandardScaler()
scalerTehranRad.fit(np.array(tehranDataframe["avg_rad_mean"].values).reshape(-1, 1))
scaledTehranRad = scalerTehranRad.transform(
np.array(tehranDataframe["avg_rad_mean"].values).reshape(-1, 1))
```
## Plotting the three series together
```
plt.rcParams["font.family"] = "serif"
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True, figsize=(18, 8))
ax1.plot(tehranDataframe["date"],
scaledTehranRad, label="VIIRS Tehran Radian", color="#366952")
ax1.set_ylabel("Scaled Tehran Radian", fontsize=10)
ax2.plot(viirsDataframe["date"],
scaledFloridaRad, label="VIIRS Florida Radian", color="#463842")
ax2.set_ylabel("Scaled Florida Radian", fontsize=10)
ax2.set_xlabel("Date", fontsize=20)
ax3.plot(floridaMonthlyDeath["month_year"],
scaledTotalDeath, label="Monthly Death", color="#823842", linewidth=2)
ax3.set_ylabel("Scaled Total Death", fontsize=10)
# Add Grid to plots
ax1.grid(axis="y")
ax1.grid(axis="x")
ax2.grid(axis="y")
ax2.grid(axis="x")
ax3.grid(axis="y")
ax3.grid(axis="x")
# Rotate the x labels to avoid overlap
fig.autofmt_xdate()
fig.legend()
fig.savefig("florida-tehran.png", format="png", dpi=600)
```
```
import nibabel as nib
import matplotlib.pyplot as plt
def show_mid_slices(image):
""" Function to display row of image middle slices """
shape = image.shape
slices = [image[int(shape[0]/2), :, :],
image[:, int(shape[1]/2), :],
image[:, :, int(shape[2]/2)]]
fig, axes = plt.subplots(1, len(slices), figsize=(500,200))
for i, slice in enumerate(slices):
axes[i].imshow(slice.T, cmap="gray", origin="lower")
def show_vol_slices(image):
""" Function to display slices from several volumes """
shape = image.shape
vols = shape[3]
slices = image[:, :, int(shape[2]/2), :]
fig, axes = plt.subplots(int(vols/5)+1, 5, figsize=(50,20*(int(vols/5)+1)))
print((500,200*(int(vols/5)+1)))
for i in range(vols):
if vols > 5:
axes[int(i/5),i%5].imshow(slices[:,:,i].T, cmap="gray", origin="lower")
else:
axes[i].imshow(slices[:,:,i].T, cmap="gray", origin="lower")
# pa and ap parameters are mandatory
pa = ""
ap = ""
# bvec and bval need to be specified only for the main encoding direction
# and only if the basename is different from the nifti file
bvec = ""
bval = ""
# the main encoding direction (PA or AP)
main_dir = "PA"
# the readout may be omitted if it is the same for both directions
readout_pa = 0.1
readout_ap = 0.1
# the position of the actual b0 volumes
pa_b0 = 0
ap_b0 = 0
if main_dir == "PA":
data = pa
ix = 1
else:
data = ap
ix = 2
if bvec == "":
bvec = '/'.join([*data.split('/')[:-1], '']) + data.split('/')[-1].split('.')[0] + '.bvec'
bval = '/'.join([*data.split('/')[:-1], '']) + data.split('/')[-1].split('.')[0] + '.bval'
```
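For illustration only, this is what the basename-derived `bvec` path from the cell above looks like for a hypothetical input file (the path below is made up and not part of the pipeline):
```
# Hypothetical example of the bvec/bval path derivation above
example = "dwi/sub-01_dir-PA_dwi.nii"
example_bvec = '/'.join([*example.split('/')[:-1], '']) + example.split('/')[-1].split('.')[0] + '.bvec'
print(example_bvec)  # dwi/sub-01_dir-PA_dwi.bvec
```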
# Sample slices for PA image
```
pa_img = nib.load(pa)
pa_img_data = pa_img.get_fdata()
show_mid_slices(pa_img_data[:,:,:,pa_b0])
```
# Sample slices for AP image
```
ap_img = nib.load(ap)
ap_img_data = ap_img.get_fdata()
show_mid_slices(ap_img_data[:,:,:,ap_b0])
%%bash -s "$pa" "$ap"
dwidenoise $1 PA_denoised.nii
mrdegibbs PA_denoised.nii PA_unringed.nii
dwidenoise $2 AP_denoised.nii
mrdegibbs AP_denoised.nii AP_unringed.nii
```
# Sample slices for unringed PA image
```
pa_img = nib.load('PA_unringed.nii')
pa_img_data = pa_img.get_fdata()
show_mid_slices(pa_img_data[:,:,:,pa_b0])
```
# Sample slices for unringed AP image
```
ap_img = nib.load('AP_unringed.nii')
ap_img_data = ap_img.get_fdata()
show_mid_slices(ap_img_data[:,:,:,ap_b0])
%%bash -s "$pa_b0" "$ap_b0" "$readout_pa" "$readout_ap"
fslroi PA_unringed.nii b0_blip_up.nii.gz $1 1
fslroi AP_unringed.nii b0_blip_down.nii.gz $2 1
fslmerge -t b0_blip_up_down.nii.gz b0_blip_up.nii.gz b0_blip_down.nii.gz
printf "0 1 0 $3\n0 -1 0 $4" > params.txt
topup --imain=b0_blip_up_down --datain=params.txt --config=b02b0.cnf --out=topup_results --iout=hifi
fslmaths hifi -Tmean hifi
bet hifi hifi_brain -m
```
# Topup results
```
hifi_img = nib.load('hifi.nii.gz')
hifi_img_data = hifi_img.get_fdata()
show_mid_slices(hifi_img_data)
%%bash -s "$data" "$ix" "$bvec" "$bval"
vols=$(mrinfo $1 | grep "Dimensions" | cut -d 'x' -f 4 | tr -d ' ')
indx=""
for ((i=1; i<=$vols; i+=1)); do indx="$indx $2"; done
echo $indx > index.txt
eddy --imain=$1 --mask=hifi_brain_mask --acqp=params.txt --index=index.txt \
--bvecs=$3 --bvals=$4 --topup=topup_results --out=eddy_corrected --data_is_shelled
```
# Eddy results
```
eddy_img = nib.load('eddy_corrected.nii.gz')
eddy_img_data = eddy_img.get_fdata()
show_vol_slices(eddy_img_data)
```
# Geolocation of a dataset of Argentine schools
```
# Import libraries
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
### Data preparation
```
# Load the data ...
# Read the csv
df = pd.read_csv('../../datos/Población_estudiantil.csv', header= None)
df.columns = ['Universidad']
df['Address'] = 'Universidad ' + df['Universidad'].astype(str) + ', Argentina'
```
### Geolocation
```
import json
import time
import urllib.parse
import urllib.request
def geolocate(inp, API_key=None, BACKOFF_TIME=30):
    # See https://developers.google.com/maps/documentation/timezone/get-api-key
    # with open('googleMapsAPIkey.txt', 'r') as myfile:
    #    maps_key = myfile.read().replace('\n', '')
    base_url = 'https://maps.googleapis.com/maps/api/geocode/json'
    # This joins the parts of the URL together into one string.
    url = base_url + '?' + urllib.parse.urlencode({
        'address': "%s" % (inp),
        'key': API_key,
    })
    try:
        # Get the API response.
        response = urllib.request.urlopen(url).read().decode('utf-8')
    except IOError:
        return None
    result = json.loads(response)
    if result['status'] == 'OK':
        return result['results'][0]
    elif result['status'] == 'OVER_QUERY_LIMIT':
        # If we're over the API limit, back off for a while and then try again.
        print("Hit Query Limit! Backing off for " + str(BACKOFF_TIME) + " minutes...")
        time.sleep(BACKOFF_TIME * 60)
        return geolocate(inp, API_key, BACKOFF_TIME)
    else:
        # Many API errors cannot be fixed by a retry, e.g. INVALID_REQUEST or
        # ZERO_RESULTS. There is no point retrying these requests.
        return None
def set_geolocation_values(df, i, loc):
    df.at[i, 'lng'] = loc['geometry']['location']['lng']
    df.at[i, 'lat'] = loc['geometry']['location']['lat']
    df.at[i, 'id'] = loc['place_id']
dataframe = df
col = 'Address'
API_key = 'AIzaSyDjBFMZlNTyds2Sfihu2D5LTKupKDBpf6c'
for i, row in dataframe.iterrows():
    loc = geolocate(row[col], API_key)
    if loc:
        set_geolocation_values(dataframe, i, loc)
    if i % 10 == 0:
        print('processed row ' + str(i))
dataframe.to_csv('../../datos/univ_geoloc.csv', index=False, encoding='utf8')
df
```
<table>
<tr align=left><td><img align=left src="./images/CC-BY.png">
<td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli</td>
</table>
```
from __future__ import print_function
from __future__ import absolute_import
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
```
# Interpolation
There are times when you have estimates for the values of a function for specific inputs. The values of the function may be obtained in a variety of ways either through experiment or through the use of other approximation techniques. Our goal in this chapter is to explore techniques that allow us to determine a new function whose values match the known observations at a set of predetermined input values. We first formally define the term we will use to describe the process.
**Definition:** Given a discrete set of values $y_i$ at locations $x_i$, an *interpolant* is a (piece-wise) continuous function $f(x)$ that passes exactly through the data (*i.e.* $f(x_i) = y_i$).
**Example 0** The linear polynomial
$$
P_1(x) = 2(x-1)+3
$$
interpolates the coordinates $(1,3)$ and $(3,7)$.
In general a polynomial of degree $N$ can be used to interpolate $N+1$ data points. There are many different kinds of functions to use to interpolate values, but here we focus on polynomials.
## Applications
- Data filling
- Function approximation
- Fundamental component of other algorithms
- Root finding (secant method)
- Optimization, minima/maxima (successive parabolic interpolation)
- Numerical integration and differentiation
## Polynomial Interpolation
**Theorem:** There is a *unique* polynomial of degree $N$, $\mathcal{P}_N(x)$, that passes exactly through $N + 1$ values $y_1, y_2, \ldots, y_N, y_{N+1}$ at *distinct* points $x_1, x_2, \ldots, x_N, x_{N+1}$.
This is a consequence of counting unknowns: a degree $N$ polynomial $\mathcal{P}_N(x)$ has $N + 1$ coefficients, which are matched by the $N + 1$ interpolation conditions.
#### Example 1: 2 Points
Given points are $(x_0, y_0)$ and $(x_1, y_1)$ which will lead to a line:
Define $P_1(x) = p_1 x + p_0$ and use the two points to find $p_0$ and $p_1$:
We first note that we have two equations and two unknowns. The two equations can be found by assuming the function $P_1(x)$ interpolates the two data points
$$
\begin{align}
y_0 &= p_1 x_0 + p_0, \\
    y_1 &= p_1 x_1 + p_0.
\end{align}
$$
In this example we will solve the first equation for $p_0$, substitute the result into the second equation, and then solve for $p_1$.
$$y_0 = p_1 x_0 + p_0 \quad \Rightarrow \quad p_0 = y_0 - p_1 x_0$$
$$\begin{aligned}
y_1 &= p_1 x_1 + p_0 & \Rightarrow \\
y_1 &= p_1 x_1 + y_0 - p_1 x_0 & \Rightarrow \\
p_1 &= \frac{y_1 - y_0}{x_1 - x_0} & \Rightarrow \\
p_0 &= y_0 - \frac{y_1 - y_0}{x_1 - x_0} x_0 &
\end{aligned}$$
$$\mathcal{P}_1(x) = \frac{y_1 - y_0}{x_1 - x_0} x + y_0 - \frac{y_1 - y_0}{x_1 - x_0} x_0 = \frac{y_1 - y_0}{x_1 - x_0} (x - x_0) + y_0$$
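As a quick sanity check (an addition to these notes, using the `numpy` import from the top of the notebook), plugging the two points from Example 0, $(1, 3)$ and $(3, 7)$, into this formula reproduces $P_1(x) = 2(x - 1) + 3$:
```
# Verify the two-point formula against Example 0: (x_0, y_0) = (1, 3), (x_1, y_1) = (3, 7)
x0, y0, x1, y1 = 1.0, 3.0, 3.0, 7.0
P1 = lambda x: (y1 - y0) / (x1 - x0) * (x - x0) + y0

x_check = numpy.linspace(1.0, 3.0, 5)
print(numpy.allclose(P1(x_check), 2.0 * (x_check - 1.0) + 3.0))   # True
```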
#### Example 2: 3 Points
Given points are $(x_0, y_0)$, $(x_1, y_1)$, and $(x_2, y_2)$ which will lead to quadratic polynomial:
Define $\mathcal{P}_2(x) = p_2 x^2 + p_1 x + p_0$ leading to the equations
$$y_0 = p_2 x_0^2 + p_1 x_0 + p_0$$
$$y_1 = p_2 x_1^2 + p_1 x_1 + p_0$$
$$y_2 = p_2 x_2^2 + p_1 x_2 + p_0$$
This gets complicated quickly! Note, we have three equations and three unknowns, and the previous system is a linear system of three equations. A more general approach to solving the system will be explored later, but first it is important to determine whether or not the system even has a solution.
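As a preview of that more general approach, the three equations above form a small linear system that `numpy` can solve directly; here is a minimal sketch where the three sample points are made up purely for illustration:
```
# Hypothetical sample points (x_i, y_i), chosen only for illustration
x_pts = numpy.array([0.0, 1.0, 2.0])
y_pts = numpy.array([1.0, 3.0, 2.0])

# Rows are [1, x_i, x_i^2], matching P_2(x) = p_0 + p_1 x + p_2 x^2
V = numpy.vander(x_pts, 3, increasing=True)
p = numpy.linalg.solve(V, y_pts)

print(p)                                                      # [p_0, p_1, p_2]
print(numpy.allclose(numpy.polyval(p[::-1], x_pts), y_pts))   # the quadratic hits all three points
```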
### Proof - Uniqueness of Polynomial Interpolants
Let
$$\mathcal{P}_N(x) = \sum^N_{n=0} p_n x^n $$
or
$$\mathcal{P}_N(x) = p_0 + p_1 x + \cdots + p_{N - 1} x^{N - 1} + p_{N} x^N$$
and require $\mathcal{P}_N(x_i) = y_i$ for $i=0,1,\ldots,N$ and $x_i \neq x_j ~~~ \forall i,j$.
Assume there exists another polynomial
$$Q_N(x) = \sum^N_{n=0} q_n x^n$$
that passes through the same set of points such that $Q_N(x_i) = y_i$. Now compute $T_N(x) = \mathcal{P}_N(x) - Q_N(x)$:
We know by assumption that $T_N(x_i) = 0$ at the $N + 1$ interpolation points, but what about for all $x$?
$$T_N(x) = \mathcal{P}_N(x) - Q_N(x) = \sum^N_{n=0} p_n x^n - q_n x^n  = \sum^N_{n=0} (p_n - q_n) x^n$$
Since $T_N(x)$ is a polynomial of degree at most $N$ with $N + 1$ distinct roots, it must be identically zero.  Hence $p_n - q_n = 0$ for every $n$, and therefore $\mathcal{P}_N(x) = Q_N(x)$.
### Monomial Basis
Let $\mathcal{P}_N(x) = \sum^N_{n=0} p_n x^n$, $\mathcal{P}_N(x)$ can be represented by a *linear combination* of the monomials
$$1, x, x^2, x^3, \ldots, x^{N-1}, x^N$$
with weights
$$p_0, p_1, p_2, p_3, \ldots, p_{N-1}, \text{and } p_N$$
respectively.
#### Example 3: Monomial Basis
Consider $\mathcal{P}_3(x) = p_0 + p_1 x + p_2 x^2 + p_3 x^3$ with the four data points $(x_i, y_i), ~~ i = 0,1,2,3$. We have four equations and four unknowns as expected:
$$\mathcal{P}_3(x_0) = p_0 + p_1 x_0 + p_2 x_0^2 + p_3 x_0^3 = y_0$$
$$\mathcal{P}_3(x_1) = p_0 + p_1 x_1 + p_2 x_1^2 + p_3 x_1^3 = y_1$$
$$\mathcal{P}_3(x_2) = p_0 + p_1 x_2 + p_2 x_2^2 + p_3 x_2^3 = y_2$$
$$\mathcal{P}_3(x_3) = p_0 + p_1 x_3 + p_2 x_3^2 + p_3 x_3^3 = y_3$$
Lets rewrite these as a matrix equation:
$$\vec{x} = \begin{bmatrix} x_0 \\ x_1 \\ x_2 \\ x_3 \end{bmatrix} \quad \vec{y} = \begin{bmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{bmatrix} \quad \vec{p} = \begin{bmatrix} p_0 \\ p_1 \\ p_2 \\ p_3 \end{bmatrix}$$
When we write the system in matrix/vector form the matrix that arises is called a *Vandermonde* matrix:
$$
V = \begin{bmatrix}
1 & x_0 & x_0^2 & x_0^3 \\
1 & x_1 & x_1^2 & x_1^3 \\
1 & x_2 & x_2^2 & x_2^3 \\
1 & x_3 & x_3^2 & x_3^3
\end{bmatrix}.
$$
We can now write the system of linear equations as $V \vec{p} = \vec{y}$:
$$\begin{bmatrix}
1 & x_0 & x_0^2 & x_0^3 \\
1 & x_1 & x_1^2 & x_1^3 \\
1 & x_2 & x_2^2 & x_2^3 \\
1 & x_3 & x_3^2 & x_3^3
\end{bmatrix} \begin{bmatrix} p_0 \\ p_1 \\ p_2 \\ p_3 \end{bmatrix} = \begin{bmatrix} y_0 \\ y_1 \\ y_2 \\ y_3 \end{bmatrix}.$$
- What happens if we have redundant data? Either $(x_i, y_i)$ is repeated or for one $i$ we have two values of $y$.
- What if we have more points then the order of polynomial we want?
- How does this relate to solving the above linear system of equations?
Vandermonde matrices in general are defined as
$$V = \begin{bmatrix}
1 & x_0 & x_0^2 & \cdots & x_0^N \\
1 & x_1 & x_1^2 & \cdots & x_1^N \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & x_m & x_m^2 & \cdots & x_m^N \\
\end{bmatrix}
$$
where $V$ is an $(m + 1) \times (N + 1)$ matrix built from the points $(x_i, y_i)$ for $i = 0, 1, 2, 3, \ldots, m$ and an order $N$ polynomial $\mathcal{P}_N(x)$.
### Finding $p_i$
Finding the coefficients of $\mathcal{P}_N(x)$ can be done by solving the system outlined above. There are functions in `numpy` that can do this for us such as:
- `numpy.polyfit(x, y, x.shape[0] - 1)`
- `numpy.vander(x, N=None)` to construct the matrix and use a linear solver routine.
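A minimal sketch of both routes listed above (the sample data is arbitrary and only for illustration):
```
# Arbitrary sample data, purely for illustration
x_data = numpy.array([-1.0, 0.0, 1.0, 2.0])
y_data = numpy.array([ 1.0, 0.0, 1.0, 5.0])

# Route 1: numpy.polyfit returns coefficients ordered from the highest degree down
p_fit = numpy.polyfit(x_data, y_data, x_data.shape[0] - 1)

# Route 2: build the Vandermonde matrix and solve the linear system directly
V = numpy.vander(x_data, x_data.shape[0])      # columns ordered x^N, ..., x, 1
p_vander = numpy.linalg.solve(V, y_data)

print(numpy.allclose(p_fit, p_vander))         # same interpolating polynomial either way
```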
We can also use a different basis that might be easier to use.
### Lagrangian Basis
Given $N+1$ points $(x_0,y_0), (x_1,y_1), \ldots, (x_{N},y_{N})$ again assuming the $x_i$ are all unique, the interpolating polynomial $\mathcal{P}_N(x)$ can be written as
$$\mathcal{P}_N(x) = \sum^{N}_{i=0} y_i \ell_i(x)$$
where
$$\ell_i(x) = \prod^{N}_{j=0, j \neq i} \frac{x - x_j}{x_i - x_j} = \frac{x - x_0}{x_i - x_0} \frac{x - x_1}{x_i - x_1} \cdots \frac{x - x_{i-1}}{x_i - x_{i-1}}\frac{x - x_{i+1}}{x_i - x_{i+1}} \cdots \frac{x - x_{N}}{x_i - x_{N}}$$
Note that $\ell_i(x_i) = 1$ and $\forall j\neq i, ~~ \ell_i(x_j) = 0$.
**Example 0 Revisited** In example 0 above the linear polynomial that interpolates the coordinates $(1,3)$ and $(3,7)$ was simply stated as
$$
P_1(x) = 2(x-1)+3.
$$
Another way to look at this example is to first note that when we add two linear polynomials
the result is another linear polynomial. The first polynomial to define interpolates $(1,1)$
and $(3,0)$,
$$
\ell_0(x) = \frac{x-3}{1-3}.
$$
The second polynomial to define interpolates $(1,0)$ and $(3,1)$,
$$
\ell_1(x) = \frac{x-1}{3-1}.
$$
A linear combination of these two functions can be defined that will interpolate the points $(1,3)$ and $(3,7)$,
$$
P_1(x) = 3\cdot\ell_0(x) + 7\cdot\ell_1(x).
$$
The graphs of these functions are shown below.
```
# =============================================================
# Plot the two example basis functions in the current example
x = numpy.linspace(1.0, 3.0, 2)
fig_Ex0a = plt.figure()
axes_Ex0a = fig_Ex0a.add_subplot(1, 2, 1)
axes_Ex0a.plot(x, (x-3)/(-2),color='r', label="$\ell_{%s}(x)$" % 0)
axes_Ex0a.set_title("Lagrange Basis $\ell_0(x)$")
axes_Ex0a.set_xlabel("x")
axes_Ex0a.set_ylabel("$\ell_0(x)$")
axes_Ex0b = fig_Ex0a.add_subplot(1, 2, 2)
axes_Ex0b.plot(x, (x-1)/(2),color='b', label="$\ell_{%s}(x)$" % 1)
axes_Ex0b.set_title("Lagrange Basis $\ell_1(x)$")
axes_Ex0b.set_xlabel("x")
axes_Ex0b.set_ylabel("$\ell_1(x)$")
fig_Ex0c = plt.figure()
axes_Ex0c = fig_Ex0c.add_subplot(1, 1, 1)
axes_Ex0c.set_ylim([0,9])
axes_Ex0c.plot(x, 3*(x-3)/(-2) + 7*(x-1)/(2),color='g')
axes_Ex0c.set_title("Interpolant for (1,3) and (3,7)")
axes_Ex0c.set_xlabel("x")
axes_Ex0c.set_ylabel("Interpolant")
```
#### Example 4: $N = 1$ Lagrange Polynomial
Given 2 points $(x_0, y_0)$ and $(x_1, y_1)$ the Lagrange form of $\mathcal{P}_N(x)$ is given by
$$\ell_0(x) = \frac{x - x_1}{x_0 - x_1}$$
and
$$\ell_1(x) = \frac{x - x_0}{x_1 - x_0}$$
so that
$$\mathcal{P}_1(x) = \ell_0(x) \cdot y_0 + \ell_1(x) \cdot y_1 = \frac{x - x_1}{x_0 - x_1} \cdot y_0 + \frac{x - x_0}{x_1 - x_0} \cdot y_1$$
One important aspect of Lagrange polynomials to note is that the $\ell_i(x)$ functions are exactly 1 when $x = x_i$ and that every other $\ell_j(x)$ where $j \neq i$ is 0.
```
data = numpy.array([[-1.5, -0.5], [0.0, 0.5]])
# data = numpy.array([[-1.5, -0.5], [0.0, 0.5], [-0.5, 1.0]])
N = data.shape[0] - 1
M = data.shape[0]
x = numpy.linspace(-2.0, 2.0, 100)
# ====================================================
# Compute the Lagrange basis (\ell_i(x))
def lagrange_basis(x, data):
"""Compute Lagrange basis at x given data"""
basis = numpy.ones((data.shape[0], x.shape[0]))
for i in range(data.shape[0]):
for j in range(data.shape[0]):
if i != j:
basis[i, :] *= (x - data[j, 0]) / (data[i, 0] - data[j, 0])
return basis
# ====================================================
# Calculate full polynomial
def poly_interpolant(x, data):
"""Compute polynomial interpolant of (x,y) using Lagrange basis"""
P = numpy.zeros(x.shape[0])
basis = lagrange_basis(x, data)
for n in range(data.shape[0]):
P += basis[n, :] * data[n, 1]
return P
# ====================================================
# Plot individual basis functions
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=8)
# Plot full polynomial P_N(x)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
for point in data:
axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
plt.show()
```
#### Example 5:  Interpolate $\sin(2 \pi x)$ from six points
Use six points to approximate $\sin(2 \pi x)$ on the interval $x \in [-1, 1]$.  What is the behavior as $N \rightarrow \infty$?  Also plot the error between $f(x)$ and the interpolant $P_N(x)$ (a sketch of this error plot follows the code below).
```
num_points = 4
# num_points = 5
# num_points = 6
# num_points = 20
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = numpy.sin(2.0 * numpy.pi * data[:, 0])
N = data.shape[0] - 1 # Degree of polynomial
M = data.shape[0]
x = numpy.linspace(-1.0, 1.0, 100)
# ====================================================
# Plot individual basis functions
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=1)
# Plot full polynomial P_N(x)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
axes.plot(x, numpy.sin(2.0 * numpy.pi * x), 'r--', label="True $f(x)$")
for point in data:
axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
axes.legend(loc=1)
plt.show()
```
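The error plot requested above is not in the cell; a minimal sketch, reusing `data`, `x`, and `poly_interpolant` from the cell above, is
```
# Error between the interpolant and f(x) = sin(2 pi x) on the sampled interval
error = poly_interpolant(x, data) - numpy.sin(2.0 * numpy.pi * x)

fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, error, 'k')
axes.set_title("Error $P_N(x) - f(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x) - f(x)$")
plt.show()
```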
#### Example 6: Runge's Function
Interpolate $f(x) = \frac{1}{1 + 25 x^2}$ using 6 points of your choosing on $x \in [-1, 1]$.
Try it with 11 points.
Keep increasing the number of points and see what happens.
```
def f(x):
return 1.0 / (1.0 + 25.0 * x**2)
x = numpy.linspace(-1, 1, 100)
# x = numpy.linspace(-2, 2, 100)
num_points = 4
# num_points = 10
# num_points = 20
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1
# Plot the results
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_{%s}(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
plt.show()
```
#### Example 7: Weierstrass "Monster" Function
Defined as
$$
f(x) = \sum^\infty_{n=0} a^n \cos(b^n \pi x)
$$
such that
$$
0 < a < 1 \quad \text{and} \quad a b > 1 + \frac{3\pi}{2}.
$$
This function is continuous everywhere but not differentiable anywhere.
```
def f(x, a=0.9, N=100):
summation = 0.0
b = (1.0 + 3.0 / 2.0 * numpy.pi) / a + 0.01
print(b)
for n in range(N + 1):
summation += a**n * numpy.cos(b**n * numpy.pi * x)
return summation
x = numpy.linspace(-1, 1, 1000)
# x = numpy.linspace(-2, 2, 100)
num_points = 10
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1
# Plot the results
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_{%s}(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
plt.show()
```
### Rules of Thumb
 - Avoid high-order interpolants when possible!  As the examples above show, adding more equally spaced points can make the oscillations worse rather than better.
 - Avoid extrapolation!  Increase the range of $x$ in the example above and check how poor the approximation becomes beyond the sampling interval.
### Error Analysis
**Theorem:** Lagrange Remainder Theorem - Let $f(x) \in C^{N+1}[-1, 1]$, then
$$
f(x) = \mathcal{P}_N(x) + R_N(x)
$$
where $\mathcal{P}_N(x)$ is the interpolating polynomial and
$$
R_N(x) = Q(x) \frac{f^{(N+1)}(c)}{(N+1)!} \quad \text{with} \quad c \in [-1,1]
$$
with
$$
Q(x) = \prod^N_{i=0} (x - x_i) = (x-x_0)(x-x_1)\cdots(x-x_N) .
$$
A few things to note:
- For Taylor's theorem note that $Q(x) = (x - x_0)^{N+1}$ and the error only vanishes at $x_0$.
- For Lagrange's theorem the error vanishes at all $x_i$.
- To minimize $R_N(x)$ requires minimizing $|Q(x)|$ for $x \in [-1, 1]$.
#### Minimizing $R_N(x)$
Minimizing the error $R_N(x)$ in Lagrange's theorem is equivalent to minimizing $|Q(x)|$ for $x \in [-1, 1]$.
Minimizing the error $\Leftrightarrow$ choosing the roots of $Q(x)$, i.e. choosing the points where the interpolant data is located.  How do we do this?
### Chebyshev Polynomials
*Chebyshev polynomials* $T_N(x)$ are another basis that can be used for interpolation.
First 5 polynomials
$$T_0(x) = 1$$
$$T_1(x) = x$$
$$T_2(x) = 2 x^2 - 1$$
$$T_3(x) = 4 x^3 - 3 x$$
$$T_4(x) = 8x^4 - 8x^2 + 1$$
```
def cheb_poly(x, N):
"""Compute the *N*th Chebyshev polynomial and evaluate it at *x*"""
T = numpy.empty((3, x.shape[0]))
T[0, :] = numpy.ones(x.shape)
T[1, :] = x
if N == 0:
return T[0, :]
elif N == 1:
return T[1, :]
else:
for k in range(2, N + 1):
T[2, :] = 2.0 * x * T[1, :] - T[0, :]
T[0, :] = T[1, :]
T[1, :] = T[2, :]
return T[2, :]
x = numpy.linspace(-1, 1, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
for n in range(5):
axes.plot(x, cheb_poly(x, n), label="$T_%s$" % n)
axes.set_ylim((-1.1, 1.1))
axes.set_title("Chebyshev Polynomials")
axes.set_xlabel("x")
axes.set_ylabel("$T_N(x)$")
axes.legend(loc=1)
plt.show()
```
1. Chebyshev nodes of the 1st kind (roots)
$$
x_k = \cos \left (\frac{(2 k - 1) \pi}{2 N} \right ) \quad k = 1, \ldots, N
$$
1. Chebyshev nodes of the 2nd kind (extrema)
$$
x_k = \cos \left( \frac{k \pi}{N} \right) \quad k = 0, \ldots, N
$$
```
N = 5
x_extrema = numpy.cos(numpy.arange(N + 1) * numpy.pi / N)
x_nodes = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
# Plot points
axes.plot(x_extrema, numpy.zeros(N+1), 'ro')
axes.plot(x_nodes, numpy.zeros(N), 'bo')
# Plot some helpful lines
axes.plot((-1.0, -1.0), (-1.1, 1.1), 'k--')
axes.plot((1.0, 1.0), (-1.1, 1.1), 'k--')
axes.plot((-1.0, 1.0), (0.0, 0.0), 'k--')
for i in range(x_extrema.shape[0]):
axes.plot((x_extrema[i], x_extrema[i]), (-1.1, 1.1), 'r--')
axes.plot(x_extrema[i], cheb_poly(x_extrema, N)[i], 'ro')
print(x_extrema)
print(numpy.cos(x_extrema))
# Plot Chebyshev polynomial
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
# Labels
axes.set_title("Chebyshev Nodes and Extrema", fontsize="20")
axes.set_xlabel("x", fontsize="15")
axes.set_ylabel("$T_{N+1}(x)$", fontsize="15")
plt.show()
# First-kind Nesting (3 x)
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
N = 5
factor = 3
x_1 = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
x_2 = numpy.cos((2.0 * numpy.arange(1, factor * N + 1) - 1.0) / (2.0 * factor * N) * numpy.pi)
axes.plot(x_1, numpy.zeros(N), "o", color="r", markerfacecolor="lightgray", markersize="15")
axes.plot(x_2, numpy.zeros(N * factor), 'kx', markersize="10")
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.plot(x_hat, cheb_poly(x_hat, factor * N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.set_title("Nesting of 1st and 2nd Kind Chebyshev Polynomials")
axes.set_xlabel("$x$")
axes.set_ylabel("$T_N(x)$")
plt.show()
```
#### Properties of Chebyshev Polynomials
1. Defined by a recurrence relation
$$T_k(x) = 2 x T_{k-1}(x) - T_{k-2}(x)$$
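A quick numerical spot-check of this recurrence (an addition to these notes) using `numpy`'s built-in Chebyshev module:
```
# T_k evaluated via numpy's Chebyshev series: a coefficient vector with a 1 in slot k
x_check = numpy.linspace(-1, 1, 7)
T = lambda k: numpy.polynomial.chebyshev.chebval(x_check, [0] * k + [1])

k = 4
print(numpy.allclose(T(k), 2.0 * x_check * T(k - 1) - T(k - 2)))   # True
```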
#### Properties of Chebyshev Polynomials
2. Leading coefficient of $x^N$ in $T_N(x)$ is $2^{N-1}$ for $N \geq 1$
#### Properties of Chebyshev Polynomials
3. Extreme values:
$$|T_N(x)| \leq 1 \quad \text{for} \quad -1 \leq x \leq 1$$
#### Properties of Chebyshev Polynomials
4. Minimax principle:  The polynomial
$$T(x) = \frac{T_{N+1}(x)}{2^N}$$
is a *monic polynomial*, a polynomial whose leading coefficient equals 1, with the property that for any other monic polynomial $Q(x)$ of degree $N + 1$
$$
    \max_{x \in [-1, 1]} |T(x)| \leq \max_{x \in [-1, 1]} |Q(x)| , \quad \text{and}
$$
$$
    \max_{x \in [-1, 1]} |T(x)| = \frac{1}{2^N}
$$
Recall that the remainder term in the Lagrange Remainder Theorem was
$$
R_N(x) = Q(x) \frac{f^{(N+1)}(c)}{(N+1)!} \quad \text{with} \quad c \in [-1,1]
$$
with
$$
Q(x) = \prod^N_{i=0} (x - x_i) = (x-x_0)(x-x_1)\cdots(x-x_N) .
$$
#### Error Analysis Redux
Given that the Chebyshev polynomials are a minimum on the interval $[-1, 1]$ we would like $T(x) = Q(x)$.
Since the only freedom we have in $Q(x)$ is the choice of its roots (the points where the interpolant data is located), we require these points to be the roots of the Chebyshev polynomial $T_{N+1}(x)$, thereby enforcing $T(x) = Q(x)$.
The zeros of $T_N(x)$ in the interval $[-1, 1]$ can be shown to satisfy
$$
x_k = \cos\left( \frac{(2k - 1) \pi}{2 N} \right ) \quad \text{for} \quad k=1, \ldots, N
$$
These nodal points (sampling the function at these points) can be shown to minimize interpolation error.
```
x = numpy.linspace(0, numpy.pi, 100)
N = 15
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1, aspect="equal")
axes.plot(numpy.cos(x), numpy.sin(x), 'r--')
axes.plot(numpy.linspace(-1.1, 1.1, 100), numpy.zeros(x.shape), 'r')
for k in range(1, N + 1):
location = [numpy.cos((2.0 * k - 1.0) * numpy.pi / (2.0 * N)),
numpy.sin((2.0 * k - 1.0) * numpy.pi / (2.0 * N))]
axes.plot(location[0], location[1], 'ko')
axes.plot(location[0], 0.0, 'ko')
axes.plot([location[0], location[0]], [0.0, location[1]], 'k--')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-0.1, 1.1))
plt.show()
```
#### Summary
1. Minimizing the error in Lagrange's theorem is equivalent to minimizing
$$
|Q(x)| \quad \text{for} \quad x \in [-1, 1].
$$
1. We know Chebyshev polynomials are a minimum on the interval $[-1, 1]$ so we would like to have $T(x) = Q(x)$.
1. Since the only freedom we have in $Q(x)$ is the choice of its roots (the points where the interpolant data is located), we require these points to be the roots of the Chebyshev polynomial $T_{N+1}(x)$, thereby enforcing $T(x) = Q(x)$.
1. The zeros of $T_N(x)$ in the interval $[-1, 1]$ can be shown to satisfy
$$
x_k = \cos\left( \frac{(2k - 1) \pi}{2 N} \right ) \quad \text{for} \quad k=1, \ldots, N
$$
These nodal points (sampling the function at these points) can be shown to minimize interpolation error.
#### Notes
 - The Chebyshev nodes minimize interpolation error for any polynomial basis (by uniqueness, the interpolant through these points is the same polynomial regardless of the basis used).
- Chebyshev nodes uniquely define the Chebyshev polynomials.
 - The boundedness properties of Chebyshev polynomials are what lead us to the roots as a minimization, but there are other uses for these orthogonal polynomials.
- There are two kinds of Chebyshev nodes and therefore two definitions.
```
def f(x):
return 1.0 / (1.0 + 25.0 * x**2)
# Parameters
x = numpy.linspace(-1, 1, 100)
num_points = 25
# ============================================================
# Equidistant nodes
equidistant_data = numpy.empty((num_points, 2))
equidistant_data[:, 0] = numpy.linspace(-1, 1, num_points)
equidistant_data[:, 1] = f(equidistant_data[:, 0])
N = equidistant_data.shape[0] - 1
P_lagrange = poly_interpolant(x, equidistant_data)
# ============================================================
# Chebyshev nodes
chebyshev_data = numpy.empty((num_points, 2))
chebyshev_data[:, 0] = numpy.cos((2.0 * numpy.arange(1, num_points + 1) - 1.0) * numpy.pi / (2.0 * num_points))
chebyshev_data[:, 1] = f(chebyshev_data[:, 0])
P_cheby1 = poly_interpolant(x, chebyshev_data)
# Fit directly with Chebyshev polynomials
coeff = numpy.polynomial.chebyshev.chebfit(chebyshev_data[:, 0], chebyshev_data[:, 1], N)
P_cheby2 = numpy.polynomial.chebyshev.chebval(x, coeff)
# Check on unique polynomials
print(numpy.allclose(P_cheby1, P_cheby2))
# ============================================================
# Plot the results
fig = plt.figure()
fig.subplots_adjust(hspace=.5)
axes = fig.add_subplot(2, 1, 1)
axes.plot(x, P_lagrange, 'b', label="$P_%s(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(equidistant_data[:, 0], equidistant_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function at Equispaced Points")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=8)
print(numpy.linalg.norm(P_lagrange - f(x)))
axes = fig.add_subplot(2, 1, 2)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(x, P_cheby1, 'b', label="$P_%s(x)$" % N)
axes.plot(chebyshev_data[:, 0], chebyshev_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function at Chebyshev Points")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
print(numpy.linalg.norm(P_cheby1 - f(x)))
plt.show()
```
## Piece-Wise Polynomial Interpolation
Given $N$ points, use lower order polynomial interpolation to fit the function in pieces. We can choose the order of the polynomials and the continuity.
- $C^0$: Interpolant is continuous
- Linear interpolation
- Quadratic interpolation
- $C^1$: Interpolation and 1st derivative are continuous
- Cubic Hermite polynomials (PCHiP)
- $C^2$: Interpolation, 1st and 2nd derivatives are continuous
- Cubic splines
### Piece-Wise Linear
Given a segment between the points $(x_k, y_k)$ and $(x_{k+1}, y_{k+1})$ define the interpolant on that segment as
$$\mathcal{P}_k(x) = \frac{y_{k+1} - y_k}{x_{k+1} - x_k} (x - x_k) + y_k$$
The final interpolant $\mathcal{P}(x)$ is then defined on $[x_k, x_{k+1}]$ using this function.
```
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# Lagrange Basis
P_lagrange = poly_interpolant(x, data)
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
for n in range(1, N + 1):
P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
+ data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# Add end points for continuity
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_linear, 'r')
axes.set_title("Interpolated Data - $C^0$ Linear")
axes.set_xlabel("x")
axes.set_ylabel("$P_1(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
```
### Piece-Wise Overlapping Polynomials
For each set of three points $(x_{k-1}, y_{k-1})$, $(x_{k}, y_{k})$, and $(x_{k+1}, y_{k+1})$, find the quadratic interpolant $\mathcal{P}_k(x)$ and define the final interpolant $P(x)$ as $\mathcal{P}_k(x)$ on $[x_{k-1}, x_{k+1}]$.
```
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# C^0 Piece-wise quadratic
P_quadratic = numpy.zeros(x.shape)
for k in range(1, N + 1, 2):
p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_quadratic, 'r')
axes.set_title("Interpolated Data - $C^0$ Quadratic")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
```
### Piece-Wise $C^1$ Cubic Interpolation
For the previous two cases we had discontinuous 1st derivatives!  We can do better by constraining the derivative of the interpolant to be continuous at the boundaries of the piece-wise intervals.
Given a segment between points $(x_k, y_k)$ and $(x_{k+1}, y_{k+1})$ we want to fit a cubic function between the two points.
$$\mathcal{P}_k(x) = p_0 + p_1 x + p_2 x^2 + p_3 x^3$$
$$\mathcal{P}_k(x_k) = y_k, \quad \mathcal{P}_k(x_{k+1}) = y_{k+1}$$
Now we have 4 unknowns but only two data points! Constraining the derivative at each interval end will lead to two new equations and therefore we can solve for the interpolant.
$$\frac{\text{d}}{\text{dx}} \mathcal{P}_k(x_k) = d_k, \quad \frac{\text{d}}{\text{dx}} \mathcal{P}_k(x_{k+1}) = d_{k+1}$$
where we need to prescribe the $d_k$s. Since we know the polynomial we can write these 4 equations as
$$\begin{aligned}
p_0 + p_1 x_k + p_2 x_k^2 + p_3 x_k^3 &= y_k \\
p_0 + p_1 x_{k+1} + p_2 x_{k+1}^2 + p_3 x_{k+1}^3 &= y_{k+1} \\
p_1 + 2p_2 x_k + 3 p_3 x_k^2 &= d_k \\
p_1 + 2 p_2 x_{k+1} + 3 p_3 x_{k+1}^2 &= d_{k+1}
\end{aligned}$$
Rewriting this as a system we get
$$\begin{bmatrix}
1 & x_k & x_k^2 & x_k^3 \\
1 & x_{k+1} & x_{k+1}^2 & x_{k+1}^3 \\
0 & 1 & 2 x_k & 3 x_k^2 \\
0 & 1 & 2 x_{k+1} & 3 x_{k+1}^2
\end{bmatrix} \begin{bmatrix}
p_0 \\ p_1 \\ p_2 \\ p_3
\end{bmatrix} = \begin{bmatrix}
y_k \\ y_{k+1} \\ d_k \\ d_{k+1}
\end{bmatrix}$$
A common simplification to the problem description re-parameterizes the locations of the points such that $s \in [0, 1]$ and recasts the problem with $(0, y_k)$ and $(1, y_{k+1})$.  This simplifies the above system to
$$\begin{bmatrix}
1 & 0 & 0 & 0 \\
1 & 1 & 1 & 1 \\
0 & 1 & 0 & 0 \\
0 & 1 & 2 & 3
\end{bmatrix} \begin{bmatrix}
p_0 \\ p_1 \\ p_2 \\ p_3
\end{bmatrix} = \begin{bmatrix}
y_k \\ y_{k+1} \\ d_k \\ d_{k+1}
\end{bmatrix}$$
which can be solved to find
$$\begin{aligned}
    \mathcal{P}(s) &= (1-s)^2 (1 + 2s) y_k + s^2 (3 - 2 s) y_{k+1} + s (1 - s)^2 d_k - s^2 (1 - s)d_{k+1}\\
    \mathcal{P}'(s) &= 6s(s-1) y_k + 6s(1-s) y_{k+1} + (s-1)(3s-1) d_k + s(3s-2) d_{k+1}\\
    \mathcal{P}''(s) &= 6 (1-2s)(y_{k+1} - y_k) + (6s - 4) d_k + (6s-2) d_{k+1}
\end{aligned}$$
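A quick numerical spot-check of these formulas (the endpoint values `y_k`, `y_k1`, `d_k`, `d_k1` below are made up): $\mathcal{P}(0) = y_k$, $\mathcal{P}(1) = y_{k+1}$, $\mathcal{P}'(0) = d_k$, and $\mathcal{P}'(1) = d_{k+1}$.
```
# Spot-check the re-parameterized cubic at the interval endpoints with made-up values
y_k, y_k1, d_k, d_k1 = 1.0, 2.0, -0.5, 3.0

P = lambda s: ((1 - s)**2 * (1 + 2 * s) * y_k + s**2 * (3 - 2 * s) * y_k1
               + s * (1 - s)**2 * d_k - s**2 * (1 - s) * d_k1)
dP = lambda s: (6 * s * (s - 1) * y_k + 6 * s * (1 - s) * y_k1
                + (s - 1) * (3 * s - 1) * d_k + s * (3 * s - 2) * d_k1)

print(P(0.0), P(1.0))     # y_k, y_{k+1}
print(dP(0.0), dP(1.0))   # d_k, d_{k+1}
```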
Now, how to choose $d_k$?
#### PCHIP
Piecewise Cubic Hermite Interpolation Polynomial
- Picks the slope that preserves monotonicity
 - Also tries to preserve the shape of the data
- Note that in general this interpolant is $\mathcal{P}_k(x) \in C^1$
```
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# C^1 Piece-wise PCHIP
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
axes.plot(x, P_pchip, 'r')
axes.set_title("Interpolated Data - $C^1$ Cubic PCHIP")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
```
#### Cubic Splines
Enforces continuity of second derivatives as well:
$$\mathcal{P}''_{k}(x_{k}) = \mathcal{P}''_{k-1}(x_k)$$
From our generalization before we know
$$\mathcal{P}''(s) = 6 (1-2s)(y_{k+1} - y_k) + (6s - 4) d_k + (6s-2) d_{k+1}$$
and our constraint now becomes
$$\mathcal{P}''_{k}(0) = \mathcal{P}''_{k-1}(1)$$
$$\mathcal{P}''_{k-1}(1) = 6 (1-2 \cdot 1)(y_{k} - y_{k-1}) + (6\cdot 1 - 4) d_{k-1} + (6\cdot 1-2) d_{k}$$
$$\mathcal{P}''_{k}(0) = 6 (1-2 \cdot 0)(y_{k+1} - y_k) + (6\cdot 0 - 4) d_k + (6\cdot 0-2) d_{k+1}$$
$$-6(y_{k} - y_{k-1}) + 2 d_{k-1} + 4 d_{k} = 6 (y_{k+1} - y_k) - 4 d_k -2 d_{k+1}$$
We now have constraints on choosing the $d_k$ values. Note that we still need to prescribe them at the boundaries of the full interval.
This forms a linear set of equations for the $d_k$s based on the $y_k$ values and can be reformulated into a tri-diagonal linear system
$$\begin{bmatrix}
& \ddots & \ddots & \ddots & & &\\
& 0 & 2 & 8 & 2 & 0 & & \\
& & 0 & 2 & 8 & 2 & 0 & & & \\
& & & 0 & 2 & 8 & 2 & 0 & & \\
& & & & & \ddots & \ddots & \ddots &
\end{bmatrix}\begin{bmatrix}
\vdots \\ d_{k-1} \\ d_{k} \\ d_{k+1} \\ \vdots
\end{bmatrix} = \begin{bmatrix}
\vdots \\ 6 (y_{k} - y_{k-2}) \\ 6 (y_{k+1} - y_{k-1}) \\ 6 (y_{k+2} - y_{k}) \\\vdots
\end{bmatrix}$$
The boundaries are still left unconstrained, so we must pick some rule to specify the derivatives there, e.g. prescribing the end slopes directly (a *clamped* spline) or setting the second derivative to zero at the ends (a *natural* spline).
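Here is a minimal sketch of assembling and solving this system directly. Two choices in it are ours rather than from the text: the data are assumed to sit at unit spacing (so the slopes are with respect to the local parameter $s$), and the end slopes are simply clamped to one-sided differences. A production implementation would also use a banded solver such as `scipy.linalg.solve_banded` instead of a dense solve.
```
import numpy

# Sample values at unit-spaced points (made up for illustration)
y = numpy.array([3.0, 1.0, 4.0, 0.0, 0.5, -2.0, -3.0])
N = y.shape[0] - 1

# Assemble the (N+1) x (N+1) system for the slopes d_0, ..., d_N
A = numpy.zeros((N + 1, N + 1))
b = numpy.zeros(N + 1)
for k in range(1, N):
    # interior rows: 2 d_{k-1} + 8 d_k + 2 d_{k+1} = 6 (y_{k+1} - y_{k-1})
    A[k, k - 1:k + 2] = [2.0, 8.0, 2.0]
    b[k] = 6.0 * (y[k + 1] - y[k - 1])
# boundary rows (our choice): clamp the end slopes to one-sided differences
A[0, 0] = 1.0
b[0] = y[1] - y[0]
A[N, N] = 1.0
b[N] = y[N] - y[N - 1]
d = numpy.linalg.solve(A, b)

# Evaluate the piecewise cubic on segment k using the Hermite form P(s) above
def eval_spline(t):
    k = min(int(t), N - 1)
    s = t - k
    return ((1.0 - s)**2 * (1.0 + 2.0 * s) * y[k] + s**2 * (3.0 - 2.0 * s) * y[k + 1]
            + s * (1.0 - s)**2 * d[k] - s**2 * (1.0 - s) * d[k + 1])

print(d)
print([eval_spline(t) for t in (0.0, 1.0, 2.5, 6.0)])
```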
```
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# C^2 Piece-wise Splines
# Note that to get an interpolant we need to set the smoothing
# parameters *s* to 0
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
axes.plot(x, P_spline(x), 'r')
axes.set_title("Interpolated Data - $C^2$ Cubic Splines")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# Lagrange Basis
N = data.shape[0] - 1
lagrange_basis = numpy.ones((N + 1, x.shape[0]))
for i in range(N + 1):
for j in range(N + 1):
if i != j:
lagrange_basis[i, :] *= (x - data[j, 0]) / (data[i, 0] - data[j, 0])
# Calculate full polynomial
P_lagrange = numpy.zeros(x.shape[0])
for n in range(N + 1):
P_lagrange += lagrange_basis[n, :] * data[n, 1]
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
for n in range(1, N + 1):
P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
+ data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# Add end points for continuity
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^0 Piece-wise quadratic
P_quadratic = numpy.zeros(x.shape)
for k in range(1, N + 1, 2):
p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^1 Piece-wise PCHIP
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# C^2 Piece-wise Splines
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko', label="Data")
axes.plot(x, P_lagrange, 'y', label="Lagrange")
axes.plot(x, P_linear, 'g', label="PW Linear")
axes.plot(x, P_quadratic, 'r', label="PW Quadratic")
axes.plot(x, P_pchip, 'c', label="PW Cubic - PCHIP")
axes.plot(x, P_spline(x), 'b', label="PW Cubic - Spline")
axes.set_title("Interpolated Data - Method Comparisons")
axes.set_xlabel("x")
axes.set_ylabel("$P(x)$")
axes.legend()
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
```
## Relationship to Regression
What if we have more data and want a lower degree polynomial but do not want to use a piece-wise defined interpolant?
Regression techniques are often used to minimize a form of error between the data points $y_i$ at $x_i$ and an approximating function $f(x_i)$. Note that this is NOT interpolation anymore!
### Least-Squares
One way of doing this is to require that we minimize the least-squares error
$$
E = \left( \sum^m_{i=1} |y_i - f(x_i)|^2 \right )^{1/2}.
$$
where, as before, we have data $y_i$ at locations $x_i$ and an approximating function $f(x_i)$.
From the beginning of our discussion we know we can write the interpolant as a system of linear equations which we can then solve for the coefficients of a monomial basis. If we wanted to fit a line
$$
\mathcal{P}_1(x) = p_0 + p_1 x
$$
to $N$ data points we would have
$$
\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
\vdots & \vdots \\
1 & x_N
\end{bmatrix} \begin{bmatrix}
p_0 \\ p_1
\end{bmatrix} = \begin{bmatrix}
y_1 \\ y_2 \\ \vdots \\ y_N
\end{bmatrix}
$$
or
$$
A p = y
$$
What's wrong with this system?
The system is overdetermined: there are $N$ equations but only two unknowns, so in general no exact solution exists, since
$$
A \in \mathbb{R}^{N \times 2}, p \in \mathbb{R}^{2 \times 1}, \text{ and } y \in \mathbb{R}^{N \times 1}.
$$
Instead we can solve the related system of *normal equations*, obtained by requiring the gradient of $E^2$ with respect to $p$ to vanish,
$$
A^T A p = A^T y
$$
whose solution minimizes the least-squares error $E$ defined above.
```
# Linear Least Squares Problem
N = 50
x = numpy.linspace(-1.0, 1.0, N)
y = x + numpy.random.random((N))
A = numpy.ones((x.shape[0], 2))
A[:, 1] = x
p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y))
# p = numpy.linalg.lstsq(A, y)[0]
f = lambda x: p[0] + p[1] * x
E = numpy.linalg.norm(y - f(x), ord=2)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data")
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
plt.show()
print("E = %s" % E)
# Linear Least Squares Problem
N = 50
x = numpy.linspace(-1.0, 1.0, N)
y = x + numpy.random.random((N))
A = numpy.ones((x.shape[0], 2))
A[:, 1] = numpy.tanh(x)
p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y))
# p = numpy.linalg.lstsq(A, y)[0]
f = lambda x: p[0] + p[1] * numpy.tanh(x)
E = numpy.linalg.norm(y - f(x), ord=2)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data")
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
plt.show()
print("E = %s" % E)
```
### Let ye be warned...

(Original image can be found at [Curve Fitting](https://xkcd.com/2048/).)
|
github_jupyter
|
from __future__ import print_function
from __future__ import absolute_import
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
# =============================================================
# Plot the two example basis functions in the current example
x = numpy.linspace(1.0, 3.0, 2)
fig_Ex0a = plt.figure()
axes_Ex0a = fig_Ex0a.add_subplot(1, 2, 1)
axes_Ex0a.plot(x, (x-3)/(-2),color='r', label="$\ell_{%s}(x)$" % 0)
axes_Ex0a.set_title("Lagrange Basis $\ell_0(x)$")
axes_Ex0a.set_xlabel("x")
axes_Ex0a.set_ylabel("$\ell_0(x)$")
axes_Ex0b = fig_Ex0a.add_subplot(1, 2, 2)
axes_Ex0b.plot(x, (x-1)/(2),color='b', label="$\ell_{%s}(x)$" % 1)
axes_Ex0b.set_title("Lagrange Basis $\ell_1(x)$")
axes_Ex0b.set_xlabel("x")
axes_Ex0b.set_ylabel("$\ell_1(x)$")
fig_Ex0c = plt.figure()
axes_Ex0c = fig_Ex0c.add_subplot(1, 1, 1)
axes_Ex0c.set_ylim([0,9])
axes_Ex0c.plot(x, 3*(x-3)/(-2) + 7*(x-1)/(2),color='g')
axes_Ex0c.set_title("Interpolant for (1,3) and (3,7)")
axes_Ex0c.set_xlabel("x")
axes_Ex0c.set_ylabel("Interpolant")
data = numpy.array([[-1.5, -0.5], [0.0, 0.5]])
# data = numpy.array([[-1.5, -0.5], [0.0, 0.5], [-0.5, 1.0]])
N = data.shape[0] - 1
M = data.shape[0]
x = numpy.linspace(-2.0, 2.0, 100)
# ====================================================
# Compute the Lagrange basis (\ell_i(x))
def lagrange_basis(x, data):
"""Compute Lagrange basis at x given data"""
basis = numpy.ones((data.shape[0], x.shape[0]))
for i in range(data.shape[0]):
for j in range(data.shape[0]):
if i != j:
basis[i, :] *= (x - data[j, 0]) / (data[i, 0] - data[j, 0])
return basis
# ====================================================
# Calculate full polynomial
def poly_interpolant(x, data):
"""Compute polynomial interpolant of (x,y) using Lagrange basis"""
P = numpy.zeros(x.shape[0])
basis = lagrange_basis(x, data)
for n in range(data.shape[0]):
P += basis[n, :] * data[n, 1]
return P
# ====================================================
# Plot individual basis functions
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=8)
# Plot full polynomial P_N(x)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
for point in data:
axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
plt.show()
num_points = 4
# num_points = 5
# num_points = 6
# num_points = 20
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = numpy.sin(2.0 * numpy.pi * data[:, 0])
N = data.shape[0] - 1 # Degree of polynomial
M = data.shape[0]
x = numpy.linspace(-1.0, 1.0, 100)
# ====================================================
# Plot individual basis functions
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
basis = lagrange_basis(x, data)
for i in range(N + 1):
axes.plot(x, basis[i, :], label="$\ell_{%s}(x)$" % i)
axes.set_title("Lagrange Basis $\ell_i(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$\ell_i(x)$")
axes.legend(loc=1)
# Plot full polynomial P_N(x)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), label="$P_{%s}(x)$" % N)
axes.plot(x, numpy.sin(2.0 * numpy.pi * x), 'r--', label="True $f(x)$")
for point in data:
axes.plot(point[0], point[1], 'ko')
axes.set_title("$P_N(x)$")
axes.set_xlabel("x")
axes.set_ylabel("$P_N(x)$")
axes.legend(loc=1)
plt.show()
def f(x):
return 1.0 / (1.0 + 25.0 * x**2)
x = numpy.linspace(-1, 1, 100)
# x = numpy.linspace(-2, 2, 100)
num_points = 4
# num_points = 10
# num_points = 20
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1
# Plot the results
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_6(x)$")
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
plt.show()
def f(x, a=0.9, N=100):
summation = 0.0
b = (1.0 + 3.0 / 2.0 * numpy.pi) / a + 0.01
print(b)
for n in range(N + 1):
summation += a**n * numpy.cos(b**n * numpy.pi * x)
return summation
x = numpy.linspace(-1, 1, 1000)
# x = numpy.linspace(-2, 2, 100)
num_points = 10
data = numpy.empty((num_points, 2))
data[:, 0] = numpy.linspace(-1, 1, num_points)
data[:, 1] = f(data[:, 0])
N = data.shape[0] - 1
# Plot the results
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, poly_interpolant(x, data), 'b', label="$P_6(x)$")
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(data[:, 0], data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
plt.show()
def cheb_poly(x, N):
"""Compute the *N*th Chebyshev polynomial and evaluate it at *x*"""
T = numpy.empty((3, x.shape[0]))
T[0, :] = numpy.ones(x.shape)
T[1, :] = x
if N == 0:
return T[0, :]
elif N == 1:
return T[1, :]
else:
for k in range(2, N + 1):
T[2, :] = 2.0 * x * T[1, :] - T[0, :]
T[0, :] = T[1, :]
T[1, :] = T[2, :]
return T[2, :]
x = numpy.linspace(-1, 1, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
for n in range(5):
axes.plot(x, cheb_poly(x, n), label="$T_%s$" % n)
axes.set_ylim((-1.1, 1.1))
axes.set_title("Chebyshev Polynomials")
axes.set_xlabel("x")
axes.set_ylabel("$T_N(x)$")
axes.legend(loc=1)
plt.show()
N = 5
x_extrema = numpy.cos(numpy.arange(N + 1) * numpy.pi / N)
x_nodes = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
# Plot points
axes.plot(x_extrema, numpy.zeros(N+1), 'ro')
axes.plot(x_nodes, numpy.zeros(N), 'bo')
# Plot some helpful lines
axes.plot((-1.0, -1.0), (-1.1, 1.1), 'k--')
axes.plot((1.0, 1.0), (-1.1, 1.1), 'k--')
axes.plot((-1.0, 1.0), (0.0, 0.0), 'k--')
for i in range(x_extrema.shape[0]):
axes.plot((x_extrema[i], x_extrema[i]), (-1.1, 1.1), 'r--')
axes.plot(x_extrema[i], cheb_poly(x_extrema, N)[i], 'ro')
print(x_extrema)
print(numpy.cos(x_extrema))
# Plot Chebyshev polynomial
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
# Labels
axes.set_title("Chebyshev Nodes and Extrema", fontsize="20")
axes.set_xlabel("x", fontsize="15")
axes.set_ylabel("$T_{N+1}(x)$", fontsize="15")
plt.show()
# First-kind Nesting (3 x)
fig = plt.figure()
# fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 1, 1)
N = 5
factor = 3
x_1 = numpy.cos((2.0 * numpy.arange(1, N + 1) - 1.0) / (2.0 * N) * numpy.pi)
x_2 = numpy.cos((2.0 * numpy.arange(1, factor * N + 1) - 1.0) / (2.0 * factor * N) * numpy.pi)
axes.plot(x_1, numpy.zeros(N), "o", color="r", markerfacecolor="lightgray", markersize="15")
axes.plot(x_2, numpy.zeros(N * factor), 'kx', markersize="10")
x_hat = numpy.linspace(-1, 1, 1000)
axes.plot(x_hat, cheb_poly(x_hat, N), 'k')
axes.plot(x_hat, cheb_poly(x_hat, factor * N), 'k')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.set_title("Nesting of 1st and 2nd Kind Chebyshev Polynomials")
axes.set_xlabel("$x$")
axes.set_ylabel("$T_N(x)$")
plt.show()
x = numpy.linspace(0, numpy.pi, 100)
N = 15
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1, aspect="equal")
axes.plot(numpy.cos(x), numpy.sin(x), 'r--')
axes.plot(numpy.linspace(-1.1, 1.1, 100), numpy.zeros(x.shape), 'r')
for k in range(1, N + 1):
location = [numpy.cos((2.0 * k - 1.0) * numpy.pi / (2.0 * N)),
numpy.sin((2.0 * k - 1.0) * numpy.pi / (2.0 * N))]
axes.plot(location[0], location[1], 'ko')
axes.plot(location[0], 0.0, 'ko')
axes.plot([location[0], location[0]], [0.0, location[1]], 'k--')
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-0.1, 1.1))
plt.show()
def f(x):
return 1.0 / (1.0 + 25.0 * x**2)
# Parameters
x = numpy.linspace(-1, 1, 100)
num_points = 25
# ============================================================
# Equidistant nodes
equidistant_data = numpy.empty((num_points, 2))
equidistant_data[:, 0] = numpy.linspace(-1, 1, num_points)
equidistant_data[:, 1] = f(equidistant_data[:, 0])
N = equidistant_data.shape[0] - 1
P_lagrange = poly_interpolant(x, equidistant_data)
# ============================================================
# Chebyshev nodes
chebyshev_data = numpy.empty((num_points, 2))
chebyshev_data[:, 0] = numpy.cos((2.0 * numpy.arange(1, num_points + 1) - 1.0) * numpy.pi / (2.0 * num_points))
chebyshev_data[:, 1] = f(chebyshev_data[:, 0])
P_cheby1 = poly_interpolant(x, chebyshev_data)
# Fit directly with Chebyshev polynomials
coeff = numpy.polynomial.chebyshev.chebfit(chebyshev_data[:, 0], chebyshev_data[:, 1], N)
P_cheby2 = numpy.polynomial.chebyshev.chebval(x, coeff)
# Check on unique polynomials
print(numpy.allclose(P_cheby1, P_cheby2))
# ============================================================
# Plot the results
fig = plt.figure()
fig.subplots_adjust(hspace=.5)
axes = fig.add_subplot(2, 1, 1)
axes.plot(x, P_lagrange, 'b', label="$P_%s(x)$" % N)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(equidistant_data[:, 0], equidistant_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function at Equispaced Points")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=8)
print(numpy.linalg.norm(P_lagrange - f(x)))
axes = fig.add_subplot(2, 1, 2)
axes.plot(x, f(x), 'k', label="True $f(x)$")
axes.plot(x, P_cheby1, 'b', label="$P_%s(x)$" % N)
axes.plot(chebyshev_data[:, 0], chebyshev_data[:, 1], 'ro', label="data")
axes.set_title("Interpolation of Runge's function at Chebyshev Points")
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.legend(loc=1)
print(numpy.linalg.norm(P_cheby1 - f(x)))
plt.show()
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# Lagrange Basis
P_lagrange = poly_interpolant(x, data)
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
for n in range(1, N + 1):
P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
+ data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# Add end points for continuity
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_linear, 'r')
axes.set_title("Interpolated Data - $C^0$ Linear")
axes.set_xlabel("x")
axes.set_ylabel("$P_1(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
N = data.shape[0] - 1
# C^0 Piece-wise quadratic
P_quadratic = numpy.zeros(x.shape)
for k in range(1, N + 1, 2):
p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko')
axes.plot(x, P_lagrange, 'b--')
axes.plot(x, P_quadratic, 'r')
axes.set_title("Interpolated Data - $C^0$ Quadratic")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# C^1 Piece-wise PCHIP
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
axes.plot(x, P_pchip, 'r')
axes.set_title("Interpolated Data - $C^1$ Cubic PCHIP")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# C^2 Piece-wise Splines
# Note that to get an interpolant we need to set the smoothing
# parameters *s* to 0
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ro')
axes.plot(x, P_spline(x), 'r')
axes.set_title("Interpolated Data - $C^2$ Cubic Splines")
axes.set_xlabel("x")
axes.set_ylabel("$P_3(x)$")
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
import scipy.interpolate as interpolate
data = numpy.array([[1.0, 3.0], [2.0, 1.0], [3.5, 4.0], [5.0, 0.0], [6.0, 0.5], [9.0, -2.0], [9.5, -3.0]])
x = numpy.linspace(0.0, 10, 100)
# Lagrange Basis
N = data.shape[0] - 1
lagrange_basis = numpy.ones((N + 1, x.shape[0]))
for i in range(N + 1):
for j in range(N + 1):
if i != j:
lagrange_basis[i, :] *= (x - data[j, 0]) / (data[i, 0] - data[j, 0])
# Calculate full polynomial
P_lagrange = numpy.zeros(x.shape[0])
for n in range(N + 1):
P_lagrange += lagrange_basis[n, :] * data[n, 1]
# C^0 Piece-wise linear
# P_pw_linear = numpy.interp(x, data[:, 0], data[:, 1])
P_linear = numpy.zeros(x.shape)
for n in range(1, N + 1):
P_linear += ((data[n, 1] - data[n - 1, 1]) / (data[n, 0] - data[n - 1, 0]) * (x - data[n - 1, 0])
+ data[n - 1, 1]) * (x > data[n - 1, 0]) * (x <= data[n, 0])
# Add end points for continuity
P_linear += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_linear += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^0 Piece-wise quadratic
P_quadratic = numpy.zeros(x.shape)
for k in range(1, N + 1, 2):
p = numpy.polyfit(data[k - 1:k + 2, 0], data[k - 1:k + 2, 1], 2)
P_quadratic += numpy.polyval(p, x) * (x > data[k - 1, 0]) * (x <= data[k + 1, 0])
# Add end points for continuity
P_quadratic += numpy.ones(x.shape) * data[0, 1] * (x < data[0, 0])
P_quadratic += numpy.ones(x.shape) * data[-1, 1] * (x >= data[-1, 0])
# C^1 Piece-wise PCHIP
P_pchip = interpolate.pchip_interpolate(data[:, 0], data[:, 1], x)
# C^2 Piece-wise Splines
P_spline = interpolate.UnivariateSpline(data[:, 0], data[:, 1], s=0)
# Plot
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(data[:,0], data[:,1], 'ko', label="Data")
axes.plot(x, P_lagrange, 'y', label="Lagrange")
axes.plot(x, P_linear, 'g', label="PW Linear")
axes.plot(x, P_quadratic, 'r', label="PW Quadratic")
axes.plot(x, P_pchip, 'c', label="PW Cubic - PCHIP")
axes.plot(x, P_spline(x), 'b', label="PW Cubic - Spline")
axes.set_title("Interpolated Data - Method Comparisons")
axes.set_xlabel("x")
axes.set_ylabel("$P(x)$")
axes.legend()
axes.set_xlim([0.0, 10.0])
axes.set_ylim([-4.0, 15.0])
plt.show()
# Linear Least Squares Problem
N = 50
x = numpy.linspace(-1.0, 1.0, N)
y = x + numpy.random.random((N))
A = numpy.ones((x.shape[0], 2))
A[:, 1] = x
p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y))
# p = numpy.linalg.lstsq(A, y)[0]
f = lambda x: p[0] + p[1] * x
E = numpy.linalg.norm(y - f(x), ord=2)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data")
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
plt.show()
print("E = %s" % E)
# Linear Least Squares Problem
N = 50
x = numpy.linspace(-1.0, 1.0, N)
y = x + numpy.random.random((N))
A = numpy.ones((x.shape[0], 2))
A[:, 1] = numpy.tanh(x)
p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y))
# p = numpy.linalg.lstsq(A, y)[0]
f = lambda x: p[0] + p[1] * numpy.tanh(x)
E = numpy.linalg.norm(y - f(x), ord=2)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, f(x), 'r')
axes.set_title("Least Squares Fit to Data")
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
plt.show()
print("E = %s" % E)
| 0.843283 | 0.993372 |
```
from gensim.models import FastText
model = FastText.load_fasttext_format("./model/fasttext.bin")
from konlpy.tag import Kkma
from konlpy.tag import Okt
kkma = Kkma()
okt = Okt()
kkma.pos("아버지가방에 들어가신다")
okt.pos("맛있고 춥고 더러워요", stem=True)
model.wv.distance("아이즈원", "최예나")
posed = okt.pos('생각보다 사람들도 많았고 넓어서 놀랐어용! 외관에서부터 풍기는 아늑한분위기와 딱 안성맞춤의 인테리어로 쾌적하고 좋더라구요~', stem=True, norm=True)
posed
adj = [i for i in posed if i[1]=='Adjective']
adj
POSED = kkma.pos("생각보다 사람들도 많았고 넓어서 놀랐어용! 외관에서부터 풍기는 아늑한분위기와 딱 안성맞춤의 인테리어로 쾌적하고 좋더라구요~")
POSED
adj = []
cnt = 0
floor =[]
stair =[]
cnt2=0
cnt3=0
MAX=0
last=[]
# find tokens tagged XR or VA
for index,i in enumerate(POSED):
if i[1]=='XR' or i[1]=='VA':
adj.append(i)
floor.insert(cnt,index)
cnt+=1
print(floor)
# find tokens tagged NNG
for index,i in enumerate(POSED):
if i[1] =='NNG':
stair.insert(cnt2,index)
cnt2+=1
print(stair)
# finally: pair each VA with the closest preceding NNG
for index,i in enumerate(adj):
if i[1]=='VA':
check=floor[index]
print(check)
for index2,j in enumerate(stair):
if j < check:
cnt3+=1
MAX=stair[index2]
last.append(POSED[MAX][0]+ " " +POSED[check][0]+"음")
print(adj)
print(last)
# extract XR (root) tokens and store them in xr[]
xr=[]
va=[] # VA and NNG tokens
vanng=[]
for index,i in enumerate(POSED):
if i[1]=='XR':
xr.append(i[0])
# keep only VA and NNG tokens
for index,i in enumerate(POSED):
if i[1]=='NNG' or i[1]=='VA':
va.append(i)
# if the token immediately before a VA is an NNG, store the combined phrase in a separate list.
for index,i in enumerate(va):
if va[index][1] == 'VA':
if index == 0:
continue
elif va[index-1][1]=='NNG':
vanng.append(va[index-1][0] + " " + va[index][0]+"음")
# otherwise skip it // e.g. a bare "넓음" ("spacious")
# merge xr[] and vanng[]
xr_va = xr + vanng
print(xr_va)
def tag_parser(sentence):
paser = kkma.pos(sentence)
    # extract XR (root) tokens and store them in xr[]
    xr=[]
    va=[] # VA and NNG tokens
vanng=[]
for index,i in enumerate(paser):
if i[1]=='XR':
xr.append(i[0])
    # keep only VA and NNG tokens
for index,i in enumerate(paser):
if i[1]=='NNG' or i[1]=='VA':
va.append(i)
    # if the token immediately before a VA is an NNG, store the combined phrase in a separate list.
for index,i in enumerate(va):
if va[index][1] == 'VA':
if index == 0:
continue
elif va[index-1][1]=='NNG':
vanng.append(va[index-1][0] + " " + va[index][0]+"음")
    # otherwise skip it // e.g. a bare "넓음" ("spacious")
    # merge xr[] and vanng[]
return xr + vanng
tag_parser("5천원짜리 해장국. 예전엔 짜투리고기도 몇점 들어가고 가성비 좋았는데 이제는 그냥 소기름 넣은 된장국에 선지만 들어간 해장국 주인도 바뀐건지 종업원이 싸가지가 없는건지 손님을 개취급해서 다시는 가고 싶지 않다")
pos=kkma.pos("5천원짜리 해장국. 예전엔 짜투리고기도 몇점 들어가고 가성비 좋았는데 이제는 그냥 소기름 넣은 된장국에 선지만 들어간 해장국 주인도 바뀐건지 종업원이 싸가지가 없는건지 손님을 개취급해서 다시는 가고 싶지 않다")
pos
```
|
github_jupyter
|
from gensim.models import FastText
model = FastText.load_fasttext_format("./model/fasttext.bin")
from konlpy.tag import Kkma
from konlpy.tag import Okt
kkma = Kkma()
okt = Okt()
kkma.pos("아버지가방에 들어가신다")
okt.pos("맛있고 춥고 더러워요", stem=True)
model.wv.distance("아이즈원", "최예나")
posed = okt.pos('생각보다 사람들도 많았고 넓어서 놀랐어용! 외관에서부터 풍기는 아늑한분위기와 딱 안성맞춤의 인테리어로 쾌적하고 좋더라구요~', stem=True, norm=True)
posed
adj = [i for i in posed if i[1]=='Adjective']
adj
POSED = kkma.pos("생각보다 사람들도 많았고 넓어서 놀랐어용! 외관에서부터 풍기는 아늑한분위기와 딱 안성맞춤의 인테리어로 쾌적하고 좋더라구요~")
POSED
adj = []
cnt = 0
floor =[]
stair =[]
cnt2=0
cnt3=0
MAX=0
last=[]
# XR이나 VA찾는거
for index,i in enumerate(POSED):
if i[1]=='XR' or i[1]=='VA':
adj.append(i)
floor.insert(cnt,index)
cnt+=1
print(floor)
# NNG 찾는거
for index,i in enumerate(POSED):
if i[1] =='NNG':
stair.insert(cnt2,index)
cnt2+=1
print(stair)
# 마지막거
for index,i in enumerate(adj):
if i[1]=='VA':
check=floor[index]
print(check)
for index2,j in enumerate(stair):
if j < check:
cnt3+=1
MAX=stair[index2]
last.append(POSED[MAX][0]+ " " +POSED[check][0]+"음")
print(adj)
print(last)
# XR 빼내서 XR[] 배열에 저장
xr=[]
va=[] # va + nng
vanng=[]
for index,i in enumerate(POSED):
if i[1]=='XR':
xr.append(i[0])
# VA 과 NNG 만빼내서 저장
for index,i in enumerate(POSED):
if i[1]=='NNG' or i[1]=='VA':
va.append(i)
# VA 인덱스의 -1 번째 항이 NNG 일경우 합친것을 다른 배열에 저장.
for index,i in enumerate(va):
if va[index][1] == 'VA':
if index == 0:
continue
elif va[index-1][1]=='NNG':
vanng.append(va[index-1][0] + " " + va[index][0]+"음")
# 만약 그렇지 않을 경우 패스// EX) 넓음
# XR[] 과 VA[] 합치기
xr_va = xr + vanng
print(xr_va)
def tag_parser(sentence):
paser = kkma.pos(sentence)
# XR 빼내서 XR[] 배열에 저장
xr=[]
va=[] # va + nng
vanng=[]
for index,i in enumerate(paser):
if i[1]=='XR':
xr.append(i[0])
# VA 과 NNG 만빼내서 저장
for index,i in enumerate(paser):
if i[1]=='NNG' or i[1]=='VA':
va.append(i)
# VA 인덱스의 -1 번째 항이 NNG 일경우 합친것을 다른 배열에 저장.
for index,i in enumerate(va):
if va[index][1] == 'VA':
if index == 0:
continue
elif va[index-1][1]=='NNG':
vanng.append(va[index-1][0] + " " + va[index][0]+"음")
# 만약 그렇지 않을 경우 패스// EX) 넓음
# XR[] 과 VA[] 합치기
return xr + vanng
tag_parser("5천원짜리 해장국. 예전엔 짜투리고기도 몇점 들어가고 가성비 좋았는데 이제는 그냥 소기름 넣은 된장국에 선지만 들어간 해장국 주인도 바뀐건지 종업원이 싸가지가 없는건지 손님을 개취급해서 다시는 가고 싶지 않다")
pos=kkma.pos("5천원짜리 해장국. 예전엔 짜투리고기도 몇점 들어가고 가성비 좋았는데 이제는 그냥 소기름 넣은 된장국에 선지만 들어간 해장국 주인도 바뀐건지 종업원이 싸가지가 없는건지 손님을 개취급해서 다시는 가고 싶지 않다")
pos
| 0.14143 | 0.496704 |
# Cholangiocarcinoma (CHOL)
[Jump to the urls to download the GCT and CLS files](#Downloads)
<p><strong>Authors:</strong> Alejandra Ramos, Marylu Villa, and Edwin Juarez</p>
<p><strong>Contact info:</strong> Email Edwin at [email protected] or post a question in <a href="http://www.genepattern.org/help" target="_blank">http://www.genepattern.org/help</a></p>
This notebook provides the steps to download all the CHOL samples (RNA-Seq) from The Cancer Genome Atlas (TCGA) contained in the Genomic Data Commons (GDC) Data portal. These samples can be downloaded as a GCT file and phenotype labels (primary tumor vs normal samples) can be downloaded as a CLS file. These files are compatible with other GenePattern Analyses.

# Overview
<p><em>Bile duct cancer</em> arises from the cells that line the bile ducts, the drainage system for bile that is produced by the liver. Bile ducts collect this bile, draining it into the gallbladder and finally into the small intestine where it aids in the digestion process. Bile duct cancer is also called cholangiocarcinoma.</p>
## CHOL Statistics
<ul>
	<li>Bile duct cancer is a rare form of cancer, with approximately 2,500 new cases diagnosed in the United States each year.</li>
</ul>
<p><img alt="Incidence of liver and intrahepatic bile duct cancer in South Texas by age group and race/ethnicity, 2005â2009. Source : Texas Cancer Registry, Cancer Epidemiology and Surveillance Branch, Texas Department of State Health Services  " src="https://www.researchgate.net/profile/Amelie_Ramirez/publication/300288976/figure/fig8/AS:351568613396499@1460831897168/Incidence-of-liver-and-intrahepatic-bile-duct-cancer-in-South-Texas-by-age-group-and.png" /></p>
<p>Incidence of liver and intrahepatic bile duct cancer in South Texas by age group and race/ethnicity, 2005–2009.</p>
<p>https://www.researchgate.net/figure/Incidence-of-liver-and-intrahepatic-bile-duct-cancer-in-South-Texas-by-age-group-and_fig8_300288976</p>
## Dataset's Demographic Information
<p>TCGA contains 45 CHOL samples (36 primary cancer samples, 9 normal tissue samples) from 36 people. Below is a summary of the demographic information represented in this dataset. If you are interested in viewing the complete study, as well as the files on the GDC Data Portal, you can follow <a href="https://portal.gdc.cancer.gov/repository?facetTab=cases&filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.project.project_id%22%2C%22value%22%3A%5B%22TCGA-CHOL%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.analysis.workflow_type%22%2C%22value%22%3A%5B%22HTSeq%20-%20Counts%22%5D%7D%7D%2C%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22files.experimental_strategy%22%2C%22value%22%3A%5B%22RNA-Seq%22%5D%7D%7D%5D%7D&searchTableTab=cases" target="_blank">this link (these data were gathered on July 10th, 2018).</a></p>

# Login to GenePattern
<div class="alert alert-info">
<h3 style="margin-top: 0;"> Instructions <i class="fa fa-info-circle"></i></h3>
<ol>
<li>Login to the *GenePattern Cloud* server.</li>
</ol>
</div>
```
# Requires GenePattern Notebook: pip install genepattern-notebook
import gp
import genepattern
# Username and password removed for security reasons.
genepattern.display(genepattern.session.register("https://cloud.genepattern.org/gp", "", ""))
```
# Downloading RNA-Seq HTSeq Counts Using TCGAImporter
Use the TCGAImporter module to download RNA-Seq HTSeq counts from the GDC Data Portal using a Manifest file and a Metadata file
<p><strong>Input files</strong></p>
<ul>
<li><em>Manifest file:</em> a file containing the list of RNA-Seq samples to be downloaded.</li>
<li><em>Metadata file:</em> a file containing information about the files present at the GDC Data Portal. Instructions for downloading the Manifest and Metadata files can be found here: <a href="https://github.com/genepattern/TCGAImporter/blob/master/how_to_download_a_manifest_and_metadata.pdf" target="_blank">https://github.com/genepattern/TCGAImporter/blob/master/how_to_download_a_manifest_and_metadata.pdf</a></li>
</ul>
<p><strong>Output files</strong></p>
<ul>
<li><em>CHOL_TCGA.gct</em> - This is a tab delimited file that contains the gene expression (HTSeq counts) from the samples listed on the Manifest file. For more info on GCT files, look at reference <a href="#References">1</a><em> </em></li>
<li><em><em>CHOL_TCGA.cls</em> -</em> The CLS file defines phenotype labels (in this case Primary Tumor and Normal Sample) and associates each sample in the GCT file with a label. For more info on CLS files, look at reference <a href="#References">2</a></li>
</ul>
<div class="alert alert-info">
<h3 style="margin-top: 0;"> Instructions <i class="fa fa-info-circle"></i></h3>
<ol>
<li>Load the manifest file in **Manifest** parameter.</li>
<li>Load the metadata file in **Metadata** parameter.</li>
<li>Click **run**.</li>
</ol>
</div>
<p><strong>Estimated run time for TCGAImporter</strong> : ~ 2 minutes</p>
```
tcgaimporter_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00369')
tcgaimporter_job_spec = tcgaimporter_task.make_job_spec()
tcgaimporter_job_spec.set_parameter("manifest", "https://cloud.genepattern.org/gp/users/aleramos123/tmp/run5876584072708422695.tmp/CHOL_MANIFEST.txt")
tcgaimporter_job_spec.set_parameter("metadata", "https://cloud.genepattern.org/gp/users/aleramos123/tmp/run3955148500036628036.tmp/CHOL_METADATA.json")
tcgaimporter_job_spec.set_parameter("output_file_name", "CHOL_TCGA")
tcgaimporter_job_spec.set_parameter("gct", "True")
tcgaimporter_job_spec.set_parameter("translate_gene_id", "False")
tcgaimporter_job_spec.set_parameter("cls", "True")
genepattern.display(tcgaimporter_task)
job35200 = gp.GPJob(genepattern.session.get(0), 35200)
genepattern.display(job35200)
```
# Downloads
<p>You can download the input and output files of TCGAImporter for this cancer type here:</p>
<p><strong>Inputs:</strong></p>
<ul>
<li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/BLCA/BLCA_MANIFEST.txt" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/CHOL/CHOL_MANIFEST.txt</a></li>
<li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/KIRP/KIRP_METADATA.json" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/CHOL/CHOL_METADATA.json</a></li>
</ul>
<p><strong>Outputs:</strong></p>
<ul>
<li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/KIRP/KIRP_TCGA.gct" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/CHOL/CHOL_TCGA.gct</a></li>
<li><a href="https://datasets.genepattern.org/data/TCGA_HTSeq_counts/KIRP/KIRP_TCGA.cls" target="_blank">https://datasets.genepattern.org/data/TCGA_HTSeq_counts/CHOL/CHOL_TCGA.cls</a></li>
</ul>
If you'd like to download similar files for other TCGA datasets, visit this link:
- https://datasets.genepattern.org/?prefix=data/TCGA_HTSeq_counts/
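Once downloaded, the GCT and CLS files are plain text and can be inspected directly, for example with pandas. The snippet below is only a minimal sketch and assumes the two files have been saved to the working directory under the names shown above:
```
import pandas as pd

# GCT layout: line 1 is the version tag (#1.2), line 2 the matrix dimensions,
# line 3 the header (Name, Description, then one column per sample)
expression = pd.read_csv("CHOL_TCGA.gct", sep="\t", skiprows=2)
print(expression.shape)
print(expression.iloc[:5, :5])

# CLS layout: line 1 gives the counts, line 2 the class names, line 3 one label per sample
with open("CHOL_TCGA.cls") as f:
    cls_lines = f.read().splitlines()
print(cls_lines[1])                  # phenotype names
print(cls_lines[2].split()[:10])     # labels of the first few samples
```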
# References
[1] http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#GCT
[2] http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide#CLS
<p>[3] <a href="https://stackoverflow.com/questions/4384210/what-is-the-purpose-of-cls-files-and-dll-files-in-vb" target="_blank">https://stackoverflow.com/questions/4384210/what-is-the-purpose-of-cls-files-and-dll-files-in-vb</a></p>
<p>[4] <a href="https://www.medicinenet.com/bile_duct_cancer_cholangiocarcinoma/article.htm" target="_blank">https://www.medicinenet.com/bile_duct_cancer_cholangiocarcinoma/article.htm</a></p>
|
github_jupyter
|
# Requires GenePattern Notebook: pip install genepattern-notebook
import gp
import genepattern
# Username and password removed for security reasons.
genepattern.display(genepattern.session.register("https://cloud.genepattern.org/gp", "", ""))
tcgaimporter_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00369')
tcgaimporter_job_spec = tcgaimporter_task.make_job_spec()
tcgaimporter_job_spec.set_parameter("manifest", "https://cloud.genepattern.org/gp/users/aleramos123/tmp/run5876584072708422695.tmp/CHOL_MANIFEST.txt")
tcgaimporter_job_spec.set_parameter("metadata", "https://cloud.genepattern.org/gp/users/aleramos123/tmp/run3955148500036628036.tmp/CHOL_METADATA.json")
tcgaimporter_job_spec.set_parameter("output_file_name", "CHOL_TCGA")
tcgaimporter_job_spec.set_parameter("gct", "True")
tcgaimporter_job_spec.set_parameter("translate_gene_id", "False")
tcgaimporter_job_spec.set_parameter("cls", "True")
genepattern.display(tcgaimporter_task)
job35200 = gp.GPJob(genepattern.session.get(0), 35200)
genepattern.display(job35200)
| 0.379608 | 0.950778 |
```
%matplotlib inline
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from PIL import Image
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
# since I added the weights file of the pretrained Resnet50 model to this kernel, there're two folders in the ../input
# ../input/plant-seedlings-classification and ../input/ResNet-50
print(os.listdir("../input"), os.listdir("../input/plant-seedlings-classification/train"))
# Any results you write to the current directory are saved as output.
# how many files in each category
training_base_dir = "../input/plant-seedlings-classification/train"
file_nums = []
category_files = {}
for directory in os.listdir(training_base_dir):
category_files[directory] = []
cate_dir = os.path.join(training_base_dir, directory)
file_num = 0
for file_name in os.listdir(cate_dir):
full_file_name = os.path.join(cate_dir, file_name)
category_files[directory].append(full_file_name)
file_num += 1
print(cate_dir, file_num)
file_nums.append(file_num)
# create folders for training and validation data
base_dir = "./"
categories = os.listdir("../input/plant-seedlings-classification/train")
# I initially wanted to create 4 training datasets and 4 validation datasets and use these datasets to train different models.
# Since there's only 5 GB of disk I can use, only one training dataset and one validation dataset are created.
datasets_num = 1
for idx in range(1, datasets_num + 1):
train_val_str = [data_type + str(idx) for data_type in ["train", "val"]]
for data_type in train_val_str:
tmp_path0 = os.path.join(base_dir, data_type)
try:
os.mkdir(tmp_path0)
except (FileExistsError, FileNotFoundError):
print("raise an error when creating {}".format(tmp_path))
continue
for category in categories:
tmp_path1 = os.path.join(tmp_path0, category)
try:
os.mkdir(tmp_path1)
except (FileExistsError, FileNotFoundError):
print("raise an error when creating {}".format(tmp_path))
continue
# sample files and copy these files to training and validation dataset folders
from shutil import copy
from random import sample, seed
from pdb import set_trace
seed()
for i in range(1, datasets_num + 1):
for _, category in enumerate(category_files.keys()):
l = len(category_files[category])
train_data_num = int(l * 0.9)
valid_data_num = l - train_data_num
files2copy = sample(category_files[category], l)
train_dest = os.path.join(base_dir, "train{}".format(i), category)
valid_dest = os.path.join(base_dir, "val{}".format(i), category)
for j in range(train_data_num):
copy(files2copy[j], train_dest)
for j in range(train_data_num, l):
copy(files2copy[j], valid_dest)
# create a folder to store the weights of pretrained models
from os.path import join, exists
from os import makedirs
cache_dir = join('/tmp', '.torch')
if not exists(cache_dir):
print("creating {}".format(cache_dir))
makedirs(cache_dir)
models_dir = join(cache_dir, 'models')
if not exists(models_dir):
print("creating {}".format(models_dir))
makedirs(models_dir)
# !cp ../input/resnet101/resnet101.pth /tmp/.torch/models/resnet101-5d3b4d8f.pth
# we need to find the weights of the pretrained model in https://www.kaggle.com/pytorch and add the weights to our kernel.
# since pytorch finds the weights in /tmp/.torch/models, we need to copy the weights file to the folder and rename it.
!cp ../input/resnet50/resnet50.pth /tmp/.torch/models/resnet50-19c8e357.pth
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as tv
from torchvision import transforms
# define transforms
img_size = (224, 224)
train_transforms = transforms.Compose([transforms.Resize(img_size), transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
valid_transforms = transforms.Compose([transforms.Resize(img_size),
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# define data loader
batch_size = 64
train_dataset = tv.datasets.ImageFolder("train1", train_transforms)
valid_dataset = tv.datasets.ImageFolder("val1", valid_transforms)
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
valid_data_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# define model
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
myModel = tv.models.resnet50(pretrained=True)
set_parameter_requires_grad(myModel, True)
num_ftrs = myModel.fc.in_features
num_classes = len(category_files.keys())
# a small classification head (linear -> dropout -> linear) replaces the final fully connected layer
myModel.fc = nn.Sequential(*[nn.Linear(num_ftrs, 1024), nn.Dropout(0.25), nn.Linear(1024, num_classes)])
# define optimizer and loss
optimizer = optim.SGD(myModel.parameters(), lr=0.001, momentum=0.9)
criterion = nn.CrossEntropyLoss()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
myModel = myModel.to(device)
epochs = 200
best_val_loss = 100
for i in range(epochs):
train_loss = 0
train_corrects = 0
myModel.train()
for inputs, labels in train_data_loader:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
predicts = myModel(inputs)
loss = criterion(predicts, labels)
loss.backward()
optimizer.step()
_, preds = torch.max(predicts, 1)
train_loss += loss.item() * inputs.size(0)
train_corrects += torch.sum(preds == labels.data)
val_loss = 0
val_corrects = 0
myModel.eval()
for inputs, labels in valid_data_loader:
inputs = inputs.to(device)
labels = labels.to(device)
predicts = myModel(inputs)
loss = criterion(predicts, labels)
_, preds = torch.max(predicts, 1)
val_loss += loss.item() * inputs.size(0)
val_corrects += torch.sum(preds == labels.data)
print("epoch: {}, train loss: {}, train accu: {}, val loss: {}, val accu: {}".format(i,
train_loss / len(train_data_loader.dataset), train_corrects.double() / len(train_data_loader.dataset),
val_loss / len(valid_data_loader.dataset), val_corrects.double() / len(valid_data_loader.dataset)))
if val_loss / len(valid_data_loader.dataset) < best_val_loss:
torch.save(myModel.state_dict(), "myModel.pkl")
best_val_loss = val_loss / len(valid_data_loader.dataset)
```
|
github_jupyter
|
%matplotlib inline
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from PIL import Image
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
# since I added the weights file of the pretrained Resnet50 model to this kernel, there're two folders in the ../input
# ../input/plant-seedlings-classification and ../input/ResNet-50
print(os.listdir("../input"), os.listdir("../input/plant-seedlings-classification/train"))
# Any results you write to the current directory are saved as output.
# how many files in each category
training_base_dir = "../input/plant-seedlings-classification/train"
file_nums = []
category_files = {}
for directory in os.listdir(training_base_dir):
category_files[directory] = []
cate_dir = os.path.join(training_base_dir, directory)
file_num = 0
for file_name in os.listdir(cate_dir):
full_file_name = os.path.join(cate_dir, file_name)
category_files[directory].append(full_file_name)
file_num += 1
print(cate_dir, file_num)
file_nums.append(file_num)
# create folders for training and validation data
base_dir = "./"
categories = os.listdir("../input/plant-seedlings-classification/train")
# I initially wanted to create 4 training datasets and 4 validation datasets and use theses datasets to train different models.
# Since there's only 5 GB disk I can use, only one training dataset and one validation dataset are created.
datasets_num = 1
for idx in range(1, datasets_num + 1):
train_val_str = [data_type + str(idx) for data_type in ["train", "val"]]
for data_type in train_val_str:
tmp_path0 = os.path.join(base_dir, data_type)
try:
os.mkdir(tmp_path0)
except (FileExistsError, FileNotFoundError):
print("raise an error when creating {}".format(tmp_path))
continue
for category in categories:
tmp_path1 = os.path.join(tmp_path0, category)
try:
os.mkdir(tmp_path1)
except (FileExistsError, FileNotFoundError):
print("raise an error when creating {}".format(tmp_path))
continue
# sample files and copy these files to training and validation dataset folders
from shutil import copy
from random import sample, seed
from pdb import set_trace
seed()
for i in range(1, datasets_num + 1):
for _, category in enumerate(category_files.keys()):
l = len(category_files[category])
train_data_num = int(l * 0.9)
valid_data_num = l - train_data_num
files2copy = sample(category_files[category], l)
train_dest = os.path.join(base_dir, "train{}".format(i), category)
valid_dest = os.path.join(base_dir, "val{}".format(i), category)
for j in range(train_data_num):
copy(files2copy[j], train_dest)
for j in range(train_data_num, l):
copy(files2copy[j], valid_dest)
# create a folder to store the weights of pretrained models
from os.path import join, exists
from os import makedirs
cache_dir = join('/tmp', '.torch')
if not exists(cache_dir):
print("creating {}".format(cache_dir))
makedirs(cache_dir)
models_dir = join(cache_dir, 'models')
if not exists(models_dir):
print("creating {}".format(models_dir))
makedirs(models_dir)
# !cp ../input/resnet101/resnet101.pth /tmp/.torch/models/resnet101-5d3b4d8f.pth
# we need to find the weights of the pretrained model in https://www.kaggle.com/pytorch and add the weiths to our kernel.
# since pytorch finds the weights in /tmp/.torch/models, we need to copy the weights file to the folder and rename it.
!cp ../input/resnet50/resnet50.pth /tmp/.torch/models/resnet50-19c8e357.pth
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as tv
from torchvision import transforms
# define transforms
img_size = (224, 224)
train_transforms = transforms.Compose([transforms.Resize(img_size), transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
valid_transforms = transforms.Compose([transforms.Resize(img_size),
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# define data loader
batch_size = 64
train_dataset = tv.datasets.ImageFolder("train1", train_transforms)
valid_dataset = tv.datasets.ImageFolder("val1", valid_transforms)
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
valid_data_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# define model
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
myModel = tv.models.resnet50(pretrained=True)
set_parameter_requires_grad(myModel, True)
num_ftrs = myModel.fc.in_features
num_classes = len(category_files.keys())
# three layers are added to the top of the pretrained model
myModel.fc = nn.Sequential(*[nn.Linear(num_ftrs, 1024), nn.Dropout(0.25), nn.Linear(1024, num_classes)])
# define optimizer and loss
optimizer = optim.SGD(myModel.parameters(), lr=0.001, momentum=0.9)
criterion = nn.CrossEntropyLoss()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
myModel = myModel.to(device)
epochs = 200
best_val_loss = 100
for i in range(epochs):
train_loss = 0
train_corrects = 0
myModel.train()
for inputs, labels in train_data_loader:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
predicts = myModel(inputs)
loss = criterion(predicts, labels)
loss.backward()
optimizer.step()
_, preds = torch.max(predicts, 1)
train_loss += loss.item() * inputs.size(0)
train_corrects += torch.sum(preds == labels.data)
val_loss = 0
val_corrects = 0
myModel.eval()
for inputs, labels in valid_data_loader:
inputs = inputs.to(device)
labels = labels.to(device)
predicts = myModel(inputs)
loss = criterion(predicts, labels)
_, preds = torch.max(predicts, 1)
val_loss += loss.item() * inputs.size(0)
val_corrects += torch.sum(preds == labels.data)
print("epoch: {}, train loss: {}, train accu: {}, val loss: {}, val accu: {}".format(i,
train_loss / len(train_data_loader.dataset), train_corrects.double() / len(train_data_loader.dataset),
val_loss / len(valid_data_loader.dataset), val_corrects.double() / len(valid_data_loader.dataset)))
if val_loss / len(valid_data_loader.dataset) < best_val_loss:
torch.save(myModel.state_dict(), "myModel.pkl")
best_val_loss = val_loss / len(valid_data_loader.dataset)
| 0.570571 | 0.387777 |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
This notebook demonstrates how to run a batch scoring job. The __[Inception-V3 model](https://arxiv.org/abs/1512.00567)__ and unlabeled images from the __[ImageNet](http://image-net.org/)__ dataset will be used. It registers a pretrained Inception model in the model registry and then uses the model to do batch scoring on images in a blob container.
## Prerequisites
Make sure you go through the [00. Installation and Configuration](./00.configuration.ipynb) Notebook first if you haven't.
```
import os
from azureml.core import Workspace, Run, Experiment
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
# Also create a local folder to hold the scoring script
scripts_folder = "scripts"
if not os.path.isdir(scripts_folder):
os.mkdir(scripts_folder)
from azureml.core.compute import BatchAiCompute, ComputeTarget
from azureml.core.datastore import Datastore
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import CondaDependencies, RunConfiguration
```
## Create and attach Compute targets
Use the below code to create and attach Compute targets.
```
# Batch AI compute
cluster_name = "gpu-cluster"
try:
cluster = BatchAiCompute(ws, cluster_name)
print("found existing cluster.")
except:
print("creating new cluster")
provisioning_config = BatchAiCompute.provisioning_configuration(vm_size = "STANDARD_NC6",
autoscale_enabled = True,
cluster_min_nodes = 0,
cluster_max_nodes = 1)
# create the cluster
cluster = ComputeTarget.create(ws, cluster_name, provisioning_config)
cluster.wait_for_completion(show_output=True)
```
# Python scripts to run
Python script that runs the batch scoring. `batchai_score.py` reads input images from `dataset_path`, loads the registered pretrained model given by `model_name` together with the class labels in `label_dir`, and writes a `result-labels.txt` to `output_dir`.
```
%%writefile $scripts_folder/batchai_score.py
import os
import argparse
import datetime,time
import tensorflow as tf
from math import ceil
import numpy as np
import shutil
from tensorflow.contrib.slim.python.slim.nets import inception_v3
from azureml.core.model import Model
slim = tf.contrib.slim
parser = argparse.ArgumentParser(description="Start a tensorflow model serving")
parser.add_argument('--model_name', dest="model_name", required=True)
parser.add_argument('--label_dir', dest="label_dir", required=True)
parser.add_argument('--dataset_path', dest="dataset_path", required=True)
parser.add_argument('--output_dir', dest="output_dir", required=True)
parser.add_argument('--batch_size', dest="batch_size", type=int, required=True)
args = parser.parse_args()
image_size = 299
num_channel = 3
# create output directory if it does not exist
os.makedirs(args.output_dir, exist_ok=True)
def get_class_label_dict(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
class DataIterator:
def __init__(self, data_dir):
self.file_paths = []
image_list = os.listdir(data_dir)
total_size = len(image_list)
self.file_paths = [data_dir + '/' + file_name.rstrip() for file_name in image_list ]
self.labels = [1 for file_name in self.file_paths]
@property
def size(self):
return len(self.labels)
def input_pipeline(self, batch_size):
images_tensor = tf.convert_to_tensor(self.file_paths, dtype=tf.string)
labels_tensor = tf.convert_to_tensor(self.labels, dtype=tf.int64)
input_queue = tf.train.slice_input_producer([images_tensor, labels_tensor], shuffle=False)
labels = input_queue[1]
images_content = tf.read_file(input_queue[0])
image_reader = tf.image.decode_jpeg(images_content, channels=num_channel, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
new_size = tf.constant([image_size, image_size], dtype=tf.int32)
images = tf.image.resize_images(float_caster, new_size)
images = tf.divide(tf.subtract(images, [0]), [255])
image_batch, label_batch = tf.train.batch([images, labels], batch_size=batch_size, capacity=5 * batch_size)
return image_batch
def main(_):
start_time = datetime.datetime.now()
label_file_name = os.path.join(args.label_dir, "labels.txt")
label_dict = get_class_label_dict(label_file_name)
classes_num = len(label_dict)
test_feeder = DataIterator(data_dir=args.dataset_path)
total_size = len(test_feeder.labels)
count = 0
# get model from model registry
model_path = Model.get_model_path(args.model_name)
with tf.Session() as sess:
test_images = test_feeder.input_pipeline(batch_size=args.batch_size)
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
input_images = tf.placeholder(tf.float32, [args.batch_size, image_size, image_size, num_channel])
logits, _ = inception_v3.inception_v3(input_images,
num_classes=classes_num,
is_training=False)
probabilities = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
saver = tf.train.Saver()
saver.restore(sess, model_path)
out_filename = os.path.join(args.output_dir, "result-labels.txt")
with open(out_filename, "w") as result_file:
i = 0
while count < total_size and not coord.should_stop():
test_images_batch = sess.run(test_images)
file_names_batch = test_feeder.file_paths[i*args.batch_size: min(test_feeder.size, (i+1)*args.batch_size)]
results = sess.run(probabilities, feed_dict={input_images: test_images_batch})
new_add = min(args.batch_size, total_size-count)
count += new_add
i += 1
for j in range(new_add):
result_file.write(os.path.basename(file_names_batch[j]) + ": " + label_dict[results[j]] + "\n")
result_file.flush()
coord.request_stop()
coord.join(threads)
# copy the file to artifacts
shutil.copy(out_filename, "./outputs/")
# Move the processed data out of the blob so that the next run can process the data.
if __name__ == "__main__":
tf.app.run()
```
## Prepare Model and Input data
### Download Model
Download and extract model from http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz to `"models"`
```
# create directory for model
model_dir = 'models'
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
import tarfile
import urllib.request
url="http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
response = urllib.request.urlretrieve(url, "model.tar.gz")
tar = tarfile.open("model.tar.gz", "r:gz")
tar.extractall(model_dir)
```
### Create a datastore that points to blob container containing sample images
We have created a public blob container `sampledata` on an account named `pipelinedata` containing images from the ImageNet evaluation set. In the next step, we create a datastore with the name `images_datastore` that points to this container. The `overwrite=True` step overwrites any datastore that was created previously with that name.
This step can be changed to point to your blob container by providing an additional `account_key` parameter with `account_name`.
```
account_name = "pipelinedata"
sample_data = Datastore.register_azure_blob_container(ws, datastore_name="images_datastore", container_name="sampledata",
account_name=account_name,
overwrite=True)
```
# Output datastore
We write the outputs to the default datastore
```
default_ds = ws.get_default_datastore()
```
# Specify where the data is stored or will be written to
```
from azureml.core.conda_dependencies import CondaDependencies
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.core import Datastore
from azureml.core import Experiment
input_images = DataReference(datastore=sample_data,
data_reference_name="input_images",
path_on_datastore="batchscoring/images",
mode="download"
)
model_dir = DataReference(datastore=sample_data,
data_reference_name="input_model",
path_on_datastore="batchscoring/models",
mode="download"
)
label_dir = DataReference(datastore=sample_data,
data_reference_name="input_labels",
path_on_datastore="batchscoring/labels",
mode="download"
)
output_dir = PipelineData(name="scores",
datastore_name=default_ds.name,
output_path_on_compute="batchscoring/results")
```
## Register the model with Workspace
```
import shutil
from azureml.core.model import Model
# register downloaded model
model = Model.register(model_path = "models/inception_v3.ckpt",
model_name = "inception", # this is the name the model is registered as
tags = {'pretrained': "inception"},
description = "Imagenet trained tensorflow inception",
workspace = ws)
# remove the downloaded dir after registration if you wish
shutil.rmtree("models")
```
# Specify environment to run the script
```
from azureml.core.runconfig import DEFAULT_GPU_IMAGE
cd = CondaDependencies()
cd.add_pip_package("tensorflow-gpu")
# Run configuration: GPU-enabled Docker image, Spark package caching disabled
batchai_run_config = RunConfiguration(conda_dependencies=cd)
batchai_run_config.environment.docker.enabled = True
batchai_run_config.environment.docker.gpu_support = True
batchai_run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE
batchai_run_config.environment.spark.precache_packages = False
```
# Steps to run
A subset of the parameters to the Python script can be exposed as pipeline parameters and overridden when we re-run a `PublishedPipeline`. In the current example, we expose the script's `batch_size` argument as such a parameter.
```
from azureml.pipeline.core.graph import PipelineParameter
batch_size_param = PipelineParameter(name="param_batch_size", default_value=20)
inception_model_name = "inception_v3.ckpt"
batch_score_step = PythonScriptStep(
name="batch ai scoring",
script_name="batchai_score.py",
arguments=["--dataset_path", input_images,
"--model_name", "inception",
"--label_dir", label_dir,
"--output_dir", output_dir,
"--batch_size", batch_size_param],
target=cluster,
inputs=[input_images, label_dir],
outputs=[output_dir],
runconfig=batchai_run_config,
source_directory=scripts_folder
)
pipeline = Pipeline(workspace=ws, steps=[batch_score_step])
pipeline_run = Experiment(ws, 'batch_scoring').submit(pipeline, pipeline_params={"param_batch_size": 20})
```
# Monitor run
```
from azureml.train.widgets import RunDetails
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=True)
```
# Download and review output
```
step_run = list(pipeline_run.get_children())[0]
step_run.download_file("./outputs/result-labels.txt")
import pandas as pd
df = pd.read_csv("result-labels.txt", delimiter=":", header=None)
df.columns = ["Filename", "Prediction"]
df.head()
```
# Publish a pipeline and rerun using a REST call
## Create a published pipeline
```
published_pipeline = pipeline_run.publish_pipeline(
name="Inception v3 scoring", description="Batch scoring using Inception v3 model", version="1.0")
published_id = published_pipeline.id
```
## Rerun using REST call
## Get AAD token
```
from azureml.core.authentication import AzureCliAuthentication
import requests
cli_auth = AzureCliAuthentication()
aad_token = cli_auth.get_authentication_header()
```
## Run published pipeline using its REST endpoint
```
from azureml.pipeline.core import PublishedPipeline
rest_endpoint = PublishedPipeline.get_endpoint(published_id, ws)
# specify batch size when running the pipeline
response = requests.post(rest_endpoint, headers=aad_token, json={"param_batch_size": 50})
run_id = response.json()["Id"]
```
## Monitor the new run
```
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run = PipelineRun(ws.experiments()["batch_scoring"], run_id)
RunDetails(published_pipeline_run).show()
```
|
github_jupyter
|
import os
from azureml.core import Workspace, Run, Experiment
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
'Azure region: ' + ws.location,
'Subscription id: ' + ws.subscription_id,
'Resource group: ' + ws.resource_group, sep = '\n')
# Also create a Project and attach to Workspace
scripts_folder = "scripts"
if not os.path.isdir(scripts_folder):
os.mkdir(scripts_folder)
from azureml.core.compute import BatchAiCompute, ComputeTarget
from azureml.core.datastore import Datastore
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import CondaDependencies, RunConfiguration
# Batch AI compute
cluster_name = "gpu-cluster"
try:
cluster = BatchAiCompute(ws, cluster_name)
print("found existing cluster.")
except:
print("creating new cluster")
provisioning_config = BatchAiCompute.provisioning_configuration(vm_size = "STANDARD_NC6",
autoscale_enabled = True,
cluster_min_nodes = 0,
cluster_max_nodes = 1)
# create the cluster
cluster = ComputeTarget.create(ws, cluster_name, provisioning_config)
cluster.wait_for_completion(show_output=True)
%%writefile $scripts_folder/batchai_score.py
import os
import argparse
import datetime,time
import tensorflow as tf
from math import ceil
import numpy as np
import shutil
from tensorflow.contrib.slim.python.slim.nets import inception_v3
from azureml.core.model import Model
slim = tf.contrib.slim
parser = argparse.ArgumentParser(description="Start a tensorflow model serving")
parser.add_argument('--model_name', dest="model_name", required=True)
parser.add_argument('--label_dir', dest="label_dir", required=True)
parser.add_argument('--dataset_path', dest="dataset_path", required=True)
parser.add_argument('--output_dir', dest="output_dir", required=True)
parser.add_argument('--batch_size', dest="batch_size", type=int, required=True)
args = parser.parse_args()
image_size = 299
num_channel = 3
# create output directory if it does not exist
os.makedirs(args.output_dir, exist_ok=True)
def get_class_label_dict(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
class DataIterator:
def __init__(self, data_dir):
self.file_paths = []
image_list = os.listdir(data_dir)
total_size = len(image_list)
self.file_paths = [data_dir + '/' + file_name.rstrip() for file_name in image_list ]
self.labels = [1 for file_name in self.file_paths]
@property
def size(self):
return len(self.labels)
def input_pipeline(self, batch_size):
images_tensor = tf.convert_to_tensor(self.file_paths, dtype=tf.string)
labels_tensor = tf.convert_to_tensor(self.labels, dtype=tf.int64)
input_queue = tf.train.slice_input_producer([images_tensor, labels_tensor], shuffle=False)
labels = input_queue[1]
images_content = tf.read_file(input_queue[0])
image_reader = tf.image.decode_jpeg(images_content, channels=num_channel, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
new_size = tf.constant([image_size, image_size], dtype=tf.int32)
images = tf.image.resize_images(float_caster, new_size)
images = tf.divide(tf.subtract(images, [0]), [255])
image_batch, label_batch = tf.train.batch([images, labels], batch_size=batch_size, capacity=5 * batch_size)
return image_batch
def main(_):
start_time = datetime.datetime.now()
label_file_name = os.path.join(args.label_dir, "labels.txt")
label_dict = get_class_label_dict(label_file_name)
classes_num = len(label_dict)
test_feeder = DataIterator(data_dir=args.dataset_path)
total_size = len(test_feeder.labels)
count = 0
# get model from model registry
model_path = Model.get_model_path(args.model_name)
with tf.Session() as sess:
test_images = test_feeder.input_pipeline(batch_size=args.batch_size)
with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
input_images = tf.placeholder(tf.float32, [args.batch_size, image_size, image_size, num_channel])
logits, _ = inception_v3.inception_v3(input_images,
num_classes=classes_num,
is_training=False)
probabilities = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
saver = tf.train.Saver()
saver.restore(sess, model_path)
out_filename = os.path.join(args.output_dir, "result-labels.txt")
with open(out_filename, "w") as result_file:
i = 0
while count < total_size and not coord.should_stop():
test_images_batch = sess.run(test_images)
file_names_batch = test_feeder.file_paths[i*args.batch_size: min(test_feeder.size, (i+1)*args.batch_size)]
results = sess.run(probabilities, feed_dict={input_images: test_images_batch})
new_add = min(args.batch_size, total_size-count)
count += new_add
i += 1
for j in range(new_add):
result_file.write(os.path.basename(file_names_batch[j]) + ": " + label_dict[results[j]] + "\n")
result_file.flush()
coord.request_stop()
coord.join(threads)
# copy the file to artifacts
shutil.copy(out_filename, "./outputs/")
# Move the processed data out of the blob so that the next run can process the data.
if __name__ == "__main__":
tf.app.run()
# create directory for model
model_dir = 'models'
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
import tarfile
import urllib.request
url="http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
response = urllib.request.urlretrieve(url, "model.tar.gz")
tar = tarfile.open("model.tar.gz", "r:gz")
tar.extractall(model_dir)
account_name = "pipelinedata"
sample_data = Datastore.register_azure_blob_container(ws, datastore_name="images_datastore", container_name="sampledata",
account_name=account_name,
overwrite=True)
default_ds = ws.get_default_datastore()
from azureml.core.conda_dependencies import CondaDependencies
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.core import Datastore
from azureml.core import Experiment
input_images = DataReference(datastore=sample_data,
data_reference_name="input_images",
path_on_datastore="batchscoring/images",
mode="download"
)
model_dir = DataReference(datastore=sample_data,
data_reference_name="input_model",
path_on_datastore="batchscoring/models",
mode="download"
)
label_dir = DataReference(datastore=sample_data,
data_reference_name="input_labels",
path_on_datastore="batchscoring/labels",
mode="download"
)
output_dir = PipelineData(name="scores",
datastore_name=default_ds.name,
output_path_on_compute="batchscoring/results")
import shutil
from azureml.core.model import Model
# register downloaded model
model = Model.register(model_path = "models/inception_v3.ckpt",
model_name = "inception", # this is the name the model is registered as
tags = {'pretrained': "inception"},
description = "Imagenet trained tensorflow inception",
workspace = ws)
# remove the downloaded dir after registration if you wish
shutil.rmtree("models")
from azureml.core.runconfig import DEFAULT_GPU_IMAGE
cd = CondaDependencies()
cd.add_pip_package("tensorflow-gpu")
# Run configuration: GPU-enabled Docker image, Spark package caching disabled
batchai_run_config = RunConfiguration(conda_dependencies=cd)
batchai_run_config.environment.docker.enabled = True
batchai_run_config.environment.docker.gpu_support = True
batchai_run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE
batchai_run_config.environment.spark.precache_packages = False
from azureml.pipeline.core.graph import PipelineParameter
batch_size_param = PipelineParameter(name="param_batch_size", default_value=20)
inception_model_name = "inception_v3.ckpt"
batch_score_step = PythonScriptStep(
name="batch ai scoring",
script_name="batchai_score.py",
arguments=["--dataset_path", input_images,
"--model_name", "inception",
"--label_dir", label_dir,
"--output_dir", output_dir,
"--batch_size", batch_size_param],
target=cluster,
inputs=[input_images, label_dir],
outputs=[output_dir],
runconfig=batchai_run_config,
source_directory=scripts_folder
)
pipeline = Pipeline(workspace=ws, steps=[batch_score_step])
pipeline_run = Experiment(ws, 'batch_scoring').submit(pipeline, pipeline_params={"param_batch_size": 20})
from azureml.train.widgets import RunDetails
RunDetails(pipeline_run).show()
pipeline_run.wait_for_completion(show_output=True)
step_run = list(pipeline_run.get_children())[0]
step_run.download_file("./outputs/result-labels.txt")
import pandas as pd
df = pd.read_csv("result-labels.txt", delimiter=":", header=None)
df.columns = ["Filename", "Prediction"]
df.head()
published_pipeline = pipeline_run.publish_pipeline(
name="Inception v3 scoring", description="Batch scoring using Inception v3 model", version="1.0")
published_id = published_pipeline.id
from azureml.core.authentication import AzureCliAuthentication
import requests
cli_auth = AzureCliAuthentication()
aad_token = cli_auth.get_authentication_header()
from azureml.pipeline.core import PublishedPipeline
rest_endpoint = PublishedPipeline.get_endpoint(published_id, ws)
# specify batch size when running the pipeline
response = requests.post(rest_endpoint, headers=aad_token, json={"param_batch_size": 50})
run_id = response.json()["Id"]
from azureml.pipeline.core.run import PipelineRun
published_pipeline_run = PipelineRun(ws.experiments()["batch_scoring"], run_id)
RunDetails(published_pipeline_run).show()
| 0.512449 | 0.846197 |
# Convolutional Neural Networks
In this notebook we will implement a convolutional neural network. Rather than doing everything from scratch we will make use of [TensorFlow 2](https://www.tensorflow.org/) and the [Keras](https://keras.io) high level interface.
## Installing TensorFlow and Keras
TensorFlow and Keras are not included with the base Anaconda install, but can be easily installed by running the following commands on the Anaconda Command Prompt/terminal window:
```
conda install notebook jupyterlab nb_conda_kernels
conda create -n tf tensorflow ipykernel mkl
```
Once this has been done, you should be able to select the `Python [conda env:tf]` kernel from the Kernel->Change Kernel menu item at the top of this notebook. Then we import the TensorFlow package:
```
import tensorflow as tf
```
## Creating a simple network with TensorFlow
We will start by creating a very simple fully connected feedforward network using TensorFlow/Keras. The network will mimic the one we implemented previously, but TensorFlow/Keras will take care of most of the details for us.
### MNIST Dataset
First, let us load the MNIST digits dataset that we will be using to train our network. This is available directly within Keras:
```
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
```
The data comes as a set of integers in the range [0,255] representing the shade of gray of a given pixel. Let's first rescale them to be in the range [0,1]:
```
x_train, x_test = x_train / 255.0, x_test / 255.0
```
Now we can build a neural network model using Keras. This uses a very simple high-level modular structure where we only have to specify the layers in our model and the properties of each layer. The layers we will have are as follows:
1. Input layer: This will be a 28x28 matrix of numbers.
2. `Flatten` layer: Convert our 28x28 pixel image into an array of size 784.
3. `Dense` layer: a fully-connected layer of the type we have been using up to now. We will use 30 neurons and the sigmoid activation function.
4. `Dense` layer: fully-connected output layer.
```
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(30, activation='sigmoid'),
tf.keras.layers.Dense(10, activation='softmax')
])
```
Next we compile this model, specifying the optimization algorithm (ADAM) and loss function (cross-entropy) to be used.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
We now train the model with our training data. We will run for 5 epochs.
```
model.fit(x_train, y_train, epochs=5)
```
Finally, we check the accuracy of our model against the test data
```
model.evaluate(x_test, y_test, verbose=False)
```
It has 95.5% accuracy, consistent with what was found during training.
#### Exercises
Experiment with this network:
1. Change the number of neurons in the hidden layer.
2. Add more hidden layers.
3. Change the activation function in the hidden layer to `relu` (for examples see the list of [Keras Layer Activation Functions](https://keras.io/api/layers/activations/)).
4. Change the activation in the output layer to something other than `softmax`.
5. Change the loss function (for examples see the list of [Keras Loss Functions](https://keras.io/api/losses/)).
How does the performance of your network change with these modifications?
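As a starting point for these exercises, here is one possible variant (a sketch; the name `model2` is arbitrary) that uses two larger hidden layers with `relu` activation:
```
model2 = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model2.compile(optimizer='adam',
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])
model2.fit(x_train, y_train, epochs=5)
model2.evaluate(x_test, y_test, verbose=False)
```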
#### Task
Implement the neural network in "[Gradient-based learning applied to document recognition](http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf)", by Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. The [Keras Layer documentation](https://keras.io/api/layers/) includes information about the layers supported. In particular, [`Conv2D`](https://keras.io/api/layers/convolution_layers/convolution2d) and [`MaxPooling2D`](https://keras.io/api/layers/pooling_layers/max_pooling2d) layers may be useful.
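A possible starting point is sketched below (the name `lenet` is arbitrary). It only approximates LeNet-5: the original paper uses scaled tanh units, subsampling layers, and an RBF output layer, whereas this sketch uses standard Keras layers with the same 6-16-120-84-10 structure. The `Reshape` layer adds the channel dimension that `Conv2D` expects.
```
lenet = tf.keras.models.Sequential([
    tf.keras.layers.Reshape((28, 28, 1), input_shape=(28, 28)),  # add a channels axis
    tf.keras.layers.Conv2D(6, kernel_size=5, padding='same', activation='tanh'),
    tf.keras.layers.AveragePooling2D(pool_size=2),
    tf.keras.layers.Conv2D(16, kernel_size=5, activation='tanh'),
    tf.keras.layers.AveragePooling2D(pool_size=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(120, activation='tanh'),
    tf.keras.layers.Dense(84, activation='tanh'),
    tf.keras.layers.Dense(10, activation='softmax')
])
lenet.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
lenet.fit(x_train, y_train, epochs=5)
lenet.evaluate(x_test, y_test, verbose=False)
```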
|
github_jupyter
|
conda install notebook jupyterlab nb_conda_kernels
conda create -n tf tensorflow ipykernel mkl
import tensorflow as tf
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(30, activation='sigmoid'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=False)
| 0.886727 | 0.995354 |
# Functions in Pandas
```
import pandas as pd
stats = pd.read_excel('ad_campaigns.xlsx')
stats.head()
?stats.rename
stats.columns = ['group', 'phrase', 'effect', 'ad_id', 'title', 'text', 'link']
stats.head()
```
### Lambda functions
We want to compute the distribution of the number of words in the phrase column.
```
stats['word_count'] = stats['phrase'].apply(lambda x: len(x.split(' ')))
stats.head()
# variant that passes the whole row to the function
# here you must specify the axis = 1 parameter
stats['word_count'] = stats.apply(lambda x: len(x['phrase'].split(' ')), axis=1)
stats.head()
stats['word_count'].value_counts()
%matplotlib inline
stats['word_count'].hist()
stats['word_count'].hist(bins=30)
```
### Exercise
Which word counts occur most often among the search queries in our data? Print the top 5.
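One possible solution (a sketch):
```
stats['word_count'].value_counts().head(5)
```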
### Custom functions
The campaign URLs contain the campaign names; let's extract them.
```
# an example link
url = stats.loc[0, 'link']
url
from urllib import parse
parsed = parse.urlsplit(url)
parsed
# you could of course do it by hand
parsed.query.split('&')[2].split('=')[1]
# how to extract the values
parsed.netloc
params = parse.parse_qs(parsed.query)
params
# and here are the campaigns
params['utm_campaign'][0]
# why lists everywhere?
url_with_doubles = 'https://awesome-site.ru/?a=1&a=2&a=3'
parsed = parse.urlsplit(url_with_doubles)
parse.parse_qs(parsed.query)
# wrap it all in a function
# the argument will be a dataframe row
def campaign_name(row):
"""Extract the campaign name from the link inside the row"""
parsed = parse.urlsplit(row['link'])
params_dict = parse.parse_qs(parsed.query)
return params_dict['utm_campaign'][0]
stats['campaign'] = stats.apply(campaign_name, axis=1)
stats.head()
# how to pass several arguments
def power_up(row, n):
"""Raise the value of the effect column to the power n"""
return row['effect'] ** n
stats['power_up'] = stats.apply(power_up, n=3, axis=1)
stats.head()
```
### Exercise
Our data contains many ads for MRI services (the group name contains the word 'мрт') that run around the clock (the ad text contains '24 часа'). Select the rows that do not mention MRI but where the service is available around the clock. How many such rows are there in the dataset?
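One possible approach (a sketch; `case=False` guards against different letter cases in the group names, and `mask` is an arbitrary name):
```
mask = (~stats['group'].str.contains('мрт', case=False)) & (stats['text'].str.contains('24 часа'))
stats[mask].shape[0]
```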
# Grouping
```
# everyone is tired of value_counts by now
stats['campaign'].value_counts().head()
# we need something more universal
stats.groupby('campaign').count().head()
stats.groupby('campaign').count()[['group', 'effect']]
stats.index.values
stats.groupby('campaign').count().reset_index()
# the same result as with value_counts
stats.groupby('campaign').count().reset_index().sort_values('group', ascending=False).head()
# but now we can swap in different functions
stats.groupby('campaign').sum().sort_values('effect').head()
```
### Exercise
For each campaign, find the minimum value of the effect column and rank the campaigns by these values, starting from the lowest.
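A possible solution sketch:
```
stats.groupby('campaign')['effect'].min().sort_values().head()
```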
### Multiple functions in a groupby
```
# specify several functions at once
stats.groupby('campaign').agg(['min', 'max'])['effect'].head()
# different metrics for different columns
stats.groupby('campaign').agg({'effect': ['min', 'max'], 'power_up': 'mean'}).head()
# grouping by several columns
stats.groupby(['group', 'campaign']).count().head()
# the maximum number of ads in a single group
stats.groupby(['group', 'campaign']).count().sort_values('phrase', ascending=False).head()
```
### Exercise
Which campaign has the smallest sum of the effect column?
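A possible solution sketch:
```
stats.groupby('campaign')['effect'].sum().sort_values().head(1)
```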
# Aggregated groupings
We compute metrics for the users who have given 100 or more ratings.
```
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
```
### Exercise
Get a list of users, film_fans_user_ids, who have rated 100 or more movies. To convert a pandas Series to a list, you can use the tolist() method:
film_fans_user_ids = some_dataframe['userId'].tolist()
```
ratings.groupby('userId').count().head()
ratings_filtered[.... > 100]
ratings_filtered['userId'].tolist()
ratings[ ratings['userId'].isin(...) ]
```
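One way to fill in the skeleton above (a sketch; `ratings_per_user` and `film_fans` are arbitrary intermediate names):
```
ratings_per_user = ratings.groupby('userId').count().reset_index()
film_fans = ratings_per_user[ratings_per_user['movieId'] >= 100]
film_fans_user_ids = film_fans['userId'].tolist()
ratings[ratings['userId'].isin(film_fans_user_ids)].head()
```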
# Pivot tables
Just like in Excel
```
data = pd.read_csv('ml-latest-small/ratings.csv')
data.head()
# which ratings did user #1 give
data[ data['userId'] == 1 ]['rating'].value_counts()
# and for all users at once?
data.pivot_table(index = 'userId', columns = 'rating', values = 'timestamp', aggfunc = 'count', fill_value = 0)
# totals can be added too
data.pivot_table(index = 'userId', columns = 'rating', values = 'timestamp', aggfunc = 'count', fill_value = 0,
margins = True)
```
### Exercise
Which user gave the most 5-star ratings?
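A possible solution sketch:
```
data[ data['rating'] == 5 ]['userId'].value_counts().head(1)
```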
# Merging dataframes
Which genre has the highest ratings?
```
genres = ['Drama', 'Action', 'Thriller']
import numpy as np
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
movies = pd.read_csv('ml-latest-small/movies.csv')
movies.head()
joined = ratings.merge(movies, on='movieId', how='left')
joined.head()
# a recommended check for possible duplicates
len(ratings) == len(joined)
```
### A few pitfalls
```
ratings = pd.read_csv('ratings_example.txt', sep = '\t')
ratings.head()
movies = pd.read_csv('movies_example.txt', sep = '\t')
movies.head()
# ¯\_(ツ)_/¯
ratings.merge(movies, how = 'left', on = 'movieId')
movies.drop_duplicates(subset = 'movieId', keep = 'first', inplace = True)
movies.head()
ratings.merge(movies, how = 'left', on = 'movieId')
```
### Computing genre ratings
```
# the genre list once more
genres = ['Drama', 'Action', 'Thriller']
def genres_ratings(row):
"""Return the rating if the genre appears in this movie's genre list"""
return pd.Series([row['rating'] if genre in row['genres'] else np.NaN for genre in genres])
joined[genres] = joined.apply(genres_ratings, axis=1)
joined.head()
```
### Exercise
Print the average rating of each genre in the genres list.
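A possible solution sketch (the per-genre columns created above hold the movie's rating or NaN, and `mean()` ignores NaN):
```
joined[genres].mean()
```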
# A bit about visualization
### Matplotlib
```
import matplotlib.pyplot as plt
data = pd.read_csv('sales_data.csv', sep=';')
data
plt.plot(data['month'], data['2016'])
plt.plot(data['2017'])
plt.plot(data['2018'])
plt.plot(data['month'], data['2016'], marker='o')
plt.plot(data['2017'], linestyle='dashed')
plt.plot(data['2016'], linewidth=5)
plt.legend()
```
### Seaborn
http://seaborn.pydata.org/examples/
```
features = pd.read_csv('correlation.tsv', sep='\t')
features
# we don't need the month column here
features.drop('month', axis=1, inplace=True)
features.head()
features.corr()
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(features.corr())
# make it easier to read
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(features.corr(), annot=True, fmt='.1f', ax=ax, cmap=sns.color_palette('coolwarm', 16))
```
|
github_jupyter
|
import pandas as pd
stats = pd.read_excel('ad_campaigns.xlsx')
stats.head()
?stats.rename
stats.columns = ['group', 'phrase', 'effect', 'ad_id', 'title', 'text', 'link']
stats.head()
stats['word_count'] = stats['phrase'].apply(lambda x: len(x.split(' ')))
stats.head()
# variant that passes the whole row to the function
# here you must specify the axis = 1 parameter
stats['word_count'] = stats.apply(lambda x: len(x['phrase'].split(' ')), axis=1)
stats.head()
stats['word_count'].value_counts()
%matplotlib inline
stats['word_count'].hist()
stats['word_count'].hist(bins=30)
# an example link
url = stats.loc[0, 'link']
url
from urllib import parse
parsed = parse.urlsplit(url)
parsed
# you could of course do it by hand
parsed.query.split('&')[2].split('=')[1]
# how to extract the values
parsed.netloc
params = parse.parse_qs(parsed.query)
params
# and here are the campaigns
params['utm_campaign'][0]
# why lists everywhere?
url_with_doubles = 'https://awesome-site.ru/?a=1&a=2&a=3'
parsed = parse.urlsplit(url_with_doubles)
parse.parse_qs(parsed.query)
# wrap it all in a function
# the argument will be a dataframe row
def campaign_name(row):
"""Extract the campaign name from the link inside the row"""
parsed = parse.urlsplit(row['link'])
params_dict = parse.parse_qs(parsed.query)
return params_dict['utm_campaign'][0]
stats['campaign'] = stats.apply(campaign_name, axis=1)
stats.head()
# how to pass several arguments
def power_up(row, n):
"""Raise the value of the effect column to the power n"""
return row['effect'] ** n
stats['power_up'] = stats.apply(power_up, n=3, axis=1)
stats.head()
# everyone is tired of value_counts by now
stats['campaign'].value_counts().head()
# we need something more universal
stats.groupby('campaign').count().head()
stats.groupby('campaign').count()[['group', 'effect']]
stats.index.values
stats.groupby('campaign').count().reset_index()
# the same result as with value_counts
stats.groupby('campaign').count().reset_index().sort_values('group', ascending=False).head()
# but now we can swap in different functions
stats.groupby('campaign').sum().sort_values('effect').head()
# specify several functions at once
stats.groupby('campaign').agg(['min', 'max'])['effect'].head()
# different metrics for different columns
stats.groupby('campaign').agg({'effect': ['min', 'max'], 'power_up': 'mean'}).head()
# grouping by several columns
stats.groupby(['group', 'campaign']).count().head()
# the maximum number of ads in a single group
stats.groupby(['group', 'campaign']).count().sort_values('phrase', ascending=False).head()
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
ratings.groupby('userId').count().head()
ratings_filtered[.... > 100]
ratings_filtered['userId'].tolist()
ratings[ ratings['userId'].isin(...) ]
data = pd.read_csv('ml-latest-small/ratings.csv')
data.head()
# which ratings did user #1 give
data[ data['userId'] == 1 ]['rating'].value_counts()
# and for all users at once?
data.pivot_table(index = 'userId', columns = 'rating', values = 'timestamp', aggfunc = 'count', fill_value = 0)
# totals can be added too
data.pivot_table(index = 'userId', columns = 'rating', values = 'timestamp', aggfunc = 'count', fill_value = 0,
margins = True)
genres = ['Drama', 'Action', 'Thriller']
import numpy as np
ratings = pd.read_csv('ml-latest-small/ratings.csv')
ratings.head()
movies = pd.read_csv('ml-latest-small/movies.csv')
movies.head()
joined = ratings.merge(movies, on='movieId', how='left')
joined.head()
# a recommended check for possible duplicates
len(ratings) == len(joined)
ratings = pd.read_csv('ratings_example.txt', sep = '\t')
ratings.head()
movies = pd.read_csv('movies_example.txt', sep = '\t')
movies.head()
# ¯\_(ツ)_/¯
ratings.merge(movies, how = 'left', on = 'movieId')
movies.drop_duplicates(subset = 'movieId', keep = 'first', inplace = True)
movies.head()
ratings.merge(movies, how = 'left', on = 'movieId')
# the genre list once more
genres = ['Drama', 'Action', 'Thriller']
def genres_ratings(row):
"""Return the rating if the genre appears in this movie's genre list"""
return pd.Series([row['rating'] if genre in row['genres'] else np.NaN for genre in genres])
joined[genres] = joined.apply(genres_ratings, axis=1)
joined.head()
import matplotlib.pyplot as plt
data = pd.read_csv('sales_data.csv', sep=';')
data
plt.plot(data['month'], data['2016'])
plt.plot(data['2017'])
plt.plot(data['2018'])
plt.plot(data['month'], data['2016'], marker='o')
plt.plot(data['2017'], linestyle='dashed')
plt.plot(data['2016'], linewidth=5)
plt.legend()
features = pd.read_csv('correlation.tsv', sep='\t')
features
# we don't need the month column here
features.drop('month', axis=1, inplace=True)
features.head()
features.corr()
import matplotlib.pyplot as plt
import seaborn as sns
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(features.corr())
# make it easier to read
f, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(features.corr(), annot=True, fmt='.1f', ax=ax, cmap=sns.color_palette('coolwarm', 16))
| 0.294316 | 0.870927 |
# Bite Size Bayes
Copyright 2020 Allen B. Downey
License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
## Review
[In the previous notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/04_dice.ipynb) I presented Theorem 4, which is a way to compute the probability of a disjunction (`or` operation) using the probability of a conjunction (`and` operation).
$P(A ~or~ B) = P(A) + P(B) - P(A ~and~ B)$
Then we used it to show that the sum of the unnormalized posteriors is the total probability of the data, which is why the Bayes table works.
We saw several examples involving dice, and I used them to show how prediction and inference are related: the Bayes table actually solves the prediction problem on the way to solving the inference problem.
## Bayesville
In this notebook we'll consider a famous example where Bayes's Theorem brings clarity to a confusing topic: medical testing.
Joe Blitzstein explains the scenario in this video from Stat110x at HarvardX.
(You might have to run the following cell to see the video.)
```
from IPython.display import YouTubeVideo
YouTubeVideo('otdaJPVQIgg')
```
I'll paraphrase the problem posed in the video:
> In Bayesville, 1% of the population has an undiagnosed medical condition. Jimmy gets tested for the condition and the test comes back positive; that is, the test says Jimmy has the condition.
>
> The test is 95% accurate, which means
>
> * If you give the test to someone with the condition, the probability is 95% that the test will be positive, and
>
> * If you give it to someone who does not have the condition, the probability is 95% that the test will be negative.
>
> What is the probability that Jimmy actually has the condition?
Because the test is 95% accurate, it is tempting to say that the probability is 95% that the test is correct and Jimmy has the condition.
But that is wrong. Or maybe I should say it's the right answer to a different question. 95% is the probability of a positive test, given a patient with the condition. But that's not what the question asked, or what Jimmy wants to know.
To Jimmy, the important question is the probability he has the condition, given a positive test. As we have seen, and as Joe explains in the video:
$P(A|B) \ne P(B|A)$
We can use a Bayes table to answer Jimmy's question correctly.
## Bayes table
I'll use two strings to represent the hypotheses: `condition` and `no condition`.
The prior for `condition` is the probability a random citizen of Bayesville has the condition, which is 1%.
The prior for `no condition` is the probability that a random citizen does not have the disease, which is 99%.
Let's put those values into a Bayes table.
```
import pandas as pd
table = pd.DataFrame(index=['condition', 'no condition'])
table['prior'] = 0.01, 0.99
table
```
The data is the positive test, so the likelihoods are:
* The probability of a correct positive test, given the condition, which is 95%.
* The probability of an incorrect positive test, given no condition, which is 5%.
```
table['likelihood'] = 0.95, 0.05
table
```
Once we have priors and likelihoods, the remaining steps are always the same. We compute the unnormalized posteriors:
```
table['unnorm'] = table['prior'] * table['likelihood']
table
```
And the total probability of the data.
```
prob_data = table['unnorm'].sum()
prob_data
```
Then divide through to get the normalized posteriors.
```
table['posterior'] = table['unnorm'] / prob_data
table
```
The posterior for `condition` is substantially higher than the prior, so the positive test is evidence in favor of `condition`.
But the prior is small and the evidence is not strong enough to overcome it; despite the positive test, the probability that Jimmy has the condition is only about 16%.
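To see where that 16% comes from without the table, we can plug the numbers directly into Bayes's Theorem; this is the same computation the table performs:
$P(condition ~|~ positive) = \frac{0.01 \times 0.95}{0.01 \times 0.95 + 0.99 \times 0.05} = \frac{0.0095}{0.059} \approx 0.16$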
Many people find this result surprising and some insist that the probability is 95% that Jimmy has the condition.
The mistake they are making is called the [base rate fallacy](https://en.wikipedia.org/wiki/Base_rate_fallacy) because it ignores the "base rate" of the condition, which is the prior.
## Put a function on it
At this point you might be sick of seeing the same six lines of code over and over, so let's put them in a function where you will never have to see them again.
```
def make_bayes_table(hypos, prior, likelihood):
"""Make a Bayes table.
hypos: sequence of hypotheses
prior: prior probabilities
likelihood: sequence of likelihoods
returns: DataFrame
"""
table = pd.DataFrame(index=hypos)
table['prior'] = prior
table['likelihood'] = likelihood
table['unnorm'] = table['prior'] * table['likelihood']
prob_data = table['unnorm'].sum()
table['posterior'] = table['unnorm'] / prob_data
return table
```
This function takes three parameters:
* `hypos`, which should be a sequence of hypotheses. You can use almost any type to represent the hypotheses, including string, `int`, and `float`.
* `prior`, which is sequence of prior probabilities, $P(H)$ for each $H$.
* `likelihood`, which is a sequence of likelihoods, $P(D|H)$ for each $H$.
All three sequences should be the same length.
Here's a solution to the previous problem using `make_bayes_table`:
```
hypos = ['condition', 'no condition']
prior = 0.01, 0.99
likelihood = 0.95, 0.05
make_bayes_table(hypos, prior, likelihood)
```
**Exercise:** Suppose we take the same test to another town, called Sickville, where the base rate of the disease is 10%, substantially higher than in Bayesville. If a citizen of Sickville tests positive, what is the probability that they have the condition?
Use `make_bayes_table` to compute the result.
```
# Solution
hypos = ['condition', 'no condition']
prior = 0.1, 0.9
likelihood = 0.95, 0.05
make_bayes_table(hypos, prior, likelihood)
```
With a higher base rate, the posterior probability is substantially higher.
**Exercise:** Suppose we go back to Bayesville, where the base rate is 1%, with a new test that is 99.5% accurate.
If a citizen of Bayesville tests positive with the new test, what is the probability they have the condition?
Use `make_bayes_table` to compute the result.
```
# Solution
hypos = ['condition', 'no condition']
prior = 0.01, 0.99
likelihood = 0.995, 0.005
make_bayes_table(hypos, prior, likelihood)
```
With an accuracy of 99.5%, the positive test provides stronger evidence, so it is able to overcome the small prior.
## The Elvis problem
Here's a problem from [*Bayesian Data Analysis*](http://www.stat.columbia.edu/~gelman/book/):
> Elvis Presley had a twin brother (who died at birth). What is the probability that Elvis was an identical twin?
For background information, I used data from the
U.S. Census Bureau, [Birth, Stillbirth, and Infant Mortality Statistics for the Continental United States, the Territory of Hawaii, the Virgin Islands 1935](https://www.cdc.gov/nchs/data/vsushistorical/birthstat_1935.pdf) to estimate that in 1935, about 1/3 of twins were identical.
**Exercise:** Use this base rate and a Bayes table to compute the probability that Elvis was an identical twin.
Hint: Because identical twins have the same genes, they are almost always the same sex.
```
# Solution
hypos = ['identical', 'fraternal']
prior = 1/3, 2/3
likelihood = 1, 0.5
make_bayes_table(hypos, prior, likelihood)
```
## Summary
In this notebook, we used Bayes's Theorem, in the form of a Bayes table, to solve an important problem: interpreting the result of a medical test correctly.
Many people, including many doctors, get this problem wrong, with bad consequences for patients. Now that you know about the "base rate fallacy", you will see that it appears in many other domains, not just medicine.
Finally, I presented the Elvis problem, which I hope is a fun way to apply what you have learned so far. If you like the Elvis problem, you might enjoy [this notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/elvis.ipynb) where I dig into it a little deeper.
[In the next notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/06_pmf.ipynb) I'll introduce the probability mass function (PMF) and we'll use it to solve new versions of the cookie problem and the dice problem.
|
github_jupyter
|
from IPython.display import YouTubeVideo
YouTubeVideo('otdaJPVQIgg')
import pandas as pd
table = pd.DataFrame(index=['condition', 'no condition'])
table['prior'] = 0.01, 0.99
table
table['likelihood'] = 0.95, 0.05
table
table['unnorm'] = table['prior'] * table['likelihood']
table
prob_data = table['unnorm'].sum()
prob_data
table['posterior'] = table['unnorm'] / prob_data
table
def make_bayes_table(hypos, prior, likelihood):
"""Make a Bayes table.
hypos: sequence of hypotheses
prior: prior probabilities
likelihood: sequence of likelihoods
returns: DataFrame
"""
table = pd.DataFrame(index=hypos)
table['prior'] = prior
table['likelihood'] = likelihood
table['unnorm'] = table['prior'] * table['likelihood']
prob_data = table['unnorm'].sum()
table['posterior'] = table['unnorm'] / prob_data
return table
hypos = ['condition', 'no condition']
prior = 0.01, 0.99
likelihood = 0.95, 0.05
make_bayes_table(hypos, prior, likelihood)
# Solution
hypos = ['condition', 'no condition']
prior = 0.1, 0.9
likelihood = 0.95, 0.05
make_bayes_table(hypos, prior, likelihood)
# Solution
hypos = ['condition', 'no condition']
prior = 0.01, 0.99
likelihood = 0.995, 0.005
make_bayes_table(hypos, prior, likelihood)
# Solution
hypos = ['identical', 'fraternal']
prior = 1/3, 2/3
likelihood = 1, 0.5
make_bayes_table(hypos, prior, likelihood)
| 0.711431 | 0.992725 |
```
import numpy as np
from sklearn.decomposition import PCA
import scipy.io as sio
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import os
import random
from random import shuffle
from skimage.transform import rotate
import scipy.ndimage
def loadIndianPinesData():
data_path = os.path.join(os.getcwd(),'Data')
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
return data, labels
def splitTrainTestSet(X, y, testRatio=0.10):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=345,
stratify=y)
return X_train, X_test, y_train, y_test
def oversampleWeakClasses(X, y):
uniqueLabels, labelCounts = np.unique(y, return_counts=True)
maxCount = np.max(labelCounts)
labelInverseRatios = maxCount / labelCounts
# repeat for every label and concat
newX = X[y == uniqueLabels[0], :, :, :].repeat(round(labelInverseRatios[0]), axis=0)
newY = y[y == uniqueLabels[0]].repeat(round(labelInverseRatios[0]), axis=0)
for label, labelInverseRatio in zip(uniqueLabels[1:], labelInverseRatios[1:]):
cX = X[y== label,:,:,:].repeat(round(labelInverseRatio), axis=0)
cY = y[y == label].repeat(round(labelInverseRatio), axis=0)
newX = np.concatenate((newX, cX))
newY = np.concatenate((newY, cY))
np.random.seed(seed=42)
rand_perm = np.random.permutation(newY.shape[0])
newX = newX[rand_perm, :, :, :]
newY = newY[rand_perm]
return newX, newY
def standartizeData(X):
newX = np.reshape(X, (-1, X.shape[2]))
scaler = preprocessing.StandardScaler().fit(newX)
newX = scaler.transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1],X.shape[2]))
return newX, scaler
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
def createPatches(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
def AugmentData(X_train):
for i in range(int(X_train.shape[0]/2)):
patch = X_train[i,:,:,:]
num = random.randint(0,2)
if (num == 0):
flipped_patch = np.flipud(patch)
if (num == 1):
flipped_patch = np.fliplr(patch)
if (num == 2):
no = random.randrange(-180,180,30)
flipped_patch = scipy.ndimage.interpolation.rotate(patch, no,axes=(1, 0),
reshape=False, output=None, order=3, mode='constant', cval=0.0, prefilter=False)
patch2 = flipped_patch
X_train[i,:,:,:] = patch2
return X_train
def savePreprocessedData(X_trainPatches, X_testPatches, y_trainPatches, y_testPatches, windowSize, wasPCAapplied = False, numPCAComponents = 0, testRatio = 0.25):
if wasPCAapplied:
with open("X_trainPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, X_trainPatches)
with open("X_testPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, X_testPatches)
with open("y_trainPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, y_trainPatches)
with open("y_testPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, y_testPatches)
else:
with open("../preprocessedData/XtrainWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, X_trainPatches)
with open("../preprocessedData/XtestWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, X_testPatches)
with open("../preprocessedData/ytrainWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, y_trainPatches)
with open("../preprocessedData/ytestWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, y_testPatches)
# Load the Global values (windowSize, numPCAcomponents, testRatio) from the text file global_variables.txt
myFile = open('global_variables.txt', 'r')
file = myFile.readlines()[:]
for line in file:
if line[0:3] == "win":
ds = line.find('=')
windowSize = int(line[ds+1:-1],10)
elif line[0:3] == "num":
ds = line.find('=')
numPCAcomponents = int(line[ds+2:-1],10)
else:
ds = line.find('=')
testRatio = float(line[ds+1:])
# Global Variables
#numPCAComponents = 30
#windowSize = 5
#testRatio = 0.25
X, y = loadIndianPinesData()
X,pca = applyPCA(X,numPCAcomponents)
XPatches, yPatches = createPatches(X, y, windowSize=windowSize)
X_train, X_test, y_train, y_test = splitTrainTestSet(XPatches, yPatches, testRatio)
X_train, y_train = oversampleWeakClasses(X_train, y_train)
X_train = AugmentData(X_train)
savePreprocessedData(X_train, X_test, y_train, y_test, windowSize = windowSize,
wasPCAapplied=True, numPCAComponents = numPCAcomponents,testRatio = testRatio)
```
|
github_jupyter
|
import numpy as np
from sklearn.decomposition import PCA
import scipy.io as sio
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import os
import random
from random import shuffle
from skimage.transform import rotate
import scipy.ndimage
def loadIndianPinesData():
data_path = os.path.join(os.getcwd(),'Data')
data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
return data, labels
def splitTrainTestSet(X, y, testRatio=0.10):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio, random_state=345,
stratify=y)
return X_train, X_test, y_train, y_test
def oversampleWeakClasses(X, y):
uniqueLabels, labelCounts = np.unique(y, return_counts=True)
maxCount = np.max(labelCounts)
labelInverseRatios = maxCount / labelCounts
# repeat for every label and concat
newX = X[y == uniqueLabels[0], :, :, :].repeat(round(labelInverseRatios[0]), axis=0)
newY = y[y == uniqueLabels[0]].repeat(round(labelInverseRatios[0]), axis=0)
for label, labelInverseRatio in zip(uniqueLabels[1:], labelInverseRatios[1:]):
cX = X[y== label,:,:,:].repeat(round(labelInverseRatio), axis=0)
cY = y[y == label].repeat(round(labelInverseRatio), axis=0)
newX = np.concatenate((newX, cX))
newY = np.concatenate((newY, cY))
np.random.seed(seed=42)
rand_perm = np.random.permutation(newY.shape[0])
newX = newX[rand_perm, :, :, :]
newY = newY[rand_perm]
return newX, newY
def standartizeData(X):
newX = np.reshape(X, (-1, X.shape[2]))
scaler = preprocessing.StandardScaler().fit(newX)
newX = scaler.transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1],X.shape[2]))
return newX, scaler
def applyPCA(X, numComponents=75):
newX = np.reshape(X, (-1, X.shape[2]))
pca = PCA(n_components=numComponents, whiten=True)
newX = pca.fit_transform(newX)
newX = np.reshape(newX, (X.shape[0],X.shape[1], numComponents))
return newX, pca
def padWithZeros(X, margin=2):
newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))
x_offset = margin
y_offset = margin
newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
return newX
def createPatches(X, y, windowSize=5, removeZeroLabels = True):
margin = int((windowSize - 1) / 2)
zeroPaddedX = padWithZeros(X, margin=margin)
# split patches
patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
patchIndex = 0
for r in range(margin, zeroPaddedX.shape[0] - margin):
for c in range(margin, zeroPaddedX.shape[1] - margin):
patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
patchesData[patchIndex, :, :, :] = patch
patchesLabels[patchIndex] = y[r-margin, c-margin]
patchIndex = patchIndex + 1
if removeZeroLabels:
patchesData = patchesData[patchesLabels>0,:,:,:]
patchesLabels = patchesLabels[patchesLabels>0]
patchesLabels -= 1
return patchesData, patchesLabels
def AugmentData(X_train):
for i in range(int(X_train.shape[0]/2)):
patch = X_train[i,:,:,:]
num = random.randint(0,2)
if (num == 0):
flipped_patch = np.flipud(patch)
if (num == 1):
flipped_patch = np.fliplr(patch)
if (num == 2):
no = random.randrange(-180,180,30)
flipped_patch = scipy.ndimage.interpolation.rotate(patch, no,axes=(1, 0),
reshape=False, output=None, order=3, mode='constant', cval=0.0, prefilter=False)
patch2 = flipped_patch
X_train[i,:,:,:] = patch2
return X_train
def savePreprocessedData(X_trainPatches, X_testPatches, y_trainPatches, y_testPatches, windowSize, wasPCAapplied = False, numPCAComponents = 0, testRatio = 0.25):
if wasPCAapplied:
with open("X_trainPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, X_trainPatches)
with open("X_testPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, X_testPatches)
with open("y_trainPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, y_trainPatches)
with open("y_testPatches_" + str(windowSize) + "PCA" + str(numPCAComponents) + "testRatio" + str(testRatio) + ".npy", 'bw') as outfile:
np.save(outfile, y_testPatches)
else:
with open("../preprocessedData/XtrainWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, X_trainPatches)
with open("../preprocessedData/XtestWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, X_testPatches)
with open("../preprocessedData/ytrainWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, y_trainPatches)
with open("../preprocessedData/ytestWindowSize" + str(windowSize) + ".npy", 'bw') as outfile:
np.save(outfile, y_testPatches)
# Load the Global values (windowSize, numPCAcomponents, testRatio) from the text file global_variables.txt
myFile = open('global_variables.txt', 'r')
file = myFile.readlines()[:]
for line in file:
if line[0:3] == "win":
ds = line.find('=')
windowSize = int(line[ds+1:-1],10)
elif line[0:3] == "num":
ds = line.find('=')
numPCAcomponents = int(line[ds+2:-1],10)
else:
ds = line.find('=')
testRatio = float(line[ds+1:])
# Global Variables
#numPCAComponents = 30
#windowSize = 5
#testRatio = 0.25
X, y = loadIndianPinesData()
X,pca = applyPCA(X,numPCAcomponents)
XPatches, yPatches = createPatches(X, y, windowSize=windowSize)
X_train, X_test, y_train, y_test = splitTrainTestSet(XPatches, yPatches, testRatio)
X_train, y_train = oversampleWeakClasses(X_train, y_train)
X_train = AugmentData(X_train)
savePreprocessedData(X_train, X_test, y_train, y_test, windowSize = windowSize,
wasPCAapplied=True, numPCAComponents = numPCAcomponents,testRatio = testRatio)
| 0.488527 | 0.568296 |
```
import os
import pandas as pd
import numpy as np
import pickle
import json
FILES_DIR = "/home/blagojce/EPFL_semester3/NTDS/ntds_project/ml-100k"
```
<b>u.data</b> -- The full u data set, 100000 ratings by 943 users on 1682 items.
Each user has rated at least 20 movies. Users and items are
numbered consecutively from 1. The data is randomly
ordered. This is a tab separated list of
user id | item id | rating | timestamp.
The time stamps are unix seconds since 1/1/1970 UTC
```
df_data_path = os.path.join(FILES_DIR, "u.data")
df_data = pd.read_csv(df_data_path, header=None, delimiter="\t")
df_data.columns = ["user_id", "item_id", "rating", "timestamp"]
df_data["timestamp"] = pd.to_datetime(df_data["timestamp"], unit='s')
print("Unique users: %d" % len(df_data["user_id"].unique()))
print("Unique movies: %d" % len(df_data["item_id"].unique()))
print("Distinct ratings: %s" % sorted(df_data["rating"].unique()))
df_data["rating"].hist()
```
<b>u.info</b> <b>USELESS</b> -- The number of users, items, and ratings in the u data set.
```
df_info_path = os.path.join(FILES_DIR, "u.info")
df_info = pd.read_csv(df_info_path, header=None, delimiter="\t")
df_info
```
<b>u.item</b> -- Information about the items (movies); this is a tab separated
list of
movie id | movie title | release date | video release date |
IMDb URL | unknown | Action | Adventure | Animation |
Children's | Comedy | Crime | Documentary | Drama | Fantasy |
Film-Noir | Horror | Musical | Mystery | Romance | Sci-Fi |
Thriller | War | Western |
The last 19 fields are the genres, a 1 indicates the movie
is of that genre, a 0 indicates it is not; movies can be in
several genres at once.
The movie ids are the ones used in the u.data data set.
```
df_item_path = os.path.join(FILES_DIR, "u.item")
df_item = pd.read_csv(df_item_path, header=None, delimiter="|", encoding='latin1')
df_item.columns = ["item_id" , "movie_title" , "release_date" , "video_release_date" , "IMDb_URL" , "unknown" , "Action" , "Adventure" , "Animation" , "Children's" , "Comedy" , "Crime" , "Documentary" , "Drama" , "Fantasy" , "Film-Noir" , "Horror" , "Musical" , "Mystery" , "Romance" , "Sci-Fi" , "Thriller" , "War" , "Western"]
df_item = df_item.drop(columns= ["video_release_date"]) # every value is NaN
df_item.head()
```
<b>u.genre</b> -- A list of the genres.
```
df_genre_path = os.path.join(FILES_DIR, "u.genre")
df_genre = pd.read_csv(df_genre_path, header=None, delimiter="|")
df_genre
```
<b>u.user</b> -- Demographic information about the users; this is a tab
separated list of
user id | age | gender | occupation | zip code
The user ids are the ones used in the u.data data set.
```
df_user_path = os.path.join(FILES_DIR, "u.user")
df_user = pd.read_csv(df_user_path, header=None, delimiter="|")
df_user.columns = ["user_id", "age", "gendre", "occupation", "zip_code"]
df_user.head()
df_user["age"].hist()
```
<b>u.occupation</b> -- A list of the occupations.
```
df_occupation_path = os.path.join(FILES_DIR, "u.occupation")
df_occupation = pd.read_csv(df_occupation_path, header=None, delimiter="|")
df_occupation
```
The data directory also contains ready-made train/test splits, e.g. for 5-fold cross-validation.
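For example, the first fold can be loaded like this (a sketch; `u1.base`/`u1.test` are the file names shipped with ml-100k, and `u1_train`/`u1_test` are arbitrary variable names):
```
u1_train = pd.read_csv(os.path.join(FILES_DIR, "u1.base"), header=None, delimiter="\t")
u1_test = pd.read_csv(os.path.join(FILES_DIR, "u1.test"), header=None, delimiter="\t")
u1_train.columns = ["user_id", "item_id", "rating", "timestamp"]
u1_test.columns = ["user_id", "item_id", "rating", "timestamp"]
print("Train ratings: %d, test ratings: %d" % (len(u1_train), len(u1_test)))
```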
```
import urllib
import json
serviceurl = 'http://www.omdbapi.com/?'
omdbapi = "123b5018"
apikey = '&apikey='+omdbapi
def search_movie(title):
if len(title) < 1 or title=='quit':
print("Goodbye now…")
return None
try:
url = serviceurl + urllib.parse.urlencode({'t': title})+apikey
print(f'Retrieving the data of "{title}" now… ')
uh = urllib.request.urlopen(url)
data = uh.read()
json_data=json.loads(data)
if json_data['Response']=='True':
print(json_data)
return json_data
except urllib.error.URLError as e:
print(f"ERROR: {e.reason}")
return None
def search_movie_title_year(title, year):
if len(title) < 1 or title=='quit':
print("Goodbye now…")
return None
try:
# build the query with both title and year (OMDb expects the year as the 'y' parameter)
url = serviceurl + urllib.parse.urlencode({'t': title, 'y': year})+apikey
print(f'Retrieving the data of "{title}" now… ')
uh = urllib.request.urlopen(url)
data = uh.read()
json_data=json.loads(data)
if json_data['Response']=='True':
print(json_data)
return json_data
except urllib.error.URLError as e:
print(f"ERROR: {e.reason}")
return None
# title and years to download
movie_titles = [(x.rsplit(" ", 1)[0], x.rsplit(" ", 1)[1]) for x in list(df_item["movie_title"]) if x != "unknown"]
# after many attempts and some manual downloading of the missing data, we obtained the following metadata
with open('oimdb_metadata_crawled.pickle', 'rb') as handle:
oimdb_metadata = pickle.load(handle)
oimdb_metadata = [m[1] for m in oimdb_metadata]
df_oimdb_metadata = pd.read_json(json.dumps(oimdb_metadata))
df_oimdb_metadata
```
Columns flagged by df_oimdb_metadata.isna().any():<br>
DVD True - may be useful, since there are only a few NaN values<br>
Production True - may be useful, since there are only a few NaN values<br>
BoxOffice True - useless, almost every value is NaN<br>
Episode True - useless, almost every value is NaN<br>
Season True - useless, almost every value is NaN<br>
Website True - useless, almost every value is NaN<br>
seriesID True - useless, almost every value is NaN<br>
totalSeasons True - useless, almost every value is NaN<br>
```
df_oimdb_metadata.drop(["BoxOffice", "Episode", "Season", "Website", "seriesID", "totalSeasons"], axis=1, inplace=True)
df_oimdb_metadata
```
|
github_jupyter
|
import os
import pandas as pd
import numpy as np
import pickle
import json
FILES_DIR = "/home/blagojce/EPFL_semester3/NTDS/ntds_project/ml-100k"
df_data_path = os.path.join(FILES_DIR, "u.data")
df_data = pd.read_csv(df_data_path, header=None, delimiter="\t")
df_data.columns = ["user_id", "item_id", "rating", "timestamp"]
df_data["timestamp"] = pd.to_datetime(df_data["timestamp"], unit='s')
print("Unique users: %d" % len(df_data["user_id"].unique()))
print("Unique movies: %d" % len(df_data["item_id"].unique()))
print("Distinct ratings: %s" % sorted(df_data["rating"].unique()))
df_data["rating"].hist()
df_info_path = os.path.join(FILES_DIR, "u.info")
df_info = pd.read_csv(df_info_path, header=None, delimiter="\t")
df_info
df_item_path = os.path.join(FILES_DIR, "u.item")
df_item = pd.read_csv(df_item_path, header=None, delimiter="|", encoding='latin1')
df_item.columns = ["item_id" , "movie_title" , "release_date" , "video_release_date" , "IMDb_URL" , "unknown" , "Action" , "Adventure" , "Animation" , "Children's" , "Comedy" , "Crime" , "Documentary" , "Drama" , "Fantasy" , "Film-Noir" , "Horror" , "Musical" , "Mystery" , "Romance" , "Sci-Fi" , "Thriller" , "War" , "Western"]
df_item = df_item.drop(columns= ["video_release_date"]) # every value is NaN
df_item.head()
df_genre_path = os.path.join(FILES_DIR, "u.genre")
df_genre = pd.read_csv(df_genre_path, header=None, delimiter="|")
df_genre
df_user_path = os.path.join(FILES_DIR, "u.user")
df_user = pd.read_csv(df_user_path, header=None, delimiter="|")
df_user.columns = ["user_id", "age", "gendre", "occupation", "zip_code"]
df_user.head()
df_user["age"].hist()
df_occupation_path = os.path.join(FILES_DIR, "u.occupation")
df_occupation = pd.read_csv(df_occupation_path, header=None, delimiter="|")
df_occupation
import urllib
import json
serviceurl = 'http://www.omdbapi.com/?'
omdbapi = "123b5018"
apikey = '&apikey='+omdbapi
def search_movie(title):
if len(title) < 1 or title=='quit':
print("Goodbye now…")
return None
try:
url = serviceurl + urllib.parse.urlencode({'t': title})+apikey
print(f'Retrieving the data of "{title}" now… ')
uh = urllib.request.urlopen(url)
data = uh.read()
json_data=json.loads(data)
if json_data['Response']=='True':
print(json_data)
return json_data
except urllib.error.URLError as e:
print(f"ERROR: {e.reason}")
return None
def search_movie_title_year(title, year):
if len(title) < 1 or title=='quit':
print("Goodbye now…")
return None
try:
url = serviceurl + urllib.parse.urlencode({'t': title}, {'year': year})+apikey
print(f'Retrieving the data of "{title}" now… ')
uh = urllib.request.urlopen(url)
data = uh.read()
json_data=json.loads(data)
if json_data['Response']=='True':
print(json_data)
return json_data
except urllib.error.URLError as e:
print(f"ERROR: {e.reason}")
return None
# title and years to download
movie_titles = [(x.rsplit(" ", 1)[0], x.rsplit(" ", 1)[1]) for x in list(df_item["movie_title"]) if x != "unknown"]
# after many attemps and hand downloading the missing data we obtained the following metadata
with open('oimdb_metadata_crawled.pickle', 'rb') as handle:
oimdb_metadata = pickle.load(handle)
oimdb_metadata = [m[1] for m in oimdb_metadata]
df_oimdb_metadata = pd.read_json(json.dumps(oimdb_metadata))
df_oimdb_metadata
df_oimdb_metadata.drop(["BoxOffice", "Episode", "Season", "Website", "seriesID", "totalSeasons"], axis=1, inplace=True)
df_oimdb_metadata
| 0.144269 | 0.444625 |
# Data Analysis - Data Exploration
```
import pandas as pd
import sklearn
import missingno as msno
import numpy as np
from sklearn.impute import KNNImputer
import sklearn.neighbors._base
import sys
sys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base
from sklearn.decomposition import PCA
from missingpy import MissForest
from sklearn.cluster import KMeans, AgglomerativeClustering
import matplotlib.pyplot as plt
from yellowbrick.cluster import SilhouetteVisualizer
import warnings
warnings.filterwarnings('ignore')
NUTS2_GDP_EDU = pd.read_pickle("NUTS2.pkl")
pd.set_option('display.max_rows', 100)
NUTS2_GDP_EDU
```
#### Changing EDU data type to float
```
#ED8 is the raw number of Doctoral students
NUTS2_GDP_EDU["EDU_2019_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2019_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2018_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2018_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2017_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2017_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2016_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2016_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2015_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2015_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2014_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2014_ED8"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2013_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2013_ED8"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2012_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED8"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED8"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED8"], downcast="float",errors='coerce')
#ED7 is the raw number of Master students
NUTS2_GDP_EDU["EDU_2019_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2019_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2018_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2018_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2017_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2017_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2016_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2016_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2015_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2015_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2014_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2014_ED7"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2013_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2013_ED7"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2012_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED7"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED7"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED7"], downcast="float",errors='coerce')
#ED6 is the raw number of Bachelor students
NUTS2_GDP_EDU["EDU_2019_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2019_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2018_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2018_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2017_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2017_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2016_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2016_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2015_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2015_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2014_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2014_ED6"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["EDU_2013_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2013_ED6"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2012_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED6"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED6"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED6"], downcast="float",errors='coerce')
msno.matrix(NUTS2_GDP_EDU)
msno.bar(NUTS2_GDP_EDU)
```
#### Changing GDP data type to float
```
#gdp_m represents GDP in millions of euros in a particular region
NUTS2_GDP_EDU["GDP_2019_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2019_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2018_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2018_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2017_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2017_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2016_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2016_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2015_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2015_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2014_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2014_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2013_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2013_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2012_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2012_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2011_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2011_M"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2010_M"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2010_M"], downcast="float",errors='coerce')
#gdp in euros per individual in a particular NUTS region
NUTS2_GDP_EDU["GDP_2019"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2019"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2018"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2018"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2017"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2017"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2016"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2016"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2015"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2015"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2014"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2014"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2013"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2013"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2012"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2012"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2011"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2011"], downcast="float",errors='coerce')
NUTS2_GDP_EDU["GDP_2010"] = pd.to_numeric(NUTS2_GDP_EDU["GDP_2010"], downcast="float",errors='coerce')
msno.heatmap(NUTS2_GDP_EDU)
```
A value near -1 means that if one variable is present, then the other variable is very likely to be missing.
A value near 0 means there is no dependence between the occurrence of missing values in the two variables.
A value near 1 means that if one variable is present, then the other variable is very likely to be present as well.
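The same conversions above can be written as a single loop; a sketch with identical behaviour, except that it would also convert the 2010-2012 EDU columns that are left commented out (if they are present in the frame):
```
# Sketch: convert every EDU_*/GDP_* column to float in one pass
for col in NUTS2_GDP_EDU.columns:
    if col.startswith("EDU_") or col.startswith("GDP_"):
        NUTS2_GDP_EDU[col] = pd.to_numeric(NUTS2_GDP_EDU[col], downcast="float", errors="coerce")
```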
### MissForest missing-value imputation
Advantages:
1) Can be applied to mixed data types (missing values in numeric & categorical variables)
2) No pre-processing required (no dummy coding, standardization, data splitting, etc.)
3) No assumptions required (aside from the usual assumption of the data being MAR/MCAR)
4) Robust to noisy data, as random forests effectively have built-in feature selection. Methods like KNN imputation give poor predictions in datasets with weak & non-informative predictors, whereas missForest() makes little to no use of these features
5) Non-parametric: makes no assumptions about the relationship between the features, unlike MICE which assumes linearity; excellent predictive power
6) Can leverage non-linear and interaction effects between features to improve imputation accuracy
7) Gives an OOB error estimate for its predictions (numeric: NRMSE/MSE, categorical: PFC)
8) Works with high-dimensional data (p ≫ n)
Disadvantages:
1) Imputation time, which increases with the number of observations, predictors, and number of predictors containing missing values
2) It inherits the same lack of interpretability as random forests
3) It is an algorithm, not a model object you can store somewhere. This means it has to be run each time missing data needs to be imputed, which could be problematic in some production environments
```
NaN_d = NUTS2_GDP_EDU.drop(['FREQ','GEO'],axis = 1)
imputer = MissForest()
X_imputed = imputer.fit_transform(NaN_d)
Filled = pd.DataFrame(X_imputed)
Filled
NUTS2_GDP_EDU_I = pd.DataFrame()
NUTS2_GDP_EDU_I['FREQ'] = NUTS2_GDP_EDU['FREQ']
NUTS2_GDP_EDU_I['GEO'] = NUTS2_GDP_EDU['GEO']
#ED8 is the raw number of Doctoral students
NUTS2_GDP_EDU_I["EDU_2019_ED8"] = Filled.loc[:,0]
NUTS2_GDP_EDU_I["EDU_2018_ED8"] = Filled.loc[:,1]
NUTS2_GDP_EDU_I["EDU_2017_ED8"] = Filled.loc[:,2]
NUTS2_GDP_EDU_I["EDU_2016_ED8"] = Filled.loc[:,3]
NUTS2_GDP_EDU_I["EDU_2015_ED8"] = Filled.loc[:,4]
NUTS2_GDP_EDU_I["EDU_2014_ED8"] = Filled.loc[:,5]
NUTS2_GDP_EDU_I["EDU_2013_ED8"] = Filled.loc[:,6]
#NUTS2_GDP_EDU["EDU_2012_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED8"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED8"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED8"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED8"], downcast="float",errors='coerce')
#ED7 is the raw number of Master students
NUTS2_GDP_EDU_I["EDU_2019_ED7"] = Filled.loc[:,7]
NUTS2_GDP_EDU_I["EDU_2018_ED7"] = Filled.loc[:,8]
NUTS2_GDP_EDU_I["EDU_2017_ED7"] = Filled.loc[:,9]
NUTS2_GDP_EDU_I["EDU_2016_ED7"] = Filled.loc[:,10]
NUTS2_GDP_EDU_I["EDU_2015_ED7"] = Filled.loc[:,11]
NUTS2_GDP_EDU_I["EDU_2014_ED7"] = Filled.loc[:,12]
NUTS2_GDP_EDU_I["EDU_2013_ED7"] = Filled.loc[:,13]
#NUTS2_GDP_EDU["EDU_2012_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED7"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED7"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED7"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED7"], downcast="float",errors='coerce')
#ED6 is the raw number of Bachelor students
NUTS2_GDP_EDU_I["EDU_2019_ED6"] = Filled.loc[:,14]
NUTS2_GDP_EDU_I["EDU_2018_ED6"] = Filled.loc[:,15]
NUTS2_GDP_EDU_I["EDU_2017_ED6"] = Filled.loc[:,16]
NUTS2_GDP_EDU_I["EDU_2016_ED6"] = Filled.loc[:,17]
NUTS2_GDP_EDU_I["EDU_2015_ED6"] = Filled.loc[:,18]
NUTS2_GDP_EDU_I["EDU_2014_ED6"] = Filled.loc[:,19]
NUTS2_GDP_EDU_I["EDU_2013_ED6"] = Filled.loc[:,20]
#NUTS2_GDP_EDU["EDU_2012_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2012_ED6"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2011_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2011_ED6"], downcast="float",errors='coerce')
#NUTS2_GDP_EDU["EDU_2010_ED6"] = pd.to_numeric(NUTS2_GDP_EDU["EDU_2010_ED6"], downcast="float",errors='coerce')
#gdp_m represents GDP in millions of euros in a particular region
NUTS2_GDP_EDU_I["GDP_2019_M"] = Filled.loc[:,21]
NUTS2_GDP_EDU_I["GDP_2018_M"] = Filled.loc[:,22]
NUTS2_GDP_EDU_I["GDP_2017_M"] = Filled.loc[:,23]
NUTS2_GDP_EDU_I["GDP_2016_M"] = Filled.loc[:,24]
NUTS2_GDP_EDU_I["GDP_2015_M"] = Filled.loc[:,25]
NUTS2_GDP_EDU_I["GDP_2014_M"] = Filled.loc[:,26]
NUTS2_GDP_EDU_I["GDP_2013_M"] = Filled.loc[:,27]
NUTS2_GDP_EDU_I["GDP_2012_M"] = Filled.loc[:,28]
NUTS2_GDP_EDU_I["GDP_2011_M"] = Filled.loc[:,29]
NUTS2_GDP_EDU_I["GDP_2010_M"] = Filled.loc[:,30]
#gdp in euros per individual in a particular NUTS region
NUTS2_GDP_EDU_I["GDP_2019"] = Filled.loc[:,31]
NUTS2_GDP_EDU_I["GDP_2018"] = Filled.loc[:,32]
NUTS2_GDP_EDU_I["GDP_2017"] = Filled.loc[:,33]
NUTS2_GDP_EDU_I["GDP_2016"] = Filled.loc[:,34]
NUTS2_GDP_EDU_I["GDP_2015"] = Filled.loc[:,35]
NUTS2_GDP_EDU_I["GDP_2014"] = Filled.loc[:,36]
NUTS2_GDP_EDU_I["GDP_2013"] = Filled.loc[:,37]
NUTS2_GDP_EDU_I["GDP_2012"] = Filled.loc[:,38]
NUTS2_GDP_EDU_I["GDP_2011"] = Filled.loc[:,39]
NUTS2_GDP_EDU_I["GDP_2010"] = Filled.loc[:,40]
msno.matrix(NUTS2_GDP_EDU_I)
```
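The column-by-column rebuild above can also be done in two lines by handing the original column names and index straight to the `DataFrame` constructor; a sketch (`NUTS2_GDP_EDU_alt` is just an illustrative name — the rest of the notebook keeps using `NUTS2_GDP_EDU_I`):
```
# Sketch: keep the original column names/index when wrapping the imputed array
Filled_named = pd.DataFrame(X_imputed, columns=NaN_d.columns, index=NaN_d.index)
NUTS2_GDP_EDU_alt = pd.concat([NUTS2_GDP_EDU[["FREQ", "GEO"]], Filled_named], axis=1)
```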
### K Means Clustering
1) Standardization of the variables is not required
2) Every run is bound to give a slightly different result, as the starting points of K-Means are random
3) The silhouettes of the K-Means clusters will be analyzed to give further insight
##### Elbow Method
```
K_mean = pd.DataFrame(NUTS2_GDP_EDU_I)
K_mean = K_mean.drop(columns=['FREQ'])
K_means_D = K_mean.drop(columns=['GEO'])
K_means_D.head()
K_mean
wcss = []
for i in range(1, 15):
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
kmeans.fit(K_means_D)
wcss.append(kmeans.inertia_)
print(wcss)
plt.plot(range(1, 15), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
pca = PCA(3)
#Transform the data
pca_d = pca.fit_transform(K_means_D)
pca_d.shape
pca.explained_variance_ratio_
```
From the graph above we can safely take up to 3 clusters for our data; even 2 clusters, which also has a good WCSS score, could be taken
```
kmeans = KMeans(n_clusters= 3)
np.random.seed(1234)
k5cls = kmeans.fit(pca_d)
k5cls.labels_
#filter rows of original data
filtered_label0 = pca_d[k5cls.labels_ == 0]
filtered_label1 = pca_d[k5cls.labels_ == 1]
filtered_label2 = pca_d[k5cls.labels_ == 2]
#Plotting the results
plt.scatter(filtered_label0[:,0] , filtered_label0[:,1] , color = 'red')
plt.scatter(filtered_label1[:,0] , filtered_label1[:,1] , color = 'black')
plt.scatter(filtered_label2[:,0] , filtered_label2[:,1] , color = 'blue')  # distinct colour so the third cluster is visible
plt.show()
kmeans = KMeans(n_clusters= 4)
np.random.seed(1234)
k5cls = kmeans.fit(K_means_D)
k5cls.labels_
filtered_label0 = K_mean[k5cls.labels_ == 0]
filtered_label0
filtered_label1 = K_mean[k5cls.labels_ == 1]
filtered_label1
filtered_label2 = K_mean[k5cls.labels_ == 2]
filtered_label2
filtered_label3 = K_mean[k5cls.labels_ == 3]
filtered_label3
f, ax = plt.subplots(1, figsize=(15, 15), dpi =50)
visualizer = SilhouetteVisualizer(kmeans, colors='yellowbrick')
visualizer.fit(K_means_D)
```
There is no negative dispersion, which is a good sign: it shows that none of the clusters are wrongly assigned, except when we increase the number of clusters.
# Conclusion
93% of the variance in the data is explained by the first principal component. This makes sense given that our data consists of time series of only 5 attributes, 3 of them related to education and 2 to GDP; these values have not changed drastically over the last 10 years, and most of these attributes point sharply along the first principal component.
The K-Means clustering tells a similar story: row-wise, most of the regions cluster together with respect to just the attributes chosen.
The issue is that these variables alone, as time series, are not enough to form distinct clusters.
Moreover, if the entire NUTS2 data set from Eurostat were taken, we would be able to notice more interesting clusters.
```
from __future__ import absolute_import, division, print_function, unicode_literals
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path
```
Import it using pandas
```
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
```
### Clean the data
The dataset contains a few unknown values.
```
dataset.isna().sum()
```
To keep this initial tutorial simple drop those rows.
```
dataset = dataset.dropna()
```
The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
```
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
```
### Split the data into train and test
Now split the dataset into a training set and a test set.
We will use the test set in the final evaluation of our model.
```
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
```
### Inspect the data
Have a quick look at the joint distribution of a few pairs of columns from the training set.
```
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
```
Also look at the overall statistics:
```
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
```
### Split features from labels
Separate the target value, or "label", from the features. This label is the value that you will train the model to predict.
```
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
```
### Normalize the data
Look again at the `train_stats` block above and note how different the ranges of each feature are.
It is good practice to normalize features that use different scales and ranges. Although the model *might* converge without feature normalization, it makes training more difficult, and it makes the resulting model dependent on the choice of units used in the input.
Note: Although we intentionally generate these statistics from only the training dataset, these statistics will also be used to normalize the test dataset. We need to do that to project the test dataset into the same distribution that the model has been trained on.
```
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
```
This normalized data is what we will use to train the model.
Caution: The statistics used to normalize the inputs here (mean and standard deviation) need to be applied to any other data that is fed to the model, along with the one-hot encoding that we did earlier. That includes the test set as well as live data when the model is used in production.
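As a small illustration of that caution, a hypothetical "live" sample would have to go through the same `norm` function built from `train_stats` (the feature values below are made up purely for illustration):
```
# Hypothetical new sample; values are made up for illustration only
new_sample = pd.DataFrame([{
    'Cylinders': 4, 'Displacement': 140.0, 'Horsepower': 90.0, 'Weight': 2264.0,
    'Acceleration': 15.5, 'Model Year': 76, 'USA': 1.0, 'Europe': 0.0, 'Japan': 0.0}])
normed_new_sample = norm(new_sample)  # same train_stats-based normalization as above
```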
## The model
### Build the model
Let's build our model. Here, we'll use a `Sequential` model with two densely connected hidden layers, and an output layer that returns a single, continuous value. The model building steps are wrapped in a function, `build_model`, since we'll create a second model, later on.
```
def build_model():
model = keras.Sequential([
layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation=tf.nn.relu),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=['mean_absolute_error', 'mean_squared_error'])
return model
model = build_model()
```
### Inspect the model
Use the `.summary` method to print a simple description of the model
```
model.summary()
```
Now try out the model. Take a batch of `10` examples from the training data and call `model.predict` on it.
```
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
```
It seems to be working, and it produces a result of the expected shape and type.
### Train the model
Train the model for 1000 epochs, and record the training and validation accuracy in the `history` object.
```
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
```
Visualize the model's training progress using the stats stored in the `history` object.
```
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mean_absolute_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mean_squared_error'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mean_squared_error'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
```
This graph shows little improvement, or even degradation in the validation error after about 100 epochs. Let's update the `model.fit` call to automatically stop training when the validation score doesn't improve. We'll use an *EarlyStopping callback* that tests a training condition for every epoch. If a set amount of epochs elapses without showing improvement, then automatically stop the training.
You can learn more about this callback [here](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).
```
model = build_model()
# The patience parameter is the number of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
```
The graph shows that on the validation set, the average error is usually around +/- 2 MPG. Is this good? We'll leave that decision up to you.
Let's see how well the model generalizes by using the **test** set, which we did not use when training the model. This tells us how well we can expect the model to predict when we use it in the real world.
```
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
```
### Make predictions
Finally, predict MPG values using data in the testing set:
```
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
```
It looks like our model predicts reasonably well. Let's take a look at the error distribution.
```
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
```
It's not quite Gaussian, but we might expect that because the number of samples is very small.
## Conclusion
This notebook introduced a few techniques to handle a regression problem.
* Mean Squared Error (MSE) is a common loss function used for regression problems (different loss functions are used for classification problems).
* Similarly, evaluation metrics used for regression differ from classification. A common regression metric is Mean Absolute Error (MAE).
* When numeric input data features have values with different ranges, each feature should be scaled independently to the same range.
* If there is not much training data, one technique is to prefer a small network with few hidden layers to avoid overfitting.
* Early stopping is a useful technique to prevent overfitting.
# Homework №4
# Student: Mikhail Pravilov
# Task 1
"Write a program that computes the roots of the Legendre polynomials, using any of the methods from the lecture except bisection. Use scipy.special.legendre to evaluate the polynomials, together with the interlacing of the roots of polynomials of consecutive degrees."
I propose using the iterative Newton method, since the derivative is expressed through the polynomials themselves, and the method is fairly simple to implement.
The polynomials are evaluated with the recurrence formula.
Let us write out the formulas:
$P_{n + 1}(x) = \frac{2n + 1}{n + 1}xP_n(x) - \frac{n}{n + 1}P_{n - 1}(x)$,
$P_0(x) = 1, P_1(x) = x$
$P'_n(x) = \frac{n}{1 - x^2} [P_{n - 1}(x) - xP_n(x)]$
$x_i^{(k + 1)} = x_i^{(k)} - \frac{P_n(x_i^{(k)})}{P'_n(x_i^{(k)})}$ is the Newton step for computing the $i$-th root.
$x_i^{(0)} = \cos[\pi(4i - 1) / (4n + 2)]$ is the starting point for the search of the $i$-th root. Here we use the interlacing property: $x_i^{(0)}$ lies between the roots (or between a root and $\pm 1$) of the polynomial of one degree lower.
As is known, there are $n$ roots, so $i = 1..n$.
The interlacing can also be used directly: simply take $a_i^{n} = \frac{a_{i - 1}^{n - 1} + a_{i}^{n - 1}}{2}$, where $a_i^n$ is the starting point for the search of the $i$-th root of the $n$-th Legendre polynomial. The roots take somewhat longer to compute this way. I settled on this variant because I did not prove the previous one with the cosines.
Regarding the number of iterations: I tried several values, and 10 gives a good result.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import legendre
def get_legendre_derivative(n):
def derivative(x):
P_n_1 = legendre(n - 1)
P_n = legendre(n)
return n / (1 - x ** 2) * (P_n_1(x) - x * P_n(x))
return derivative
def calculate_legendre_i_root_cos(n, i):
number_of_iterations = 10
x_cur = np.cos(np.pi * (4 * i - 1) / (4 * n + 2))
P_n = legendre(n)
P_n_derivative = get_legendre_derivative(n)
for k in range(number_of_iterations):
x_cur = x_cur - P_n(x_cur) / P_n_derivative(x_cur)
return x_cur
def calculate_legendre_i_root_interlacing(n, i):
number_of_iterations = 10
left_root = None
right_root = None
if n == 1:
return 0
if i == 1:
left_root = -1
else:
left_root = calculate_legendre_i_root_interlacing(n - 1, i - 1)
if i == n:
right_root = 1
else:
right_root = calculate_legendre_i_root_interlacing(n - 1, i)
x_cur = (left_root + right_root) / 2
P_n = legendre(n)
P_n_derivative = get_legendre_derivative(n)
for k in range(number_of_iterations):
x_cur = x_cur - P_n(x_cur) / P_n_derivative(x_cur)
return x_cur
def calculate_legendre_roots(n, calculate_legendre_i_root=calculate_legendre_i_root_interlacing):
return [calculate_legendre_i_root(n, i) for i in range(1, n + 1)]
n = 5
roots = calculate_legendre_roots(n)
numpy_roots = np.polynomial.legendre.leggauss(n)[0]
print("Legendre polynomial roots for n = " + str(n))
print("my implementation: " + str(roots))
print("numpy implementation: " + str(numpy_roots))
```
"Найдите веса соответствуюей формулы Гаусса, используя написанную ранее программу"
Мы знаем с лекции, что $w_i = \int_{a}^{b}{\prod_{k \neq i}{\frac{x - x_k}{x_i - x_k}}\rho(x) dx}$
Для полиномов Лежандра $\alpha = \beta = 0$, поэтому $\rho(x) = 1$ (было на лекции).
Также из предыдущего дз возьмем метод Симпсона. С помощью него и будем считать интеграл для весов. M = 1000 возьмем, где M количество интервалов для метода Сипмсона. Как показало предыдущее дз 1000 будет более чем достаточно.
```
def calculate_Simpson(f, a, b, M):
H = (b - a) / M
res = f(a) + f(b) + 4 * f(a + H / 2)
x_cur = a + H
for i in range(1, M):
res += 2 * f(x_cur) + 4 * f(x_cur + H / 2)
x_cur += H
res *= H / 6
return res
def get_weights(a, b, xs, calculation_method=calculate_Simpson, M=1000):
N = len(xs)
def f_to_integrate(i):
def f(x):
prod = 1
for k in range(0, N):
if k != i:
x_k = xs[k]
x_i = xs[i]
prod *= (x - x_k) / (x_i - x_k)
return prod
return f
weights = []
for i in range(0, N):
weight = calculation_method(f_to_integrate(i), a, b, M)
weights.append(weight)
return weights
def get_weights_legendre_specific(xs):
P_n_derivative = get_legendre_derivative(n)
return [2 / ((1 - xs[i] ** 2) * P_n_derivative(xs[i]) ** 2) for i in range(0, len(xs))]
def get_weights_for_legendre(n, calculate_legendre_i_root=calculate_legendre_i_root_interlacing):
roots = calculate_legendre_roots(n, calculate_legendre_i_root)
return get_weights(-1, 1, roots)
n = 5
weights = get_weights_for_legendre(n)
numpy_weights = np.polynomial.legendre.leggauss(n)[1]
print("Legendre polynomial weights for n = " + str(n))
print("my implementation: " + str(weights))
print("numpy implementation: " + str(numpy_weights))
```
Recall the true value of the integral: $\int_{-1}^{5}{\frac{1}{1 + 9x^2} dx} = \frac{1}{3}\arctan(3x) \Big|_{-1}^{5} \approx 0.917757978$
"Write a program that computes I using the obtained quadrature formula."
```
a = -1
b = 5
def f(x):
return 1 / (1 + 9 * x ** 2)
def antiderivative(x):
return 1 / 3 * np.arctan(3 * x)
a = -1
b = 5
length = b - a
real_value = antiderivative(b) - antiderivative(a)
def calculate_integral_legendre(f, a, b, n, calculate_i_root=calculate_legendre_i_root_interlacing):
roots = calculate_legendre_roots(n, calculate_i_root)
weights = get_weights_for_legendre(n, calculate_i_root)
f_values = [f((b - a) / 2 * roots[i] + (b + a) / 2) for i in range(len(roots))]
return (b - a) / 2 * np.dot(weights, f_values)
def draw_error_integral(f, a, b, real_value, calculation_method, N_min, N_max):
data_x = []
data_y = []
N_cur = N_min
step = 1
while N_cur <= N_max:
approx = calculation_method(f, a, b, N_cur)
data_x.append(N_cur)
data_y.append(np.log10(abs(approx - real_value)))
N_cur += step
plt.subplot(211)
plt.plot(data_x, data_y)
plt.ylabel("log10(|integral_real - approx_value|)")
plt.xlabel("N")
plt.figure(figsize=(10, 10), dpi=180)
draw_error_integral(f, a, b, real_value, calculate_integral_legendre, 1, 40)
draw_error_integral(f, a, b, real_value, calculate_Simpson, 1, 40)
plt.title("Error of methods")
plt.legend(("Legendre", "Simpson"))
plt.show()
```
The curves jump around constantly, and Simpson is better. Ignoring the jumps, each looks like a straight line. Since this is a plot in $(\log_{10}(err(x)); x)$ coordinates, a straight line means the error decreases exponentially.
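A quick way to back up the "straight line on a log scale" observation is to fit a line to $\log_{10}$ of the error as a function of N. A minimal sketch reusing the functions above (the cosine starting points are used only because they are faster; the printed slope is illustrative, not a stated result):
```
# Sketch: fit a straight line to log10(error) vs. N for the Gauss-Legendre rule
Ns = list(range(2, 16))
errs = [abs(calculate_integral_legendre(f, a, b, N, calculate_legendre_i_root_cos) - real_value)
        for N in Ns]
slope, intercept = np.polyfit(Ns, [np.log10(e) for e in errs], 1)
print("fitted slope of log10(error) vs N: %.2f" % slope)  # negative slope => roughly exponential decay
```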
# A group-based test
Next, we test bilateral symmetry by making an assumption that the left and the right
hemispheres both come from a stochastic block model, which models the probability
of any potential edge as a function of the groups that the source and target nodes
are part of.
For now, we use some broad cell type categorizations for each neuron to determine its
group. Alternatively, there are many methods for *estimating* these assignments to
groups for each neuron, which we do not explore here.
```
from pkg.utils import set_warnings
set_warnings()
import datetime
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import rotate_labels
from matplotlib.transforms import Bbox
from myst_nb import glue as default_glue
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import savefig
from pkg.perturb import remove_edges
from pkg.plot import set_theme
from pkg.stats import stochastic_block_test
from seaborn.utils import relative_luminance
DISPLAY_FIGS = False
FILENAME = "sbm_unmatched_test"
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, prefix="fig")
if not DISPLAY_FIGS:
plt.close()
def glue(name, var, prefix=None):
savename = f"{FILENAME}-{name}"
if prefix is not None:
savename = prefix + ":" + savename
default_glue(savename, var, display=False)
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
neutral_color = sns.color_palette("Set2")[2]
GROUP_KEY = "simple_group"
left_adj, left_nodes = load_unmatched(side="left")
right_adj, right_nodes = load_unmatched(side="right")
left_labels = left_nodes[GROUP_KEY].values
right_labels = right_nodes[GROUP_KEY].values
```
## The stochastic block model (SBM)
A [**stochastic block model (SBM)**
](https://en.wikipedia.org/wiki/Stochastic_block_model)
is a popular statistical model of networks. Put simply, this model treats the
probability of an edge occurring between node $i$ and node $j$ as purely a function of
the *communities* or *groups* that node $i$ and $j$ belong to. Therefore, this model
is parameterized by:
1. An assignment of each node in the network to a group. Note that this assignment
can be considered to be deterministic or random, depending on the specific
framing of the model one wants to use.
2. A set of group-to-group connection probabilities
```{admonition} Math
Let $n$ be the number of nodes, and $K$ be the number of groups in an SBM. For a
network $A$ sampled from an SBM:
$$ A \sim SBM(B, \tau)$$
We say that for all $(i,j), i \neq j$, with $i$ and $j$ both running
from $1 ... n$ the probability of edge $(i,j)$ occurring is:
$$ P[A_{ij} = 1] = P_{ij} = B_{\tau_i, \tau_j} $$
where $B \in [0,1]^{K \times K}$ is a matrix of group-to-group connection
probabilities and $\tau \in \{1...K\}^n$ is a vector of node-to-group assignments.
Note that here we are assuming $\tau$ is a fixed vector of assignments, though other
formulations of the SBM allow these assignments to themselves come from a categorical
distribution.
```
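To make the model concrete, here is a small toy sketch of sampling from an SBM directly from the definition above (the group sizes and $B$ are made up purely for illustration; this is not the data used in this notebook):
```
import numpy as np

toy_rng = np.random.default_rng(0)
tau = np.repeat([0, 1], [50, 50])        # made-up group assignments (K=2, n=100)
B = np.array([[0.30, 0.05],
              [0.05, 0.20]])             # made-up group-to-group probabilities
P = B[tau][:, tau]                       # P[i, j] = B[tau_i, tau_j]
A = toy_rng.binomial(1, P)               # sample each (directed) edge independently
np.fill_diagonal(A, 0)                   # no self-loops
```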
## Testing under the SBM model
Assuming this model, there are a few ways that one could test for differences between
two networks. In our case, we are interested in comparing the group-to-group
connection probability matrices, $B$, for the left and right hemispheres.
````{admonition} Math
We are interested in testing:
```{math}
:label: sbm_unmatched_null
H_0: B^{(L)} = B^{(R)}, \quad H_A: B^{(L)} \neq B^{(R)}
```
````
Rather than having to compare one proportion as in [](er_unmatched_test.ipynb), we are
now interested in comparing all $K^2$ probabilities between the SBM models for the
left and right hemispheres.
```{admonition} Math
The hypothesis test above can be decomposed into $K^2$ independent hypotheses.
$B^{(L)}$
and $B^{(R)}$ are both $K \times K$ matrices, where each element $b_{kl}$ represents
the probability of a connection from a neuron in group $k$ to one in group $l$. We
also know that group $k$ for the left network corresponds with group $k$ for the
right. In other words, the *groups* are matched. Thus, we are interested in testing,
for $k, l$ both running from $1...K$:
$$ H_0: B_{kl}^{(L)} = B_{kl}^{(R)},
\quad H_A: B_{kl}^{(L)} \neq B_{kl}^{(R)}$$
```
Thus, we will use
[Fisher's exact test](https://en.wikipedia.org/wiki/Fisher%27s_exact_test) to
compare each set of probabilities. To combine these multiple hypotheses into one, we
will use [Fisher's method](https://en.wikipedia.org/wiki/Fisher%27s_method) for
combining p-values to give us a p-value for the overall test. We also can look at
the p-values for each of the individual tests after correction for multiple
comparisons by the
[Bonferroni-Holm method.
](https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method)
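As a rough sketch of the machinery involved (standard SciPy/statsmodels routines are shown here only for illustration; the actual implementation used below lives in `pkg.stats.stochastic_block_test`, and all counts/p-values in the sketch are made up):
```
from scipy.stats import fisher_exact, combine_pvalues
from statsmodels.stats.multitest import multipletests

# 2x2 table for one (k, l) block: observed edges vs. non-edges in each hemisphere
table = [[20, 480],   # left  (made-up counts)
         [35, 465]]   # right (made-up counts)
stat_kl, pvalue_kl = fisher_exact(table)

# combine the K^2 block-wise p-values into one overall p-value (Fisher's method)
pvalues = [0.01, 0.2, 0.03, 0.5]  # made-up example values
_, overall_pvalue = combine_pvalues(pvalues, method="fisher")

# Holm-Bonferroni correction for inspecting the individual blocks
reject, corrected, _, _ = multipletests(pvalues, alpha=0.05, method="holm")
```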
For the current investigation, we focus on the case where $\tau$ is known ahead of
time, sometimes called the **A priori SBM**. We use some broad cell type labels which
were described in the paper which published the data to
define the group assignments $\tau$. Here, we do not explore
estimating these assignments, though many techniques exist for doing so. We note that
the results presented here could change depending on the group assignments which are
used. We also do not consider tests which would compare the assignment vectors,
$\tau$. {numref}`Figure {number} <fig:sbm_unmatched_test-group_counts>` shows the
number of neurons in each group in the group assignments $\tau$ for the left and
the right hemispheres. The number of neurons in each group is quite similar between
the two hemispheres.
```
stat, pvalue, misc = stochastic_block_test(
left_adj, right_adj, labels1=left_labels, labels2=right_labels, method="fisher"
)
glue("uncorrected_pvalue", pvalue)
n_tests = misc["n_tests"]
glue("n_tests", n_tests)
set_theme(font_scale=1)
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
group_counts_left = misc["group_counts1"]
group_counts_right = misc["group_counts2"]
for i in range(len(group_counts_left)):
ax.bar(i - 0.17, group_counts_left[i], width=0.3, color=network_palette["Left"])
ax.bar(i + 0.17, group_counts_right[i], width=0.3, color=network_palette["Right"])
rotate_labels(ax)
ax.set(
ylabel="Count",
xlabel="Group",
xticks=np.arange(len(group_counts_left)) + 0.2,
xticklabels=group_counts_left.index,
)
gluefig("group_counts", fig)
```
```{glue:figure} fig:sbm_unmatched_test-group_counts
:name: "fig:sbm_unmatched_test-group_counts"
The number of neurons in each group in each hemisphere. Note the similarity between
the hemispheres.
```
```
def plot_stochastic_block_test(misc, pvalue_vmin=None):
# get values
B1 = misc["probabilities1"]
B2 = misc["probabilities2"]
null_odds = misc["null_odds"]
B2 = B2 * null_odds
index = B1.index
p_max = max(B1.values.max(), B2.values.max())
uncorrected_pvalues = misc["uncorrected_pvalues"]
n_tests = misc["n_tests"]
K = B1.shape[0]
alpha = 0.05
hb_thresh = alpha / n_tests
# set up plot
pad = 2
width_ratios = [0.5, pad + 0.8, 10, pad - 0.4, 10, pad + 0.9, 10, 0.5]
set_theme(font_scale=1.25)
fig, axs = plt.subplots(
1,
len(width_ratios),
figsize=(30, 10),
gridspec_kw=dict(
width_ratios=width_ratios,
),
)
left_col = 2
right_col = 4
pvalue_col = 6
heatmap_kws = dict(
cmap="Blues", square=True, cbar=False, vmax=p_max, fmt="s", xticklabels=True
)
# heatmap of left connection probabilities
annot = np.full((K, K), "")
annot[B1.values == 0] = 0
ax = axs[left_col]
sns.heatmap(B1, ax=ax, annot=annot, **heatmap_kws)
ax.set(ylabel="Source group", xlabel="Target group")
ax.set_title(r"$\hat{B}$ left", fontsize="xx-large", color=network_palette["Left"])
# heatmap of right connection probabilities
annot = np.full((K, K), "")
annot[B2.values == 0] = 0
ax = axs[right_col]
im = sns.heatmap(B2, ax=ax, annot=annot, **heatmap_kws)
ax.set(ylabel="", xlabel="Target group")
text = r"$\hat{B}$ right"
if null_odds != 1:
text = r"$c$" + text
ax.set_title(text, fontsize="xx-large", color=network_palette["Right"])
# handle the colorbars
    # NOTE: did it this way because the other options weren't playing nice with
    # auto-constrained layouts.
def shrink_axis(ax, scale=0.7):
pos = ax.get_position()
mid = (pos.ymax + pos.ymin) / 2
height = pos.ymax - pos.ymin
new_pos = Bbox(
[
[pos.xmin, mid - scale * 0.5 * height],
[pos.xmax, mid + scale * 0.5 * height],
]
)
ax.set_position(new_pos)
ax = axs[0]
shrink_axis(ax, scale=0.5)
_ = fig.colorbar(
im.get_children()[0],
cax=ax,
fraction=1,
shrink=1,
ticklocation="left",
)
# plot p-values
ax = axs[pvalue_col]
annot = np.full((K, K), "")
annot[(B1.values == 0) & (B2.values == 0)] = "B"
annot[(B1.values == 0) & (B2.values != 0)] = "L"
annot[(B1.values != 0) & (B2.values == 0)] = "R"
plot_pvalues = np.log10(uncorrected_pvalues)
plot_pvalues[np.isnan(plot_pvalues)] = 0
im = sns.heatmap(
plot_pvalues,
ax=ax,
annot=annot,
cmap="RdBu",
center=0,
square=True,
cbar=False,
fmt="s",
vmin=pvalue_vmin,
)
ax.set(ylabel="", xlabel="Target group")
ax.set(xticks=np.arange(K) + 0.5, xticklabels=index)
ax.set_title(r"$log_{10}($p-value$)$", fontsize="xx-large")
colors = im.get_children()[0].get_facecolors()
significant = uncorrected_pvalues < hb_thresh
# NOTE: the x's looked bad so I did this super hacky thing...
pad = 0.2
for idx, (is_significant, color) in enumerate(
zip(significant.values.ravel(), colors)
):
if is_significant:
i, j = np.unravel_index(idx, (K, K))
# REF: seaborn heatmap
lum = relative_luminance(color)
text_color = ".15" if lum > 0.408 else "w"
xs = [j + pad, j + 1 - pad]
ys = [i + pad, i + 1 - pad]
ax.plot(xs, ys, color=text_color, linewidth=4)
xs = [j + 1 - pad, j + pad]
ys = [i + pad, i + 1 - pad]
ax.plot(xs, ys, color=text_color, linewidth=4)
# plot colorbar for the pvalue plot
# NOTE: only did it this way for consistency with the other colorbar
ax = axs[7]
shrink_axis(ax, scale=0.5)
_ = fig.colorbar(
im.get_children()[0],
cax=ax,
fraction=1,
shrink=1,
ticklocation="right",
)
fig.text(0.11, 0.85, "A)", fontweight="bold", fontsize=50)
fig.text(0.63, 0.85, "B)", fontweight="bold", fontsize=50)
# remove dummy axes
for i in range(len(width_ratios)):
if not axs[i].has_data():
axs[i].set_visible(False)
return fig, axs
fig, axs = plot_stochastic_block_test(misc)
gluefig("sbm_uncorrected", fig)
# need to save this for later for setting colorbar the same on other plot
pvalue_vmin = np.log10(np.nanmin(misc["uncorrected_pvalues"].values))
```
Next, we run the test for bilateral symmetry under the stochastic block model.
{numref}`Figure {number} <fig:sbm_unmatched_test-sbm_uncorrected>` shows both the
estimated group-to-group probability matrices, $\hat{B}^{(L)}$ and $\hat{B}^{(R)}$,
as well as the p-values from each test comparing each element of these matrices. From
a visual comparison of $\hat{B}^{(L)}$ and $\hat{B}^{(R)}$
{numref}`(Figure {number} A) <fig:sbm_unmatched_test-sbm_uncorrected>`, we see that
the
group-to-group connection probabilities are qualitatively similar. Note also that some
group-to-group connection probabilities are zero, making it nonsensical to do a
comparison of binomial proportions. We highlight these elements in the $\hat{B}$
matrices with an explicit "0", noting that we did not run the corresponding test in
these cases.
In {numref}`Figure {number} B <fig:sbm_unmatched_test-sbm_uncorrected>`, we see the
p-values from all {glue:text}`sbm_unmatched_test-n_tests` tests that were run. After
Bonferroni-Holm correction, 5 tests yield p-values less than 0.05, indicating that
we reject the null hypothesis that those elements of the $\hat{B}$ matrices are the
same between the two hemispheres. We also combine all p-values using Fisher's method,
which yields an overall p-value for the entire null hypothesis in
Equation {eq}`sbm_unmatched_null` of
{glue:text}`sbm_unmatched_test-uncorrected_pvalue:0.2e`.
```{glue:figure} fig:sbm_unmatched_test-sbm_uncorrected
:name: "fig:sbm_unmatched_test-sbm_uncorrected"
Comparison of stochastic block model fits for the left and right hemispheres.
**A)** The estimated group-to-group connection probabilities for the left
and right hemispheres appear qualitatively similar. Any estimated
probabilities which are zero (i.e. no edge was present between a given pair of
communities) are indicated explicitly with a "0" in that cell of the matrix.
**B)** The p-values for each hypothesis test between individual elements of
the block probability matrices. In other words, each cell represents a test for
whether a given group-to-group connection probability is the same on the left and the
right sides. "X" denotes a significant p-value after Bonferroni-Holm correction,
with $\alpha=0.05$. "B" indicates that a test was not run since the estimated
probability
was zero in that cell on both the left and right. "L" indicates this was the case on
the left only, and "R" that it was the case on the right only. These individual
p-values were combined using Fisher's method, resulting in an overall p-value (for the
null hypothesis that the two group connection probability matrices are the same) of
{glue:text}`sbm_unmatched_test-uncorrected_pvalue:0.2e`.
```
## Adjusting for a difference in density
From {numref}`Figure {number} <fig:sbm_unmatched_test-sbm_uncorrected>`, we see that
we have sufficient evidence to reject
the null hypothesis of bilateral symmetry under this version of the SBM. However,
we already saw in [](er_unmatched_test) that the overall
densities between the two networks are different. Could it be that this rejection of
the null hypothesis under the SBM can be explained purely by this difference in
density? In other words, are the group-to-group connection probabilities on the right
simply a "scaled up" version of those on the right, where each probability is scaled
by the same amount?
In {numref}`Figure {number} <fig:sbm_unmatched_test-probs_uncorrected>`,
we plot the estimated
probabilities on the left and the right hemispheres (i.e. each element of $\hat{B}$),
as
well as the difference between them. While subtle, we note that there is a slight
tendency for the left hemisphere estimated probability to be lower than the
corresponding one on the right. Specifically, we can also look at the group-to-group
connection probabilities which were significantly different in
{numref}`Figure {number} <fig:sbm_unmatched_test-sbm_uncorrected>` - these are plotted
in {numref}`Figure {number} <fig:sbm_unmatched_test-significant_p_comparison>`. Note
that in every case, the estimated probability on the right is higher than the
corresponding one on the left.
```
def plot_estimated_probabilities(misc):
B1 = misc["probabilities1"]
B2 = misc["probabilities2"]
null_odds = misc["null_odds"]
B2 = B2 * null_odds
B1_ravel = B1.values.ravel()
B2_ravel = B2.values.ravel()
arange = np.arange(len(B1_ravel))
sum_ravel = B1_ravel + B2_ravel
sort_inds = np.argsort(-sum_ravel)
B1_ravel = B1_ravel[sort_inds]
B2_ravel = B2_ravel[sort_inds]
fig, axs = plt.subplots(2, 1, figsize=(10, 10), sharex=True)
ax = axs[0]
sns.scatterplot(
x=arange,
y=B1_ravel,
color=network_palette["Left"],
ax=ax,
linewidth=0,
s=15,
alpha=0.5,
)
sns.scatterplot(
x=arange,
y=B2_ravel,
color=network_palette["Right"],
ax=ax,
linewidth=0,
s=15,
alpha=0.5,
zorder=-1,
)
ax.text(
0.7,
0.8,
"Left",
color=network_palette["Left"],
transform=ax.transAxes,
)
ax.text(
0.7,
0.7,
"Right",
color=network_palette["Right"],
transform=ax.transAxes,
)
ax.set_yscale("log")
ax.set(
ylabel="Estimated probability " + r"($\hat{p}$)",
xticks=[],
xlabel="Sorted group pairs",
)
ax.spines["bottom"].set_visible(False)
ax = axs[1]
diff = B1_ravel - B2_ravel
yscale = np.max(np.abs(diff))
yscale *= 1.05
sns.scatterplot(
x=arange, y=diff, ax=ax, linewidth=0, s=25, color=neutral_color, alpha=1
)
ax.axhline(0, color="black", zorder=-1)
ax.spines["bottom"].set_visible(False)
ax.set(
xticks=[],
ylabel=r"$\hat{p}_{left} - \hat{p}_{right}$",
xlabel="Sorted group pairs",
ylim=(-yscale, yscale),
)
n_greater = np.count_nonzero(diff > 0)
n_total = len(diff)
ax.text(
0.3,
0.8,
f"Left connection stronger ({n_greater}/{n_total})",
color=network_palette["Left"],
transform=ax.transAxes,
)
n_lesser = np.count_nonzero(diff < 0)
ax.text(
0.3,
0.15,
f"Right connection stronger ({n_lesser}/{n_total})",
color=network_palette["Right"],
transform=ax.transAxes,
)
fig.text(0.02, 0.905, "A)", fontweight="bold", fontsize=30)
fig.text(0.02, 0.49, "B)", fontweight="bold", fontsize=30)
return fig, ax
fig, ax = plot_estimated_probabilities(misc)
gluefig("probs_uncorrected", fig)
```
```{glue:figure} fig:sbm_unmatched_test-probs_uncorrected
:name: "fig:sbm_unmatched_test-probs_uncorrected"
Comparison of estimated connection probabilities for the left and right hemispheres.
**A)** The estimated group-to-group connection probabilities ($\hat{p}$), sorted by
the mean left/right connection probability. Note the very subtle tendency for the
left probability to be lower than the corresponding one on the right. **B)** The
differences between corresponding group-to-group connection probabilities
($\hat{p}^{(L)} - \hat{p}^{(R)}$). The trend of the left connection probabilities
being slightly smaller than the corresponding probability on the right is more
apparent here, as there are more negative than positive values.
```
```
def plot_significant_probabilities(misc):
B1 = misc["probabilities1"]
B2 = misc["probabilities2"]
null_odds = misc["null_odds"]
B2 = B2 * null_odds
index = B1.index
uncorrected_pvalues = misc["uncorrected_pvalues"]
n_tests = misc["n_tests"]
alpha = 0.05
hb_thresh = alpha / n_tests
significant = uncorrected_pvalues < hb_thresh
row_inds, col_inds = np.nonzero(significant.values)
rows = []
for row_ind, col_ind in zip(row_inds, col_inds):
source = index[row_ind]
target = index[col_ind]
left_p = B1.loc[source, target]
right_p = B2.loc[source, target]
pair = source + r"$\rightarrow$" + target
rows.append(
{
"source": source,
"target": target,
"p": left_p,
"side": "Left",
"pair": pair,
}
)
rows.append(
{
"source": source,
"target": target,
"p": right_p,
"side": "Right",
"pair": pair,
}
)
sig_data = pd.DataFrame(rows)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.pointplot(
data=sig_data,
y="p",
x="pair",
ax=ax,
hue="side",
dodge=True,
join=False,
palette=network_palette,
)
ax.get_legend().set_title("Side")
rotate_labels(ax)
ax.set(xlabel="Group pair", ylabel="Connection probability")
return fig, ax
fig, ax = plot_significant_probabilities(misc)
gluefig("significant_p_comparison", fig)
```
```{glue:figure} fig:sbm_unmatched_test-significant_p_comparison
:name: "fig:sbm_unmatched_test-significant_p_comparison"
Comparison of estimated group-to-group connection probabilities for the group-pairs
which were significantly different in
{numref}`Figure {number} <fig:sbm_unmatched_test-sbm_uncorrected>`.
In each case, the connection probability on the right hemisphere is higher.
```
These observations are consistent with the idea that perhaps the probabilities
on the right are a scaled up version of those on the left, for some global scaling.
We can frame this question as a new null hypothesis:
````{admonition} Math
With variables defined as in Equation {eq}`sbm_unmatched_null`, we can write our new
null hypothesis as:
```{math}
:label: sbm_unmatched_null_adjusted
H_0: B^{(L)} = c B^{(R)}, \quad H_A: B^{(L)} \neq c B^{(R)}
```
where $c$ is the ratio of the densities, $c = \frac{p^{(L)}}{p^{(R)}}$.
````
### Correcting by subsampling edges for one network
One naive (though quite intuitive) approach to adjust our test for a difference in
density is to simply make the densities of the two networks the same and then rerun
our
test. To do so, we calculated the number of edge removals (from the right hemisphere)
required to set the network densities roughly the same. We then randomly removed
that many edges from the right hemisphere network and
then re-ran the SBM test procedure above. We repeated this procedure
{glue:text}`sbm_unmatched_test-n_resamples` times, resulting in a p-value for each
subsampling of the right network.
The distribution of p-values from this process is
shown in {numref}`Figure {number} <fig:sbm_unmatched_test-pvalues_corrected>`. Whereas
the p-value for the original null hypothesis was
{glue:text}`sbm_unmatched_test-uncorrected_pvalue:0.2e`, we see now that the p-values
from our subsampled, density-adjusted test are around 0.8, indicating insufficient
evidence to reject our density-adjusted null hypothesis of bilateral symmetry
(Equation {eq}`sbm_unmatched_null_adjusted`).
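The notebook performs the edge removal with `remove_edges` from its own `pkg.perturb`
module; as a rough sketch of what such a helper might do (an assumed stand-in, not that
implementation):
```
import numpy as np
def subsample_edges(adjacency, n_remove, rng):
    # Return a copy of a binary adjacency matrix with n_remove randomly chosen edges set to 0
    adjacency = np.array(adjacency, copy=True)
    row_inds, col_inds = np.nonzero(adjacency)
    chosen = rng.choice(len(row_inds), size=n_remove, replace=False)
    adjacency[row_inds[chosen], col_inds[chosen]] = 0
    return adjacency
```
Repeating such a removal across many random draws, with `n_remove` chosen to equalize
the two densities, gives the p-value distribution summarized below.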
```
n_edges_left = np.count_nonzero(left_adj)
n_edges_right = np.count_nonzero(right_adj)
n_left = left_adj.shape[0]
n_right = right_adj.shape[0]
density_left = n_edges_left / (n_left ** 2)
density_right = n_edges_right / (n_right ** 2)
n_remove = int((density_right - density_left) * (n_right ** 2))
glue("density_left", density_left)
glue("density_right", density_right)
glue("n_remove", n_remove)
rows = []
n_resamples = 25
glue("n_resamples", n_resamples)
for i in range(n_resamples):
subsampled_right_adj = remove_edges(
right_adj, effect_size=n_remove, random_seed=rng
)
stat, pvalue, misc = stochastic_block_test(
left_adj,
subsampled_right_adj,
labels1=left_labels,
labels2=right_labels,
method="fisher",
)
rows.append({"stat": stat, "pvalue": pvalue, "misc": misc, "resample": i})
resample_results = pd.DataFrame(rows)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.histplot(data=resample_results, x="pvalue", ax=ax)
ax.set(xlabel="p-value", ylabel="", yticks=[])
ax.spines["left"].set_visible(False)
mean_resample_pvalue = np.mean(resample_results["pvalue"])
median_resample_pvalue = np.median(resample_results["pvalue"])
gluefig("pvalues_corrected", fig)
```
```{glue:figure} fig:sbm_unmatched_test-pvalues_corrected
:name: "fig:sbm_unmatched_test-pvalues_corrected"
Histogram of p-values after a correction for network density. For the observed
networks, the left hemisphere has a density of
{glue:text}`sbm_unmatched_test-density_left:0.4f`, and the right hemisphere has a
density of {glue:text}`sbm_unmatched_test-density_right:0.4f`. Here, we randomly
removed exactly {glue:text}`sbm_unmatched_test-n_remove` edges from the right
hemisphere network, which makes the density of the right network match that of the
left hemisphere network. Then, we re-ran the stochastic block model testing
procedure from {numref}`Figure {number} <fig:sbm_unmatched_test-sbm_uncorrected>`.
This entire process was repeated {glue:text}`sbm_unmatched_test-n_resamples` times.
The histogram above shows the distribution of p-values for the overall test. Note
that the p-values are no longer small, indicating that with this density correction,
we now fail to reject our null hypothesis of bilateral symmetry under the stochastic
block model.
```
### An analytic approach to correcting for differences in density
Instead of randomly resetting the density of the right hemisphere network, we can
actually modify the hypothesis we are testing for each element of the $\hat{B}$
matrices to include this adjustment by some constant scale, $c$.
```{admonition} Math
Fisher's exact test (used
above to compare each element of the $\hat{B}$ matrices) tests the null hypotheses:
$$H_0: B_{kl}^{(L)} = B_{kl}^{(R)}, \quad H_A: B_{kl}^{(L)} \neq B_{kl}^{(R)}$$
for each $(k, l)$ pair, where $k$ and $l$ are the indices of the source and target
groups, respectively.
Instead, we can use a test of:
$$H_0: B_{kl}^{(L)} = c B_{kl}^{(R)}, \quad H_A: B_{kl}^{(L)} \neq c B_{kl}^{(R)}$$
In our case, $c$ is a constant that we fit to the entire right hemisphere network to
set its density equal to that of the left, $c = \frac{p^{(L)}}{p^{(R)}}$.
A test for the adjusted null hypothesis above is given by using
[Fisher's noncentral hypergeometric distribution
](https://en.wikipedia.org/wiki/Fisher%27s_noncentral_hypergeometric_distribution)
and applying a procedure much like that of the traditional Fisher's exact test.
```
More information about this test can be found in [](nhypergeom_sims).
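SciPy exposes this distribution as `scipy.stats.nchypergeom_fisher`; the sketch below
shows one way such a per-pair test could be computed, using the density ratio as the
noncentrality (odds) parameter in the same spirit as the notebook's `null_odds`. It is
an illustrative implementation with hypothetical counts, not the one in `pkg.stats`.
```
import numpy as np
from scipy.stats import nchypergeom_fisher
def noncentral_fisher_test(edges_left, possible_left, edges_right, possible_right, odds=1.0):
    # Exact two-sided test that the left/right odds ratio for this group pair equals `odds`
    # (for sparse networks this is approximately H0: B_kl^(L) = odds * B_kl^(R)).
    M = possible_left + possible_right   # all potential edges for this group pair
    n = possible_left                    # potential edges on the left
    N = edges_left + edges_right         # observed edges on either side
    rv = nchypergeom_fisher(M, n, N, odds)
    support = np.arange(max(0, N - possible_right), min(possible_left, N) + 1)
    pmf = rv.pmf(support)
    observed = rv.pmf(edges_left)
    # two-sided p-value: total probability of counts no more likely than the observed one
    return min(pmf[pmf <= observed * (1 + 1e-8)].sum(), 1.0)
# hypothetical counts for one group pair, with odds = density_left / density_right
# pvalue = noncentral_fisher_test(40, 1000, 55, 1100, odds=0.9)
```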
```
null_odds = density_left / density_right
stat, pvalue, misc = stochastic_block_test(
left_adj,
right_adj,
labels1=left_labels,
labels2=right_labels,
method="fisher",
null_odds=null_odds,
)
glue("corrected_pvalue", pvalue)
fig, axs = plot_stochastic_block_test(misc, pvalue_vmin=pvalue_vmin)
gluefig("sbm_corrected", fig)
```
{numref}`Figure {number} <fig:sbm_unmatched_test-sbm_corrected>` shows the results
of running the analytic version of the density-adjusted test based on Fisher's
noncentral hypergeometric distribution. Note that now only two group-to-group
probability comparisons are significant after Bonferroni-Holm correction, and the
overall p-value for this test of Equation {eq}`sbm_unmatched_null_adjusted` is
{glue:text}`sbm_unmatched_test-corrected_pvalue:0.2f`.
```{glue:figure} fig:sbm_unmatched_test-sbm_corrected
:name: "fig:sbm_unmatched_test-sbm_corrected"
Comparison of stochastic block model fits for the left and right hemispheres after
correcting for a difference in hemisphere density.
**A)** The estimated group-to-group connection probabilities for the left
and right hemispheres, after the right hemisphere probabilities were scaled by a
density-adjusting constant, $c$. Any estimated
probabilities which are zero (i.e. no edge was present between a given pair of
communities) are indicated explicitly with a "0" in that cell of the matrix.
**B)** The p-values for each hypothesis test between individual elements of
the block probability matrices. In other words, each cell represents a test for
whether a given group-to-group connection probability is the same on the left and the
right sides. "X" denotes a significant p-value after Bonferroni-Holm correction,
with $\alpha=0.05$. "B" indicates that a test was not run since the estimated
probability
was zero in that cell on both the left and right. "L" indicates this was the case on
the left only, and "R" that it was the case on the right only. These individual
p-values were combined using Fisher's method, resulting in an overall p-value (for the
null hypothesis that the two group connection probability matrices are the same after
adjustment by a density-normalizing constant, $c$) of
{glue:text}`sbm_unmatched_test-corrected_pvalue:0.2f`.
```
Taken together, these results suggest that for the unmatched networks, and using the
known cell type labels, we reject the null hypothesis of bilateral symmetry under the
SBM (Equation {eq}`sbm_unmatched_null`), but fail to reject the null hypothesis of
bilateral symmetry under the SBM after a density adjustment (Equation
{eq}`sbm_unmatched_null_adjusted`). Moreover, they highlight the insights that
can be gained
by considering multiple definitions of bilateral symmetry.
```
elapsed = time.time() - t0
delta = datetime.timedelta(seconds=elapsed)
```
```
import warnings
warnings.filterwarnings('ignore')
import sys
print(sys.executable)
!{sys.executable} -m pip install scikit-image
!{sys.executable} -m pip install scipy
!{sys.executable} -m pip install opencv-python
!{sys.executable} -m pip install pillow
!{sys.executable} -m pip install matplotlib
!{sys.executable} -m pip install pandas
import numpy as np
import cv2
from skimage import transform, color
import scipy.misc
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import time
import timeit
%matplotlib inline
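# Hand-rolled RGB -> grayscale conversion using the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B)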
def rgb2gray(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
# Create Images and rescale intensity between 0 and 255
image_small = np.random.rand(80, 80, 3)
image_small *= 255
image_small = image_small.astype(np.uint8)
image_medium = np.random.rand(800, 800, 3)
image_medium *= 255
image_medium = image_medium.astype(np.uint8)
image_large = np.random.rand(8000, 8000, 3)
image_large *= 255
image_large = image_large.astype(np.uint8)
image_xlarge = np.random.rand(18000, 18000, 3)
image_xlarge *= 255
image_xlarge = image_xlarge.astype(np.uint8)
# resize
print("---- scikit-image ----")
t_1_scikit_small = %timeit -o transform.resize(image_small, (80, 80, 3), mode='reflect')
t_1_scikit_medium = %timeit -o transform.resize(image_medium, (80, 80, 3), mode='reflect')
t_1_scikit_large = %timeit -o transform.resize(image_large, (80, 80, 3), mode='reflect')
print("---- opencv ----")
t_1_opencv_small = %timeit -o cv2.resize(image_small, (80, 80))
t_1_opencv_medium = %timeit -o cv2.resize(image_medium, (80, 80))
t_1_opencv_large = %timeit -o cv2.resize(image_large, (80, 80))
print("---- scipy ----")
t_1_numpy_small = %timeit -o scipy.misc.imresize(image_small, (80, 80))
t_1_numpy_medium = %timeit -o scipy.misc.imresize(image_medium, (80, 80))
t_1_numpy_large = %timeit -o scipy.misc.imresize(image_large, (80, 80))
# ------------------------------------
# Test
# ------------------------------------
print("---- scikit-image ----")
t_2_scikit_small = %timeit -o color.rgb2gray(image_small)
t_2_scikit_medium = %timeit -o color.rgb2gray(image_medium)
t_2_scikit_large = %timeit -o color.rgb2gray(image_large)
print("---- opencv ----")
t_2_opencv_small = %timeit -o cv2.cvtColor(image_small, cv2.COLOR_RGB2GRAY)
t_2_opencv_medium = %timeit -o cv2.cvtColor(image_medium, cv2.COLOR_RGB2GRAY)
t_2_opencv_large = %timeit -o cv2.cvtColor(image_large, cv2.COLOR_RGB2GRAY)
print("---- numpy ----")
t_2_numpy_small = %timeit -o rgb2gray(image_small)
t_2_numpy_medium = %timeit -o rgb2gray(image_medium)
t_2_numpy_large = %timeit -o rgb2gray(image_large)
# resize + rgb2gray
def scikit_r_rgb2gray(img):
img = transform.resize(img, (80, 80, 3), mode='reflect')
img = color.rgb2gray(img)
return img
def opencv_r_rgb2gray(img):
img = cv2.resize(img, (80, 80))
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def np_r_rgb2gray(img):
img = scipy.misc.imresize(img, (80, 80))
img = rgb2gray(img)
return img
print("---- scikit-image ----")
t_3_scikit_small = %timeit -o scikit_r_rgb2gray(image_small)
t_3_scikit_medium = %timeit -o scikit_r_rgb2gray(image_medium)
t_3_scikit_large = %timeit -o scikit_r_rgb2gray(image_large)
print("---- opencv ----")
t_3_opencv_small = %timeit -o opencv_r_rgb2gray(image_small)
t_3_opencv_medium = %timeit -o opencv_r_rgb2gray(image_medium)
t_3_opencv_large = %timeit -o opencv_r_rgb2gray(image_large)
print("---- numpy + scipy ----")
t_3_numpy_small = %timeit -o np_r_rgb2gray(image_small)
t_3_numpy_medium = %timeit -o np_r_rgb2gray(image_medium)
t_3_numpy_large = %timeit -o np_r_rgb2gray(image_large)
# rgb2gray + resize
def scikit_r_rgb2gray(img):
img = color.rgb2gray(img)
img = transform.resize(img, (80, 80, 1), mode='reflect')
return img
def opencv_r_rgb2gray(img):
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = cv2.resize(img, (80, 80))
return img
def np_r_rgb2gray(img):
img = rgb2gray(img)
img = scipy.misc.imresize(img, (80, 80))
return img
print("---- scikit-image ----")
t_4_scikit_small = %timeit -o scikit_r_rgb2gray(image_small)
t_4_scikit_medium = %timeit -o scikit_r_rgb2gray(image_medium)
t_4_scikit_large = %timeit -o scikit_r_rgb2gray(image_large)
print("---- opencv ----")
t_4_opencv_small = %timeit -o opencv_r_rgb2gray(image_small)
t_4_opencv_medium = %timeit -o opencv_r_rgb2gray(image_medium)
t_4_opencv_large = %timeit -o opencv_r_rgb2gray(image_large)
print("---- numpy + scipy ----")
t_4_numpy_small = %timeit -o np_r_rgb2gray(image_small)
t_4_numpy_medium = %timeit -o np_r_rgb2gray(image_medium)
t_4_numpy_large = %timeit -o np_r_rgb2gray(image_large)
"""
# ------------------------------------
# Plotting Average
# ------------------------------------
rgb_2_gray_average = {
'framework': ['scikit-image', 'opencv', 'numpy'],
    'small': [t_2_scikit_small.average, t_2_opencv_small.average, t_2_numpy_small.average],
    'medium': [t_2_scikit_medium.average, t_2_opencv_medium.average, t_2_numpy_medium.average],
    'large': [t_2_scikit_large.average, t_2_opencv_large.average, t_2_numpy_large.average]
}
df = pd.DataFrame(rgb_2_gray_average, columns = ['framework', 'small', 'medium', 'large'])
fig, ax = plt.subplots(figsize=(10,3))
pos = list(range(len(df['small'])))
width = 0.25
plt.bar(pos, df['small'], width, alpha=1.0, color='#EE3224', label=df['framework'][0])
plt.bar([p + width for p in pos], df['medium'], width, alpha=1.0, color='#F78F1E', label=df['framework'][1])
plt.bar([p + width*2 for p in pos], df['large'], width, alpha=1.0, color='#FFC222', label=df['framework'][2])
ax.set_ylabel('Time')
ax.set_title('RGB2GRAY Test - Average')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(df['framework'])
#plt.legend(['Pre Score', 'Mid Score', 'Post Score'], loc='upper left')
plt.grid()
plt.show()
# ------------------------------------
# Plotting Best
# ------------------------------------
rgb_2_gray_best = {
'framework': ['scikit-image', 'opencv', 'numpy'],
    'small': [t_2_scikit_small.best, t_2_opencv_small.best, t_2_numpy_small.best],
    'medium': [t_2_scikit_medium.best, t_2_opencv_medium.best, t_2_numpy_medium.best],
    'large': [t_2_scikit_large.best, t_2_opencv_large.best, t_2_numpy_large.best]
}
df = pd.DataFrame(rgb_2_gray_best, columns = ['framework', 'small', 'medium', 'large'])
fig, ax = plt.subplots(figsize=(10,3))
pos = list(range(len(df['small'])))
width = 0.25
plt.bar(pos, df['small'], width, alpha=1.0, color='#EE3224', label=df['framework'][0])
plt.bar([p + width for p in pos], df['medium'], width, alpha=1.0, color='#F78F1E', label=df['framework'][1])
plt.bar([p + width*2 for p in pos], df['large'], width, alpha=1.0, color='#FFC222', label=df['framework'][2])
ax.set_ylabel('Time')
ax.set_title('RGB2GRAY Test - Best')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(df['framework'])
#plt.legend(['Pre Score', 'Mid Score', 'Post Score'], loc='upper left')
plt.grid()
plt.show()
"""
```
## 1. Two Sum
```
;;; Use a hash table
(defun two-sum (nums target)
(let ((hash-table (make-hash-table)))
(loop for i below (length nums) do
(let* ((key (- target (nth i nums)))
(value (gethash key hash-table)))
(if value
(return (list value i))
(setf (gethash (nth i nums) hash-table) i))))))
```
## 2. Add Two Numbers
```
;;; Walk both linked lists; there is a fair amount of duplicated code
(defun add-two-numbers (l1 l2)
(let ((res nil)
(quotient)
(remainder 0))
(loop while (and l1 l2) do
(setf (values remainder quotient)
(floor (+ (car l1) (car l2) remainder) 10))
(push quotient res)
(setf l1 (cdr l1))
(setf l2 (cdr l2)))
(loop while l1 do
(setf (values remainder quotient)
(floor (+ (car l1) remainder) 10))
(push quotient res)
(setf l1 (cdr l1)))
(loop while l2 do
(setf (values remainder quotient)
(floor (+ (car l2) remainder) 10))
(push quotient res)
(setf l2 (cdr l2)))
(if (> remainder 0)
(reverse (push remainder res))
(reverse res))))
;;; Extract the repeated loop-while parts into a macro; this is the best I could manage
(defmacro lw ((lst &optional (another '(0))) &body body)
`(loop while (and ,lst ,another) do
(setf (values remainder quotient)
(floor (+ (car ,lst)
(car ,another)
remainder) 10))
(push quotient res)
,@body))
(defun add-two-numbers (l1 l2)
(let ((res nil)
(quotient)
(remainder 0))
(lw (l1 l2)
(setf l1 (cdr l1))
(setf l2 (cdr l2)))
(lw (l1 '(0))
(setf l1 (cdr l1)))
(lw (l2 '(0))
(setf l2 (cdr l2)))
(if (> remainder 0)
(reverse (push remainder res))
(reverse res))))
```
## 3. Longest Substring Without Repeating Characters
```
;;; Use an association list (alist) to store each seen character and its index
;;; ASSOC takes a key and an alist and returns the first cons cell whose CAR matches the key
;;; Two-pointer search
(defun length-of-longest-substring (s)
(let ((alist nil)
(longest-length 0)
(p -1))
(loop
for c across s
for index from 0 to (length s) do
(let ((cell (assoc c alist))
(size (- index p)))
(cond
(cell (setf p (cdr cell)))
((> size longest-length) (setf longest-length size))))
(push (cons c index) alist))
longest-length))
```
## 4. Median of Two Sorted Arrays
```
;;; Walk both arrays, merging their elements into a new sorted list, then compute the median
;;; The multiple-value-bind macro receives multiple return values
(defun find-median-sorted-arrays (nums1 nums2)
(let ((lst nil)
(len (+ (length nums1) (length nums2))))
(loop while (and nums1 nums2) do
(if (< (car nums1) (car nums2))
(push (pop nums1) lst)
(push (pop nums2) lst)))
(setf lst (reverse lst))
(nconc lst nums1 nums2)
(multiple-value-bind (q r) (floor len 2)
(if (zerop r)
(/ (+ (nth q lst) (nth (1- q) lst)) 2)
(nth q lst)))))
;;; Append the two arrays, sort the result, then compute the median
(defun find-median-sorted-arrays-1 (nums1 nums2)
(let* ((lst (sort (append nil nums1 nums2) #'<))
(len (length lst)))
(multiple-value-bind (q r) (floor len 2)
(if (zerop r)
(/ (+ (nth q lst) (nth (1- q) lst)) 2)
(nth q lst)))))
```
## 5. Longest Palindromic Substring
```
;;; Two-pointer style search: max-length holds the longest palindrome length found so far,
;;; and start holds where it begins. While scanning, check whether the substring ending at
;;; the current index of length max-length + 1 or max-length + 2 is a palindrome.
;;; A macro extracts the repetition; arguably a misuse of macros :)
(defmacro increase-length (step)
`(let* ((in-start (- i max-length ,step -1))
(sub (subseq s in-start (+ i 1))))
(when (string= sub (reverse sub))
(incf max-length ,step)
(setf start in-start))))
(defun longest-palindrome (s)
(let ((size (length s))
(start 0)
(max-length 1))
(if (string= s (reverse s))
(return-from longest-palindrome s))
(loop for i from 1 below size do
(if (null (if (>= (- i max-length 1) 0)
(increase-length 2)))
(increase-length 1)))
(subseq s start (+ start max-length))))
```
## 6. ZigZag Conversion
```
;;; Work out the zigzag index pattern, then code it up directly
(defun convert (s num-rows)
(if (or (= 1 num-rows)
(> num-rows (length s)))
s
(let* ((length-s (length s))
(res (make-array length-s
:fill-pointer 0
:element-type 'character))
(step (* 2 (- num-rows 1))))
(loop for n below num-rows do
(do* ((idx n (+ idx step))
(ex-idx (+ idx (- step (* 2 n))) (+ idx (- step (* 2 n)))))
((>= idx length-s))
(vector-push (elt s idx) res)
(if (and (< ex-idx length-s)
(not (zerop n))
(not (= idx ex-idx)))
(vector-push (elt s ex-idx) res))))
res)))
```
## 7. Reverse Integer
```
;;; Convert to a string :)
(defun reverse-int (x)
(if (>= x 0)
(+ (parse-integer (reverse (write-to-string x))))
(- (parse-integer (reverse (write-to-string (abs x)))))))
```
## 8. String to Integer (atoi)
```
;;; Use a vector rather than a list because vector-push appends elements at the end
(defun my-atoi (str)
(let ((str (string-trim " " str))
(digit (make-array 12
:fill-pointer 0
:element-type 'character)))
(if (zerop (length str))
(return-from my-atoi 0)
(vector-push (elt str 0) digit))
(loop for s across (subseq str 1) do
(if (digit-char-p s)
(vector-push s digit)
(return)))
(handler-case (let ((res (parse-integer digit))
(limit (expt 2 31)))
(cond ((> res (1- limit))
(1- limit))
((< res (- limit))
(- limit))
(t res)))
(error () 0))))
```
## 9. Palindrome Number
```
(defun is-palindrome (x)
(string= (write-to-string x)
(reverse (write-to-string x))))
```
## 10. Regular Expression Matching
```
;;; Dynamic programming with a memo table
(defun is-match (s p)
(let ((memo (make-hash-table :test 'equal)))
(defun dp (str pattern)
(let ((key (list str pattern)))
(multiple-value-bind (value present) (gethash key memo)
(if present
value
(progn
(cond ((string= pattern "")
(setf (gethash key memo) (string= str "")))
((string= str "")
(setf (gethash key memo)
(and (eql (position #\* pattern) 1)
(dp str (subseq pattern 2)))))
((eql (position #\* pattern) 1)
(setf (gethash key memo)
(or (dp str (subseq pattern 2))
(dp (subseq str 1) pattern))))
((or (string= (elt pattern 0) #\.)
(string= (elt pattern 0) (elt str 0)))
(setf (gethash key memo)
(dp (subseq str 1) (subseq pattern 1))))
(t
(setf (gethash key memo) nil)))
(gethash key memo))))))
(dp s p)))
```
## 11. Container With Most Water
```
;;; Two pointers
(defmacro shorten (short i step)
`(progn
(incf ,i ,step)
(setf area (* ,short (- l r)))
(loop while (and (< r l) (< (nth ,i height) ,short)) do
(incf ,i ,step))))
(defun max-area (height)
(let ((r 0)
(l (1- (length height)))
(area 0)
(max-area 0))
(loop while (< r l) do
(if (< (nth r height) (nth l height))
(shorten (nth r height) r 1)
(shorten (nth l height) l -1))
(if (> area max-area)
(setf max-area area)))
max-area))
```
## 12. Integer to Roman
```
;;; :)
;;; To implement it by hand, split the number into ones/tens/hundreds/thousands digits and convert each
(defun int-to-roman (num)
(format nil "~@R" num))
```
## 13. Roman to Integer
```
;;; Convert each character to its value, then sum them up
(defun roman-to-integer (s)
(let* ((roman "IVXLCDM")
(nums '(1 5 10 50 100 500 1000))
(roman-nums (loop for c across s
collect (nth (position c roman) nums))))
(loop for (a b) on roman-nums
sum (if (and b (< a b)) (- a) a))))
```
## 14. Longest Common Prefix
```
;;; Collect the characters column by column, then check whether each column is all the same character
(defun longest-common-prefix (strs)
(if (not strs)
(return-from longest-common-prefix ""))
(let* ((chars (apply #'map 'list #'list strs))
(size (length chars)))
(print chars)
(loop
for ch in chars
for i to size do
(if (not (= (length (remove-duplicates ch)) 1))
(return-from longest-common-prefix (subseq (first strs) 0 i))))
(subseq (first strs) 0 size)))
```
## 15. 3Sum
```
(defun three-sum (nums)
(let ((table (make-hash-table))
(neg-nums nil)
(pos-nums nil)
(res nil))
(loop for n in nums do
(let ((value (gethash n table)))
(if value
(setf (gethash n table) (1+ value))
(setf (gethash n table) 1))))
(loop for k being the hash-keys in table do
(if (< k 0)
(push k neg-nums)
(push k pos-nums)))
(setf neg-nums (sort neg-nums '<))
(setf pos-nums (sort pos-nums '<))
(loop for n in neg-nums do
(loop for p in pos-nums do
(let* ((balance (- 0 n p))
(value (gethash balance table)))
(if (and value
(or (and (or (= balance n) (= balance p))
(> value 1))
(and (< n balance)
(< balance p))))
(push (list n balance p) res)))))
res))
```
## 16. 3Sum Closest
```
(defun three-sum-closest (nums target)
(let ((size (length nums))
(nums (sort nums #'<))
(closest nil))
(loop for i to (- size 3) do
(let* ((a (1+ i))
(b (1- size))
(min (+ (nth i nums)
(nth a nums)
(nth (1+ a) nums)))
(max (+ (nth i nums)
(nth (1- b) nums)
(nth b nums))))
(cond
((< max target) (push max closest))
((> min target) (push min closest))
(t (loop while (< a b) do
(let ((sum (+ (nth i nums)
(nth a nums)
(nth b nums))))
(push sum closest)
(cond
((< sum target) (incf a))
((> sum target) (decf b))
(t (return-from three-sum-closest target)))))))))
(first (sort closest #'(lambda (x y)
(< (abs (- x target))
(abs (- y target))))))))
```
## 17. Letter Combinations of a Phone Number
```
(defun letter-combinations (digits)
(let* ((letters (pairlis
'(#\2 #\3 #\4 #\5 #\6 #\7 #\8 #\9)
'("abc" "def" "ghi" "jkl" "mno" "pqrs" "tuv" "wxyz")))
(res (loop for c across
(cdr (assoc (elt digits 0) letters)) collect (string c))))
(loop for d across (subseq digits 1) do
(let ((strs nil))
(loop for a across (cdr (assoc d letters)) do
(loop for r in res do
(push (concatenate 'string r `(,a)) strs)))
(setf res strs)))
res))
```
## 18. 4Sum
```
(defun n-sum (nums target n &optional (result nil) (results nil))
(if (= n 2)
(let ((start 0)
(end (1- (length nums))))
(loop while (< start end) do
(let* ((a (nth start nums))
(b (nth end nums))
(s (+ a b)))
(cond
((= s target)
(push (append result (list a b)) results)
(incf start)
(decf end)
(loop while (and (< start end)
(= (nth start nums)
(nth (1- start) nums)))
do (incf start))
(loop while (and (< start end)
(= (nth end nums)
(nth (1+ end) nums)))
do (decf end)))
((< s target)
(incf start))
(t
(decf end))))))
;; n > 2
(loop for i to (- (length nums) n) do
(if (and (not (zerop i))
(= (nth i nums) (nth (1- i) nums)))
nil
(setf results (n-sum (subseq nums (1+ i))
(- target (nth i nums))
(1- n)
(append result (list (nth i nums)))
results)))))
results)
(defun four-sum (nums target)
(n-sum (sort nums #'<) target 4))
```
## 19. Remove Nth Node From End of List
```
(defun remove-nth-from-end (head n)
(let ((i (- (length head) n)))
(append (subseq head 0 i)
(subseq head (1+ i)))))
```
## 20. Valid Parentheses
```
(defun is-valid (s)
(let ((res nil)
(pair (pairlis '(#\) #\] #\}) '(#\( #\[ #\{))))
(loop for c across s do
(if (and res
(eql (first res) (cdr (assoc c pair))))
(pop res)
(push c res)))
(not res)))
```
## 21. Merge Two Sorted Lists
```
;;; :)
;;; What would it look like using car and cdr instead?
(defun merge-two-lists (l1 l2)
(sort (append l1 l2) #'<))
```
## 22. Generate Parentheses
```
;;; Dynamic programming
(let ((alist (pairlis '(0 1) (list nil '("()")))))
(defun generate-parenthesis (n)
(cond
((assoc n alist)
(cdr (assoc n alist)))
(t
(let ((res nil))
(loop for i from 1 below (1- n) do
(loop for a in (generate-parenthesis i) do
(loop for b in (generate-parenthesis (- n 1 i)) do
(push (concatenate 'string "(" a ")" b)
res))))
(loop for x in (generate-parenthesis (1- n)) do
(push (concatenate 'string "(" x ")") res)
(push (concatenate 'string "()" x) res))
(push (cons n (remove-duplicates res :test #'string=))
alist)
(cdr (assoc n alist)))))))
```
## 23. Merge k Sorted Lists
```
;;; The conventional approach would start from merging two sorted lists
(defun merge-k-lists (lists)
(sort (apply #'append lists) #'<))
```
## 24. Swap Nodes in Pairs
```
(defun swap-pairs (head)
(loop for i below (floor (length head) 2) do
(rotatef (nth (* i 2) head) (nth (1+ (* i 2)) head)))
head)
```
## 25. Reverse Nodes in k-Group
```
(defun reverse-k-group (lst k)
(let ((res (copy-list lst)))
(if (= k 1)
(return-from reverse-k-group res))
(loop for end from k to (length lst) by k do
(setf (subseq res (- end k) end)
(reverse (subseq res (- end k) end))))
res))
```
|
github_jupyter
|
;;; 使用散列表
(defun two-sum (nums target)
(let ((hash-table (make-hash-table)))
(loop for i below (length nums) do
(let* ((key (- target (nth i nums)))
(value (gethash key hash-table)))
(if value
(return (list value i))
(setf (gethash (nth i nums) hash-table) i))))))
;;; 对两个链表进行遍历,重复的代码有点多
(defun add-two-numbers (l1 l2)
(let ((res nil)
(quotient)
(remainder 0))
(loop while (and l1 l2) do
(setf (values remainder quotient)
(floor (+ (car l1) (car l2) remainder) 10))
(push quotient res)
(setf l1 (cdr l1))
(setf l2 (cdr l2)))
(loop while l1 do
(setf (values remainder quotient)
(floor (+ (car l1) remainder) 10))
(push quotient res)
(setf l1 (cdr l1)))
(loop while l2 do
(setf (values remainder quotient)
(floor (+ (car l2) remainder) 10))
(push quotient res)
(setf l2 (cdr l2)))
(if (> remainder 0)
(reverse (push remainder res))
(reverse res))))
;;; 将重复的 loop while 部分进行抽取,勉强弄成这样
(defmacro lw ((lst &optional (another '(0))) &body body)
`(loop while (and ,lst ,another) do
(setf (values remainder quotient)
(floor (+ (car ,lst)
(car ,another)
remainder) 10))
(push quotient res)
,@body))
(defun add-two-numbers (l1 l2)
(let ((res nil)
(quotient)
(remainder 0))
(lw (l1 l2)
(setf l1 (cdr l1))
(setf l2 (cdr l2)))
(lw (l1 '(0))
(setf l1 (cdr l1)))
(lw (l2 '(0))
(setf l2 (cdr l2)))
(if (> remainder 0)
(reverse (push remainder res))
(reverse res))))
;;; 使用关联表 alist 存储出现字符与对应的索引
;;; 函数 ASSOC 接受一个键和一个 alist 并返回第一个 CAR 匹配该键的点对单元
;;; 双指针进行查找
(defun length-of-longest-substring (s)
(let ((alist nil)
(longest-length 0)
(p -1))
(loop
for c across s
for index from 0 to (length s) do
(let ((cell (assoc c alist))
(size (- index p)))
(cond
(cell (setf p (cdr cell)))
((> size longest-length) (setf longest-length size))))
(push (cons c index) alist))
longest-length))
;;; 遍历两个数组并将数据放入新的排序好的数组中,再计算
;;; multiple-value-bind 宏接受多重返回值
(defun find-median-sorted-arrays (nums1 nums2)
(let ((lst nil)
(len (+ (length nums1) (length nums2))))
(loop while (and nums1 nums2) do
(if (< (car nums1) (car nums2))
(push (pop nums1) lst)
(push (pop nums2) lst)))
(setf lst (reverse lst))
(nconc lst nums1 nums2)
(multiple-value-bind (q r) (floor len 2)
(if (zerop r)
(/ (+ (nth q lst) (nth (1- q) lst)) 2)
(nth q lst)))))
;;; 将两个数组合并后排序,再计算
(defun find-median-sorted-arrays-1 (nums1 nums2)
(let* ((lst (sort (append nil nums1 nums2) #'<))
(len (length lst)))
(multiple-value-bind (q r) (floor len 2)
(if (zerop r)
(/ (+ (nth q lst) (nth (1- q) lst)) 2)
(nth q lst)))))
;;; 双指针进行查找,max-length 存储最大长度,start 存在最大长度开始位置。
;;; 遍历过程中,判断当前指针与前 max-length + 1 或 max-length + 2 中
;;; 的字符串是否为回文序列。
;;; 使用宏抽取重复,感觉宏被用歪了 :)
(defmacro increase-length (step)
`(let* ((in-start (- i max-length ,step -1))
(sub (subseq s in-start (+ i 1))))
(when (string= sub (reverse sub))
(incf max-length ,step)
(setf start in-start))))
(defun longest-palindrome (s)
(let ((size (length s))
(start 0)
(max-length 1))
(if (string= s (reverse s))
(return-from longest-palindrome s))
(loop for i from 1 below size do
(if (null (if (>= (- i max-length 1) 0)
(increase-length 2)))
(increase-length 1)))
(subseq s start (+ start max-length))))
;;; Work out the zigzag pattern, then code it up directly
(defun convert (s num-rows)
(if (or (= 1 num-rows)
(> num-rows (length s)))
s
(let* ((length-s (length s))
(res (make-array length-s
:fill-pointer 0
:element-type 'character))
(step (* 2 (- num-rows 1))))
(loop for n below num-rows do
(do* ((idx n (+ idx step))
(ex-idx (+ idx (- step (* 2 n))) (+ idx (- step (* 2 n)))))
((>= idx length-s))
(vector-push (elt s idx) res)
(if (and (< ex-idx length-s)
(not (zerop n))
(not (= idx ex-idx)))
(vector-push (elt s ex-idx) res))))
res)))
;;; Convert to a string :)
(defun reverse-int (x)
(if (>= x 0)
(+ (parse-integer (reverse (write-to-string x))))
(- (parse-integer (reverse (write-to-string (abs x)))))))
;;; An array is used instead of a list because vector-push appends elements at the end
(defun my-atoi (str)
(let ((str (string-trim " " str))
(digit (make-array 12
:fill-pointer 0
:element-type 'character)))
(if (zerop (length str))
(return-from my-atoi 0)
(vector-push (elt str 0) digit))
(loop for s across (subseq str 1) do
(if (digit-char-p s)
(vector-push s digit)
(return)))
(handler-case (let ((res (parse-integer digit))
(limit (expt 2 31)))
(cond ((> res (1- limit))
(1- limit))
((< res (- limit))
(- limit))
(t res)))
(error () 0))))
(defun is-palindrome (x)
(string= (write-to-string x)
(reverse (write-to-string x))))
;;; Dynamic programming with memoization for the matching
(defun is-match (s p)
(let ((memo (make-hash-table :test 'equal)))
(defun dp (str pattern)
(let ((key (list str pattern)))
(multiple-value-bind (value present) (gethash key memo)
(if present
value
(progn
(cond ((string= pattern "")
(setf (gethash key memo) (string= str "")))
((string= str "")
(setf (gethash key memo)
(and (eql (position #\* pattern) 1)
(dp str (subseq pattern 2)))))
((eql (position #\* pattern) 1)
(setf (gethash key memo)
(or (dp str (subseq pattern 2))
(dp (subseq str 1) pattern))))
((or (string= (elt pattern 0) #\.)
(string= (elt pattern 0) (elt str 0)))
(setf (gethash key memo)
(dp (subseq str 1) (subseq pattern 1))))
(t
(setf (gethash key memo) nil)))
(gethash key memo))))))
(dp s p)))
;;; Two pointers
(defmacro shorten (short i step)
`(progn
(incf ,i ,step)
(setf area (* ,short (- l r)))
(loop while (and (< r l) (< (nth ,i height) ,short)) do
(incf ,i ,step))))
(defun max-area (height)
(let ((r 0)
(l (1- (length height)))
(area 0)
(max-area 0))
(loop while (< r l) do
(if (< (nth r height) (nth l height))
(shorten (nth r height) r 1)
(shorten (nth l height) l -1))
(if (> area max-area)
(setf max-area area)))
max-area))
;;; :)
;;; To implement it by hand, split out the ones/tens/hundreds/thousands digits and convert each
(defun int-to-roman (num)
(format nil "~@R" num))
;;; Convert each character to its numeric value, then sum
(defun roman-to-integer (s)
(let* ((roman "IVXLCDM")
(nums '(1 5 10 50 100 500 1000))
(roman-nums (loop for c across s
collect (nth (position c roman) nums))))
(loop for (a b) on roman-nums
sum (if (and b (< a b)) (- a) a))))
;;; Take the characters column by column and check whether they are all the same
(defun longest-common-prefix (strs)
(if (not strs)
(return-from longest-common-prefix ""))
(let* ((chars (apply #'map 'list #'list strs))
(size (length chars)))
(print chars)
(loop
for ch in chars
for i to size do
(if (not (= (length (remove-duplicates ch)) 1))
(return-from longest-common-prefix (subseq (first strs) 0 i))))
(subseq (first strs) 0 size)))
(defun three-sum (nums)
(let ((table (make-hash-table))
(neg-nums nil)
(pos-nums nil)
(res nil))
(loop for n in nums do
(let ((value (gethash n table)))
(if value
(setf (gethash n table) (1+ value))
(setf (gethash n table) 1))))
(loop for k being the hash-keys in table do
(if (< k 0)
(push k neg-nums)
(push k pos-nums)))
(setf neg-nums (sort neg-nums '<))
(setf pos-nums (sort pos-nums '<))
(loop for n in neg-nums do
(loop for p in pos-nums do
(let* ((balance (- 0 n p))
(value (gethash balance table)))
(if (and value
(or (and (or (= balance n) (= balance p))
(> value 1))
(and (< n balance)
(< balance p))))
(push (list n balance p) res)))))
res))
(defun three-sum-closest (nums target)
(let ((size (length nums))
(nums (sort nums #'<))
(closest nil))
(loop for i to (- size 3) do
(let* ((a (1+ i))
(b (1- size))
(min (+ (nth i nums)
(nth a nums)
(nth (1+ a) nums)))
(max (+ (nth i nums)
(nth (1- b) nums)
(nth b nums))))
(cond
((< max target) (push max closest))
((> min target) (push min closest))
(t (loop while (< a b) do
(let ((sum (+ (nth i nums)
(nth a nums)
(nth b nums))))
(push sum closest)
(cond
((< sum target) (incf a))
((> sum target) (decf b))
(t (return-from three-sum-closest target)))))))))
(first (sort closest #'(lambda (x y)
(< (abs (- x target))
(abs (- y target))))))))
(defun letter-combinations (digits)
(let* ((letters (pairlis
'(#\2 #\3 #\4 #\5 #\6 #\7 #\8 #\9)
'("abc" "def" "ghi" "jkl" "mno" "pqrs" "tuv" "wxyz")))
(res (loop for c across
(cdr (assoc (elt digits 0) letters)) collect (string c))))
(loop for d across (subseq digits 1) do
(let ((strs nil))
(loop for a across (cdr (assoc d letters)) do
(loop for r in res do
(push (concatenate 'string r `(,a)) strs)))
(setf res strs)))
res))
(defun n-sum (nums target n &optional (result nil) (results nil))
(if (= n 2)
(let ((start 0)
(end (1- (length nums))))
(loop while (< start end) do
(let* ((a (nth start nums))
(b (nth end nums))
(s (+ a b)))
(cond
((= s target)
(push (append result (list a b)) results)
(incf start)
(decf end)
(loop while (and (< start end)
(= (nth start nums)
(nth (1- start) nums)))
do (incf start))
(loop while (and (< start end)
(= (nth end nums)
(nth (1+ end) nums)))
do (decf end)))
((< s target)
(incf start))
(t
(decf end))))))
;; n > 2
(loop for i to (- (length nums) n) do
(if (and (not (zerop i))
(= (nth i nums) (nth (1- i) nums)))
nil
(setf results (n-sum (subseq nums (1+ i))
(- target (nth i nums))
(1- n)
(append result (list (nth i nums)))
results)))))
results)
(defun four-sum (nums target)
(n-sum (sort nums #'<) target 4))
(defun remove-nth-from-end (head n)
(let ((i (- (length head) n)))
(append (subseq head 0 i)
(subseq head (1+ i)))))
(defun is-valid (s)
(let ((res nil)
(pair (pairlis '(#\) #\] #\}) '(#\( #\[ #\{))))
(loop for c across s do
(if (and res
(eql (first res) (cdr (assoc c pair))))
(pop res)
(push c res)))
(not res)))
;;; :)
;;; What would it look like using car and cdr instead?
(defun merge-two-lists (l1 l2)
(sort (append l1 l2) #'<))
;;; Dynamic programming
(let ((alist (pairlis '(0 1) (list nil '("()")))))
(defun generate-parenthesis (n)
(cond
((assoc n alist)
(cdr (assoc n alist)))
(t
(let ((res nil))
(loop for i from 1 below (1- n) do
(loop for a in (generate-parenthesis i) do
(loop for b in (generate-parenthesis (- n 1 i)) do
(push (concatenate 'string "(" a ")" b)
res))))
(loop for x in (generate-parenthesis (1- n)) do
(push (concatenate 'string "(" x ")") res)
(push (concatenate 'string "()" x) res))
(push (cons n (remove-duplicates res :test #'string=))
alist)
(cdr (assoc n alist)))))))
;;; The conventional approach would start from merging two sorted lists
(defun merge-k-lists (lists)
(sort (apply #'append lists) #'<))
(defun swap-pairs (head)
(loop for i below (floor (length head) 2) do
(rotatef (nth (* i 2) head) (nth (1+ (* i 2)) head)))
head)
(defun reverse-k-group (lst k)
(let ((res (copy-list lst)))
(if (= k 1)
(return-from reverse-k-group res))
(loop for end from k to (length lst) by k do
(setf (subseq res (- end k) end)
(reverse (subseq res (- end k) end))))
res))
| 0.194712 | 0.825273 |
# Numbers
The following categories of numbers are studied:
- Integers (naturals)
- Reals
- Imaginary (complex) numbers
- Fractions
## Integers
```
2 + 2
type(2+2)
a = 2
b = 3
a
b
type(a)
type(b)
```
### Arithmetic operations with integers:
```
a + b
a - b
a * b
a / b
a // b
type(a/b)
type(a//b)
a % b
a ** b
```
### Conversions
```
cadena = '1000'
type(cadena)
# a + cadena  # Raises a TypeError: numeric values cannot be added to text values.
numero_1000 = int(cadena)
numero_1000
type(numero_1000)
```
## Reals
These are numbers that have an integer part and a decimal (fractional) part.
```
c = 2.0
d = 3.0
c
d
type(c)
type(d)
c + d
type(c+d)
a
type(a)
a + c
type(a + c)
type(2 + 3.0)
```
### Arithmetic operations with real numbers
```
c + d
c - d
c * d
c / d
c // d
type(c//d)
c ** d
```
### Special case
```
suma = 0.1 + 0.1 + 0.1
suma
suma == 0.3
```
**Important note**: To handle this special case when adding reals, use the `Decimal` class (from the `decimal` module).
```
from decimal import Decimal
numero = Decimal(0.1)
suma_decimal = numero + numero + numero
suma_decimal
numero_decimal = Decimal('0.1')
suma_decimal = numero_decimal + numero_decimal + numero_decimal
suma_decimal
suma_decimal == Decimal('0.3')
```
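Another common way to handle the comparison above, without switching to `Decimal`, is to test for *approximate* equality instead of exact equality; a minimal sketch using the standard library's `math.isclose`:
```
import math
math.isclose(0.1 + 0.1 + 0.1, 0.3)  # True: equal within a small relative tolerance
```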
## Operator precedence
It determines the order in which arithmetic operations are evaluated.
```
1 + 2*5 / 2
1 + 2 * (5/2)
(1+2) * 5 / 2
```
1. ()
2. **
3. *, /, %
4. +, -
# Module for mathematical operations
```
import math
math.pi
math.e
math.sin(math.pi)
math.cos(math.pi)
math.factorial(5)
```
## Imaginary (complex) numbers
These numbers are made up of two parts:
1. Real
2. Imaginary
Example: `2 + 3i` (written `2 + 3j` in Python)
```
imaginario = 2 + 3j
imaginario
type(imaginario)
imaginario_b = complex(-3, -2)
imaginario_b
type(imaginario_b)
```
### Arithmetic operations with imaginary numbers
```
imaginario + imaginario_b
imaginario - imaginario_b
imaginario * imaginario_b
imaginario / imaginario_b
```
### The `cmath` module for calculations with imaginary numbers
```
import cmath
cmath.polar(imaginario)
cmath.rect(3.605551275463989, 0.982793723247329)
imaginario
cmath.log(imaginario)
cmath.sqrt(imaginario)
```
# Fractions
These are numbers made up of two parts:
1. Numerator (top part)
2. Denominator (bottom part)
$\frac{1}{2}$
```
from fractions import Fraction
un_medio = 1/2
un_medio
un_medio = Fraction(1, 2)
un_medio
print(un_medio)
str(un_medio)
un_tercio = Fraction(1, 3)
un_tercio
print(un_tercio)
```
## Arithmetic operations with fractions:
```
suma = un_medio + un_tercio
suma
print(suma)
resta = un_medio - un_tercio
resta
producto = un_medio * un_tercio
producto
division = un_medio / un_tercio
division
print(Fraction('0.5'))
un_medio**2
```
## Parts of a fraction:
```
un_medio.numerator
un_medio.denominator
print(un_medio)
import math
math.sqrt(un_tercio)
```
|
github_jupyter
|
2 + 2
type(2+2)
a = 2
b = 3
a
b
type(a)
type(b)
a + b
a - b
a * b
a / b
a // b
type(a/b)
type(a//b)
a % b
a ** b
cadena = '1000'
type(cadena)
# a + cadena  # Raises a TypeError: numeric values cannot be added to text values.
numero_1000 = int(cadena)
numero_1000
type(numero_1000)
c = 2.0
d = 3.0
c
d
type(c)
type(d)
c + d
type(c+d)
a
type(a)
a + c
type(a + c)
type(2 + 3.0)
c + d
c - d
c * d
c / d
c // d
type(c//d)
c ** d
suma = 0.1 + 0.1 + 0.1
suma
suma == 0.3
from decimal import Decimal
numero = Decimal(0.1)
suma_decimal = numero + numero + numero
suma_decimal
numero_decimal = Decimal('0.1')
suma_decimal = numero_decimal + numero_decimal + numero_decimal
suma_decimal
suma_decimal == Decimal('0.3')
1 + 2*5 / 2
1 + 2 * (5/2)
(1+2) * 5 / 2
import math
math.pi
math.e
math.sin(math.pi)
math.cos(math.pi)
math.factorial(5)
imaginario = 2 + 3j
imaginario
type(imaginario)
imaginario_b = complex(-3, -2)
imaginario_b
type(imaginario_b)
imaginario + imaginario_b
imaginario - imaginario_b
imaginario * imaginario_b
imaginario / imaginario_b
import cmath
cmath.polar(imaginario)
cmath.rect(3.605551275463989, 0.982793723247329)
imaginario
cmath.log(imaginario)
cmath.sqrt(imaginario)
from fractions import Fraction
un_medio = 1/2
un_medio
un_medio = Fraction(1, 2)
un_medio
print(un_medio)
str(un_medio)
un_tercio = Fraction(1, 3)
un_tercio
print(un_tercio)
suma = un_medio + un_tercio
suma
print(suma)
resta = un_medio - un_tercio
resta
producto = un_medio * un_tercio
producto
division = un_medio / un_tercio
division
print(Fraction('0.5'))
un_medio**2
un_medio.numerator
un_medio.denominator
print(un_medio)
import math
math.sqrt(un_tercio)
| 0.311636 | 0.893681 |
# Hartree-Fock Self-Consistent Field Theory
## I. Theoretical Overview
In this tutorial, we will seek to introduce the theory and implementation of the quantum chemical method known as Hartree-Fock Self-Consistent Field Theory (HF-SCF) with restricted orbitals and closed-shell systems (RHF). This theory seeks to solve the pseudo-eigenvalue matrix equation
$$\sum_{\nu} F_{\mu\nu}C_{\nu i} = \epsilon_i\sum_{\nu}S_{\mu\nu}C_{\nu i}$$
$${\bf FC} = {\bf SC\epsilon},$$
called the Roothaan equations, which can be solved self-consistently for the orbital coefficient matrix **C** and the orbital energy eigenvalues $\epsilon_i$. The Fock matrix, **F**, has elements $F_{\mu\nu}$ given (in the atomic orbital basis) as
$$F_{\mu\nu} = H_{\mu\nu} + 2(\mu\,\nu\mid\lambda\,\sigma)D_{\lambda\sigma} - (\mu\,\lambda\mid\nu\,\sigma)D_{\lambda\sigma},$$
where $D_{\lambda\sigma}$ is an element of the one-particle density matrix **D**, constructed from the orbital coefficient matrix **C**:
$$D_{\lambda\sigma} = C_{\sigma i}C_{\lambda i}$$
Formally, the orbital coefficient matrix **C** is an $N\times M$ matrix, where $N$ is the number of atomic basis functions, and $M$ is the total number of molecular orbitals. Physically, this matrix describes the contribution of every atomic basis function (rows) to a particular molecular orbital (e.g., the $i^{\rm th}$ column). The density matrix **D** is a square matrix describing the electron density contained in each orbital. In the molecular orbital basis, the density matrix has elements
$$D_{pq} = \left\{
\begin{array}{ll}
2\delta_{pq} & p\; {\rm occupied} \\
0 & p\; {\rm virtual} \\
\end{array}\right .$$
The total RHF energy is given by
$$E^{\rm RHF}_{\rm total} = E^{\rm RHF}_{\rm elec} + E^{\rm BO}_{\rm nuc},$$
where $E^{\rm RHF}_{\rm elec}$ is the final electronic RHF energy, and $E^{\rm BO}_{\rm nuc}$ is the total nuclear repulsion energy within the Born-Oppenheimer approximation. To compute the electronic energy, we may use the density matrix in the AO basis:
$$E^{\rm RHF}_{\rm elec} = (F_{\mu\nu} + H_{\mu\nu})D_{\mu\nu},$$
and the nuclear repulsion energy is simply
$$E^{\rm BO}_{\rm nuc} = \sum_{A>B}\frac{Z_AZ_B}{r_{AB}}$$
where $Z_A$ is the nuclear charge of atom $A$, and the sum runs over all unique nuclear pairs.
## II. Implementation
Using the above overview, let's write a RHF program using <span style="font-variant: small-caps"> Psi4 </span> and NumPy. First, we need to import these Python modules:
```
# ==> Import Psi4 & NumPy <==
import psi4
import numpy as np
```
Next, using what you learned in the previous tutorial module, set the following <span style="font-variant: small-caps"> Psi4 </span> and molecule options.
Memory & Output specifications:
- Give 500 MB of memory to Psi4
- Set Psi4 output file to "output.dat"
- Set a variable `numpy_memory` to an acceptable amount of available memory for the working computer to use for storing tensors
Molecule definition:
- Define the "physicist's water molecule" (O-H bond length = 1.1 Angstroms, HOH bond angle = 104 degrees)
- Molecular symmetry C1
Computation options:
- basis set cc-pVDZ
- SCF type PK
- Energy convergence criterion to 0.00000001
```
# ==> Set Basic Psi4 Options <==
# Memory specification
psi4.set_memory(int(5e8))
numpy_memory = 2
# Set output file
psi4.core.set_output_file('output.dat', False)
# Define Physicist's water -- don't forget C1 symmetry!
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
# Set computation options
psi4.set_options({'basis': 'cc-pvdz',
'scf_type': 'pk',
'e_convergence': 1e-8})
```
Since we will be writing our own, iterative RHF procedure, we will need to define options that we can use to tweak our convergence behavior. For example, if something goes wrong and our SCF doesn't converge, we don't want to spiral into an infinite loop. Instead, we can specify the maximum number of iterations allowed, and store this value in a variable called `maxiter`. Here are some good default options for our program:
~~~python
MAXITER = 40
E_conv = 1.0e-6
~~~
These are by no means the only possible values for these options, and it's encouraged to try different values and see for yourself how different choices affect the performance of our program. For now, let's use the above as our default.
```
# ==> Set default program options <==
# Maximum SCF iterations
MAXITER = 40
# Energy convergence criterion
E_conv = 1.0e-6
```
Before we can build our Fock matrix, we'll need to compute the following static one- and two-electron quantities:
- Electron repulsion integrals (ERIs) **I** between our AOs
- Overlap matrix **S**
- Core Hamiltonian matrix **H**
Fortunately for us, we can do this using the machinery in <span style='font-variant: small-caps'> Psi4</span>. In the first module, you learned about `psi4.core.Wavefunction` and `psi4.core.MintsHelper` classes. In the cell below, use these classes to perform the following:
1. Create Class Instances
a. Build a wavefunction for our molecule and basis set
b. Create an instance of the `MintsHelper` class with the basis set for the wavefunction
2. Build overlap matrix, **S**
a. Get the AO overlap matrix from `MintsHelper`, and cast it into a NumPy array
    b. Get the number of AO basis functions and number of doubly occupied orbitals from S and the wavefunction
3. Compute ERI Tensor, **I**
a. Get ERI tensor from `MintsHelper`, and cast it into a NumPy array
4. Build core Hamiltonian, **H**
a. Get AO kinetic energy matrix from `MintsHelper`, and cast it into a NumPy array
b. Get AO potential energy matrix from `MintsHelper`, and cast it into a NumPy array
c. Build core Hamiltonian from kinetic & potential energy matrices
```
# ==> Compute static 1e- and 2e- quantities with Psi4 <==
# Class instantiation
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('basis'))
mints = psi4.core.MintsHelper(wfn.basisset())
# Overlap matrix
S = np.asarray(mints.ao_overlap())
# Number of basis Functions & doubly occupied orbitals
nbf = S.shape[0]
ndocc = wfn.nalpha()
print('Number of occupied orbitals: %3d' % (ndocc))
print('Number of basis functions: %3d' % (nbf))
# Memory check for ERI tensor
I_size = (nbf**4) * 8.e-9
print('\nSize of the ERI tensor will be {:4.2f} GB.'.format(I_size))
memory_footprint = I_size * 1.5
if I_size > numpy_memory:
psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds allotted memory \
limit of %4.2f GB." % (memory_footprint, numpy_memory))
# Build ERI Tensor
I = np.asarray(mints.ao_eri())
# Build core Hamiltonian
T = np.asarray(mints.ao_kinetic())
V = np.asarray(mints.ao_potential())
H = T + V
```
The Roothaan equations
$${\bf FC} = {\bf SC\epsilon}$$
are only *pseudo*-eigenvalue equations due to the presence of the overlap matrix **S** on the right hand side of the equation. Normally, the AO basis set will not be orthonormal, so the overlap matrix **S** will not be unity and therefore cannot be ignored. Let's check to see whether our AO basis is orthonormal:
```
# ==> Inspecting S for AO orthonormality <==
hope = np.allclose(S, np.eye(S.shape[0]))
print('\nDo we have any hope that our AO basis is orthonormal? %s!' % (hope))
```
Just as we'd expected -- looks like we can't ignore the AO overlap matrix. Therefore, the Fock matrix **F** cannot simply be diagonalized to solve for the orbital coefficient matrix **C**. There is still hope, however! We can overcome this issue by transforming the AO basis so that all of our basis functions are orthonormal. In other words, we seek a matrix **A** such that the transformation
$${\bf A}^{\dagger}{\bf SA} = {\bf 1}$$
One method of doing this is called *symmetric orthogonalization*, which lets ${\bf A} = {\bf S}^{-1/2}$. Then,
$${\bf A}^{\dagger}{\bf SA} = {\bf S}^{-1/2}{\bf SS}^{-1/2} = {\bf S}^{-1/2}{\bf S}^{1/2} = {\bf S}^0 = {\bf 1},$$
and we see that this choice for **A** does in fact yield an orthonormal AO basis. In the cell below, construct this transformation matrix using <span style='font-variant: small-caps'> Psi4</span>'s built-in `Matrix` class member function `power()` just like the following:
~~~python
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
~~~
```
# ==> Construct AO orthogonalization matrix A <==
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
# Check orthonormality
S_p = A.dot(S).dot(A)
new_hope = np.allclose(S_p, np.eye(S.shape[0]))
if new_hope:
print('There is a new hope for diagonalization!')
else:
print("Whoops...something went wrong. Check that you've correctly built the transformation matrix.")
```
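As a side note, the same orthogonalizer can be built in pure NumPy from the eigendecomposition of **S** (which is symmetric and positive definite); this is only an illustrative sketch of what `Matrix.power(-0.5, ...)` computes, not a replacement for it:
~~~python
# S = U diag(s) U^T  ==>  S^(-1/2) = U diag(s^(-1/2)) U^T
evals, U = np.linalg.eigh(S)
A_numpy = U @ np.diag(evals**-0.5) @ U.T
print(np.allclose(A_numpy, A))                          # should match Psi4's Matrix.power result
print(np.allclose(A_numpy @ S @ A_numpy, np.eye(nbf)))  # and it orthonormalizes S
~~~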
The drawback of this scheme is that we would now have to either re-compute the ERI and core Hamiltonian tensors in the newly orthogonal AO basis, or transform them using our **A** matrix (both would be overly costly, especially transforming **I**). Instead, we can substitute ${\bf C} = {\bf AC}'$ into the Roothaan equations:
\begin{align}
{\bf FAC'} &= {\bf SAC}'{\bf \epsilon}\\
{\bf A}^{\dagger}({\bf FAC}')&= {\bf A}^{\dagger}({\bf SAC}'){\bf \epsilon}\\
({\bf A}^{\dagger}{\bf FA}){\bf C}'&= ({\bf A}^{\dagger}{\bf SA}){\bf C}'{\bf \epsilon}\\
{\bf F}'{\bf C}' &= {\bf 1C}'{\bf \epsilon}\\
{\bf F}'{\bf C}' &= {\bf C}'{\bf \epsilon}\\
\end{align}
Clearly, we have arrived at a canonical eigenvalue equation. This equation can be solved directly for the transformed orbital coefficient matrix ${\bf C}'$ by diagonalizing the transformed Fock matrix, ${\bf F}'$, before transforming ${\bf C}'$ back into the original AO basis with ${\bf C} = {\bf AC}'$.
Before we can get down to the business of using the Fock matrix **F** to compute the RHF energy, we first need to compute the orbital coefficient **C** matrix. But, before we compute the **C** matrix, we first need to build **F**. Wait...hold on a second. Which comes first, **C** or **F**? Looking at the Roothaan equations more closely, we see that both sides depend on the **C** matrix, since **F** is a function of the orbitals:
$${\bf F}({\bf C}){\bf C} = {\bf SC\epsilon}\,;\;\;F_{\mu\nu} = H_{\mu\nu} + 2(\mu\,\nu\mid\lambda\,\sigma)C_{\sigma i}C_{\lambda i} - (\mu\,\lambda\,\mid\nu\,\sigma)C_{\sigma i}C_{\lambda i}.$$
Therefore technically, *neither* **F** nor **C** can come first! In order to proceed, we instead begin with a *guess* for the Fock matrix, from which we obtain a guess at the **C** matrix. Without orbital coefficients (and therefore without electron densities), the most logical starting point for obtaining a guess at the Fock matrix is to begin with the only component of **F** that does *not* involve densities: the core Hamiltonian, **H**. Below, using the NumPy `np.linalg.eigh()` function, obtain coefficient and density matrices using the core guess:
1. Obtain ${\bf F}'$ by transforming the core Hamiltonian with the ${\bf A}$ matrix
2. Diagonalize the transformed Fock matrix for $\epsilon$ and ${\bf C}'$
3. Use doubly-occupied slice of coefficient matrix to build density matrix
```
# ==> Compute C & D matrices with CORE guess <==
# Transformed Fock matrix
F_p = A.dot(H).dot(A)
# Diagonalize F_p for eigenvalues & eigenvectors with NumPy
e, C_p = np.linalg.eigh(F_p)
# Transform C_p back into AO basis
C = A.dot(C_p)
# Grab occupied orbitals
C_occ = C[:, :ndocc]
# Build density matrix from occupied orbitals
D = np.einsum('pi,qi->pq', C_occ, C_occ)
```
The final quantity we need to compute before we can proceed with our implementation of the SCF procedure is the Born-Oppenheimer nuclear repulsion energy, $E^{\rm BO}_{\rm nuc}$. We could use the expression given above in $\S$1, however we can also obtain this value directly from <span style='font-variant: small-caps'> Psi4</span>'s `Molecule` class. In the cell below, compute the nuclear repulsion energy using either method.
```
# ==> Nuclear Repulsion Energy <==
E_nuc = mol.nuclear_repulsion_energy()
```
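For reference, the explicit pairwise sum from the overview section is easy to write out by hand. In this sketch, `Z` is assumed to be an array of nuclear charges and `coords` an array of Cartesian coordinates in Bohr (atomic units), obtained however you like:
~~~python
import numpy as np

def nuclear_repulsion(Z, coords):
    # E_nuc = sum over unique pairs A > B of Z_A * Z_B / |r_A - r_B|, in atomic units
    Z = np.asarray(Z, dtype=float)
    coords = np.asarray(coords, dtype=float)
    e_nuc = 0.0
    for a in range(len(Z)):
        for b in range(a):
            e_nuc += Z[a] * Z[b] / np.linalg.norm(coords[a] - coords[b])
    return e_nuc
~~~
With charges and a geometry taken from the same molecule (in Bohr), this should reproduce `mol.nuclear_repulsion_energy()` to numerical precision.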
Within each SCF iteration, we'll have to perform a number of tensor contractions when building the Fock matrix, computing the total RHF energy, and performing several transformations. Since the computational expense of this process is related to the number of unique indices, the most intensive step of computing the total electronic energy will be performing the four-index contractions corresponding to building Coulomb and Exchange matrices **J** and **K**, with elements
\begin{align}
J[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\,\nu\mid\lambda\,\sigma)D_{\lambda\sigma}\\
K[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\,\lambda\mid\nu\,\sigma)D_{\lambda\sigma},
\end{align}
when building the Fock matrix. Fortunately, once **J** and **K** have been built, the Fock matrix may be computed as a simple matrix addition, instead of element-wise:
$$ {\bf F} = {\bf H} + 2{\bf J} - {\bf K}.$$
Formation of the **J** and **K** matrices will be the most expensive step of the RHF procedure, scaling with respect to the number of AOs as ${\cal O}(N^4)$. Strategies for building these matrices efficiently, as well as different methods for handling these tensor contractions, will be discussed in greater detail in tutorials 2c and 2d in this module, respectively.
Let's now write our SCF iterations according to the following algorithm:
#### Algorithm 1: SCF Iteration
for scf_iter less than MAXITER, do:
1. Build Fock matrix
- Build the Coulomb matrix **J**
- Build the Exchange matrix **K**
- Form the Fock matrix
2. RHF Energy
- Compute total RHF energy
- If change in RHF energy less than E_conv, break
- Save latest RHF energy as E_old
3. Compute new orbital guess
- Transform Fock matrix to orthonormal AO basis
- Diagonalize ${\bf F}'$ for $\epsilon$ and ${\bf C}'$
- Back transform ${\bf C}'$ to AO basis
- Form **D** from occupied orbital slice of **C**
```
# ==> SCF Iterations <==
# Pre-iteration energy declarations
SCF_E = 0.0
E_old = 0.0
print('==> Starting SCF Iterations <==\n')
# Begin Iterations
for scf_iter in range(1, MAXITER + 1):
# Build Fock matrix
J = np.einsum('pqrs,rs->pq', I, D)
K = np.einsum('prqs,rs->pq', I, D)
F = H + 2*J - K
# Compute RHF energy
SCF_E = np.einsum('pq,pq->', (H + F), D) + E_nuc
print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E' % (scf_iter, SCF_E, SCF_E - E_old))
# SCF Converged?
if (abs(SCF_E - E_old) < E_conv):
break
E_old = SCF_E
# Compute new orbital guess
F_p = A.dot(F).dot(A)
e, C_p = np.linalg.eigh(F_p)
C = A.dot(C_p)
C_occ = C[:, :ndocc]
D = np.einsum('pi,qi->pq', C_occ, C_occ)
# MAXITER exceeded?
if (scf_iter == MAXITER):
psi4.core.clean()
raise Exception("Maximum number of SCF iterations exceeded.")
# Post iterations
print('\nSCF converged.')
print('Final RHF Energy: %.8f [Eh]' % (SCF_E))
```
Congratulations! You've written your very own Restricted Hartree-Fock program! Finally, let's check your final RHF energy against <span style='font-variant: small-caps'> Psi4</span>:
```
# Compare to Psi4
SCF_E_psi = psi4.energy('SCF')
psi4.driver.p4util.compare_values(SCF_E_psi, SCF_E, 6, 'SCF Energy')
```
## References
1. A. Szabo and N. S. Ostlund, *Modern Quantum Chemistry*, Introduction to Advanced Electronic Structure Theory. Courier Corporation, 1996.
2. I. N. Levine, *Quantum Chemistry*. Prentice-Hall, New Jersey, 5th edition, 2000.
3. T. Helgaker, P. Jorgensen, and J. Olsen, *Molecular Electronic Structure Theory*, John Wiley & Sons Inc, 2000.
|
github_jupyter
|
# ==> Import Psi4 & NumPy <==
import psi4
import numpy as np
# ==> Set Basic Psi4 Options <==
# Memory specification
psi4.set_memory(int(5e8))
numpy_memory = 2
# Set output file
psi4.core.set_output_file('output.dat', False)
# Define Physicist's water -- don't forget C1 symmetry!
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
# Set computation options
psi4.set_options({'basis': 'cc-pvdz',
'scf_type': 'pk',
'e_convergence': 1e-8})
# ==> Set default program options <==
# Maximum SCF iterations
MAXITER = 40
# Energy convergence criterion
E_conv = 1.0e-6
# ==> Compute static 1e- and 2e- quantities with Psi4 <==
# Class instantiation
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('basis'))
mints = psi4.core.MintsHelper(wfn.basisset())
# Overlap matrix
S = np.asarray(mints.ao_overlap())
# Number of basis Functions & doubly occupied orbitals
nbf = S.shape[0]
ndocc = wfn.nalpha()
print('Number of occupied orbitals: %3d' % (ndocc))
print('Number of basis functions: %3d' % (nbf))
# Memory check for ERI tensor
I_size = (nbf**4) * 8.e-9
print('\nSize of the ERI tensor will be {:4.2f} GB.'.format(I_size))
memory_footprint = I_size * 1.5
if I_size > numpy_memory:
psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds allotted memory \
limit of %4.2f GB." % (memory_footprint, numpy_memory))
# Build ERI Tensor
I = np.asarray(mints.ao_eri())
# Build core Hamiltonian
T = np.asarray(mints.ao_kinetic())
V = np.asarray(mints.ao_potential())
H = T + V
# ==> Inspecting S for AO orthonormality <==
hope = np.allclose(S, np.eye(S.shape[0]))
print('\nDo we have any hope that our AO basis is orthonormal? %s!' % (hope))
# ==> Construct AO orthogonalization matrix A <==
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
# Check orthonormality
S_p = A.dot(S).dot(A)
new_hope = np.allclose(S_p, np.eye(S.shape[0]))
if new_hope:
print('There is a new hope for diagonalization!')
else:
print("Whoops...something went wrong. Check that you've correctly built the transformation matrix.")
# ==> Compute C & D matrices with CORE guess <==
# Transformed Fock matrix
F_p = A.dot(H).dot(A)
# Diagonalize F_p for eigenvalues & eigenvectors with NumPy
e, C_p = np.linalg.eigh(F_p)
# Transform C_p back into AO basis
C = A.dot(C_p)
# Grab occupied orbitals
C_occ = C[:, :ndocc]
# Build density matrix from occupied orbitals
D = np.einsum('pi,qi->pq', C_occ, C_occ)
# ==> Nuclear Repulsion Energy <==
E_nuc = mol.nuclear_repulsion_energy()
# ==> SCF Iterations <==
# Pre-iteration energy declarations
SCF_E = 0.0
E_old = 0.0
print('==> Starting SCF Iterations <==\n')
# Begin Iterations
for scf_iter in range(1, MAXITER + 1):
# Build Fock matrix
J = np.einsum('pqrs,rs->pq', I, D)
K = np.einsum('prqs,rs->pq', I, D)
F = H + 2*J - K
# Compute RHF energy
SCF_E = np.einsum('pq,pq->', (H + F), D) + E_nuc
print('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E' % (scf_iter, SCF_E, SCF_E - E_old))
# SCF Converged?
if (abs(SCF_E - E_old) < E_conv):
break
E_old = SCF_E
# Compute new orbital guess
F_p = A.dot(F).dot(A)
e, C_p = np.linalg.eigh(F_p)
C = A.dot(C_p)
C_occ = C[:, :ndocc]
D = np.einsum('pi,qi->pq', C_occ, C_occ)
# MAXITER exceeded?
if (scf_iter == MAXITER):
psi4.core.clean()
raise Exception("Maximum number of SCF iterations exceeded.")
# Post iterations
print('\nSCF converged.')
print('Final RHF Energy: %.8f [Eh]' % (SCF_E))
# Compare to Psi4
SCF_E_psi = psi4.energy('SCF')
psi4.driver.p4util.compare_values(SCF_E_psi, SCF_E, 6, 'SCF Energy')
| 0.54359 | 0.99251 |
```
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import norm
from sklearn.naive_bayes import GaussianNB
matplotlib.style.use('ggplot')
# generate a 2D gaussian with density
def gauss_pdf(mean, cov, x):
return (1./(((2*np.pi)**(1.*len(mean)/2))*np.linalg.det(cov)**.5))*np.exp(-np.matrix(x-mean)*np.matrix(np.linalg.inv(cov))*np.matrix(x-mean).T/2 ).tolist()[0][0]
N = 1000
p = []
mean = [1, 1]
cov = [[1, -.25], [-.25, 1]]
x = np.random.multivariate_normal(mean, cov, N)
for n in range(N):
p.append( gauss_pdf(mean, cov, x[n]) )
p = np.array(p)
# plot the 2D RVs with the 3rd dim being the pdf value
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(x[:,0], x[:,1], p)
ax.legend()
plt.show()
def mahal(mean, cov, x):
return (-np.matrix(x-mean)*np.matrix(np.linalg.inv(cov))*np.matrix(x-mean).T/2 ).tolist()[0][0]
x = np.random.multivariate_normal([-1, -1], [[1, -.5], [-.5, 1]], 50000)
m = []
for n in range(N):
m.append( gauss_pdf(mean, cov, x[n]) )
x = x.T
d = plt.hist2d(x[0], x[1], bins = 75)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
x = np.linspace(-3, 5, 500)
pw1 = .6
pw2 = .4
pxw1 = norm.pdf(x, 3, .8)
pxw2 = norm.pdf(x, 1, .6)
px = pxw1*pw1+pxw2*pw2
pwx1 = pxw1*pw1/px
pwx2 = pxw2*pw2/px
plt.figure()
plt.plot(x, pxw1, 'b-', lw=5, alpha=0.6, label='$p(x|\omega_1)$')
plt.plot(x, pxw2, 'r-', lw=5, alpha=0.6, label='$p(x|\omega_2)$')
plt.plot(x, pwx1, 'b--', lw=5, alpha=0.6, label='$p(\omega_1|x)$')
plt.plot(x, pwx2, 'r--', lw=5, alpha=0.6, label='$p(\omega_2|x)$')
plt.legend()
plt.xlabel('$x$')
plt.ylabel('conditional probability')
plt.figure()
plt.plot(x, pxw1, 'b-', lw=5, alpha=0.6, label='$p(x|\omega_1)$')
plt.plot(x, pxw2, 'r-', lw=5, alpha=0.6, label='$p(x|\omega_2)$')
plt.legend()
plt.fill_between(x, 0, pxw2, where=pxw1 > pxw2, facecolor='red', alpha=0.5)
plt.fill_between(x, 0, pxw1, where=pxw2 > pxw1, facecolor='blue', alpha=0.5)
plt.text(-2.9, .4, '$p_2 = \int_{\mathcal{R}_2}p(x|\omega_1)p(\omega_1)dx$', fontsize=15, color='b')
plt.text(-2.9, .55, '$p_1 = \int_{\mathcal{R}_1}p(x|\omega_2)p(\omega_2)dx$', fontsize=15, color='r')
plt.text(-2.9, .2, '$p_{err} = p_1+p_2$', fontsize=15)
ax.arrow(1.9, 0.5, 2.1, 0.05, head_width=0.05, head_length=0.05, fc='k', ec='k')
plt.xlabel('$x$')
plt.ylabel('conditional probability')
pxw1
data = np.genfromtxt('../data/optical_train.csv', delimiter=',')
X = data[:,0:-1]
y = data[:,-1]
data = np.genfromtxt('../data/optical_test.csv', delimiter=',')
Xt = data[:,0:-1]
yt = data[:,-1]
gnb = GaussianNB()
gnb.fit(X, y)
y_pred = gnb.predict(Xt)
print("Error: ", 100*np.sum(1.*(y_pred != yt))/len(y_pred))
from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred = gnb.fit(iris.data, iris.target).predict(iris.data)
print("Number of mislabeled points out of a total %d points : %d"% (iris.data.shape[0],(iris.target != y_pred).sum()))
print(y_pred, yt)
```
|
github_jupyter
|
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import norm
from sklearn.naive_bayes import GaussianNB
matplotlib.style.use('ggplot')
# generate a 2D gaussian with density
def gauss_pdf(mean, cov, x):
return (1./(((2*np.pi)**(1.*len(mean)/2))*np.linalg.det(cov)**.5))*np.exp(-np.matrix(x-mean)*np.matrix(np.linalg.inv(cov))*np.matrix(x-mean).T/2 ).tolist()[0][0]
N = 1000
p = []
mean = [1, 1]
cov = [[1, -.25], [-.25, 1]]
x = np.random.multivariate_normal(mean, cov, N)
for n in range(N):
p.append( gauss_pdf(mean, cov, x[n]) )
p = np.array(p)
# plot the 2D RVs with the 3rd dim being the pdf value
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(x[:,0], x[:,1], p)
ax.legend()
plt.show()
def mahal(mean, cov, x):
return (-np.matrix(x-mean)*np.matrix(np.linalg.inv(cov))*np.matrix(x-mean).T/2 ).tolist()[0][0]
x = np.random.multivariate_normal([-1, -1], [[1, -.5], [-.5, 1]], 50000)
m = []
for n in range(N):
m.append( gauss_pdf(mean, cov, x[n]) )
x = x.T
d = plt.hist2d(x[0], x[1], bins = 75)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
x = np.linspace(-3, 5, 500)
pw1 = .6
pw2 = .4
pxw1 = norm.pdf(x, 3, .8)
pxw2 = norm.pdf(x, 1, .6)
px = pxw1*pw1+pxw2*pw2
pwx1 = pxw1*pw1/px
pwx2 = pxw2*pw2/px
plt.figure()
plt.plot(x, pxw1, 'b-', lw=5, alpha=0.6, label='$p(x|\omega_1)$')
plt.plot(x, pxw2, 'r-', lw=5, alpha=0.6, label='$p(x|\omega_2)$')
plt.plot(x, pwx1, 'b--', lw=5, alpha=0.6, label='$p(\omega_1|x)$')
plt.plot(x, pwx2, 'r--', lw=5, alpha=0.6, label='$p(\omega_2|x)$')
plt.legend()
plt.xlabel('$x$')
plt.ylabel('conditional probability')
plt.figure()
plt.plot(x, pxw1, 'b-', lw=5, alpha=0.6, label='$p(x|\omega_1)$')
plt.plot(x, pxw2, 'r-', lw=5, alpha=0.6, label='$p(x|\omega_2)$')
plt.legend()
plt.fill_between(x, 0, pxw2, where=pxw1 > pxw2, facecolor='red', alpha=0.5)
plt.fill_between(x, 0, pxw1, where=pxw2 > pxw1, facecolor='blue', alpha=0.5)
plt.text(-2.9, .4, '$p_2 = \int_{\mathcal{R}_2}p(x|\omega_1)p(\omega_1)dx$', fontsize=15, color='b')
plt.text(-2.9, .55, '$p_1 = \int_{\mathcal{R}_1}p(x|\omega_2)p(\omega_2)dx$', fontsize=15, color='r')
plt.text(-2.9, .2, '$p_{err} = p_1+p_2$', fontsize=15)
ax.arrow(1.9, 0.5, 2.1, 0.05, head_width=0.05, head_length=0.05, fc='k', ec='k')
plt.xlabel('$x$')
plt.ylabel('conditional probability')
pxw1
data = np.genfromtxt('../data/optical_train.csv', delimiter=',')
X = data[:,0:-1]
y = data[:,-1]
data = np.genfromtxt('../data/optical_test.csv', delimiter=',')
Xt = data[:,0:-1]
yt = data[:,-1]
gnb = GaussianNB()
gnb.fit(X, y)
y_pred = gnb.predict(Xt)
print("Error: ", 100*np.sum(1.*(y_pred != yt))/len(y_pred))
from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred = gnb.fit(iris.data, iris.target).predict(iris.data)
print("Number of mislabeled points out of a total %d points : %d"% (iris.data.shape[0],(iris.target != y_pred).sum()))
print(y_pred, yt)
| 0.603932 | 0.850903 |
```
import os
import json
import requests
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors="replace"):
self.encoder = encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
"utf-8", errors=self.errors
)
return text
def get_encoder(bpe_path, encoder_path):
with open(encoder_path, encoding="utf-8") as f:
encoder = json.load(f)
with open(bpe_path, encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
text = requests.get("https://www.gutenberg.org/files/58884/58884-0.txt").text
enc = get_encoder(
bpe_path="/mnt/efs/wikipedia/gpt-2/vocab.bpe",
encoder_path="/mnt/efs/wikipedia/gpt-2/encoder.json",
)
encoded = enc.encode(text)
decoded = enc.decode(encoded)
print(decoded[:5000])
```
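As a small, self-contained sanity check of the two helpers above (no vocab files needed): `bytes_to_unicode` should be a bijection over all 256 byte values, and `get_pairs` should list the adjacent symbol pairs that BPE considers for merging.
```
byte_encoder = bytes_to_unicode()
print(len(byte_encoder), len(set(byte_encoder.values())))  # 256 256 -> the mapping is reversible
print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
```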
|
github_jupyter
|
import os
import json
import requests
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors="replace"):
self.encoder = encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
"utf-8", errors=self.errors
)
return text
def get_encoder(bpe_path, encoder_path):
with open(encoder_path, encoding="utf-8") as f:
encoder = json.load(f)
with open(bpe_path, encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
text = requests.get("https://www.gutenberg.org/files/58884/58884-0.txt").text
enc = get_encoder(
bpe_path="/mnt/efs/wikipedia/gpt-2/vocab.bpe",
encoder_path="/mnt/efs/wikipedia/gpt-2/encoder.json",
)
encoded = enc.encode(text)
decoded = enc.decode(encoded)
print(decoded[:5000])
| 0.4856 | 0.291932 |
RIHAD VARIAWA, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING
<h1>Clustering Algorithms</h1>
<h3>Unsupervised learning</h3>
In unsupervised learning, we do something slightly different.
We say, all right, here's our space of independent variables.
Now try and see the many features that we have.
And now try and see what values of these features
will make subsets of these cases closer together
by some estimate.
OK, how do they fit closer together?
<ul>
<li>The algorithm tries to group similar data together (clusters) using the values of the feature space.</li>
You can think of having a vast--
data points sitting all over the place.
And you want to find planes that cut through the space, that can
group these data points together in a similar way.
OK, they're called clusters.
And they use only value the feature space.
Of course, what we do is we, again,
work our way through a training and testing sample.
And in the training sample, we give it
the independent variables, and it groups them.
And then we want to measure how well that's
done by looking whether these groups map
onto something in the real world that makes sense to us, right.
So, for example, we could take physical characteristics
of people and say, can we differentiate
between these physical characteristics
and put them into two groups?
If we put them into two groups, maybe we
can figure out men versus women, something like that.
But we don't say that this case corresponds
to a man or this set of features corresponds to women.
We let the algorithm use the features to differentiate
between the two groups of people.
</ul>
<h3>K-Means Clustering</h3>
A popular algorithm for doing clustering
is __K-means clustering__. What it does
is partition the data space into clusters.
And it minimizes the distance between the mean of a cluster
and the data points.
So every data point is sitting in n-dimensional space, where
__each dimension is a feature__.
And so you can measure the distance between one data
point and another data point.
And we want to find clusters where
the __mean distance between data points in each cluster
is minimal__.
So we want to minimize that distance.
And what you need to know in advance
is how many clusters you're going to have in your data
set, in your domain, right.
Like, if you're doing men versus women, you have two clusters.
You know that.
You can't say _find me the number of clusters in that case_.
You need to actually tell the K-means algorithm
how many clusters to use.
<ul>
<li>partitions the dataspace into clusters
<li>minimizes distance between the mean of a cluster and the data points
<li>the desired number of clusters must be known in advance
</ul>
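To make those points concrete, here is a minimal from-scratch sketch of the classic Lloyd iterations behind K-means; it is purely illustrative, and the scikit-learn `KMeans` estimator used later in this notebook is the practical tool.
```
import numpy as np

def lloyd_kmeans(X, k, n_iter=100, seed=0):
    # Classic K-means loop: assign each point to its nearest centroid,
    # then move each centroid to the mean of the points assigned to it.
    rng = np.random.default_rng(seed)
    centroids = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(n_iter):
        d2 = ((X[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
        labels = d2.argmin(axis=1)
        new_centroids = np.array([X[labels == j].mean(axis=0) if np.any(labels == j)
                                  else centroids[j] for j in range(k)])
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return labels, centroids

# Two well-separated 2D blobs: the recovered centroids should land near (0, 0) and (5, 5)
rng = np.random.default_rng(1)
X_toy = np.vstack([rng.normal(0, 0.5, (50, 2)), rng.normal(5, 0.5, (50, 2))])
labels, centroids = lloyd_kmeans(X_toy, k=2)
print(centroids)
```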
<h2>Image recognition dataset</h2>
<ul>
<li>Digits 0-9 pixelated onto an 8x8 grid (64 cells)
<li>Each value represents the area that is shaded
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
digits = load_digits()
digits
type(digits)
for item in digits:
print(item)
len(digits.data)
digits.data[0]
digits.images[0]
len(digits.target)
digits.target[10]
digits.target_names
```
### Scale the data to normal distribution
[Standardization of datasets](http://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-scaler) is a common requirement for many machine learning estimators implemented in scikit-learn; they might behave badly if the individual features do not more or less look like standard normally distributed data: Gaussian with zero mean and unit variance.
In practice we often ignore the shape of the distribution and just transform the data to center it by removing the mean value of each feature, then scale it by dividing non-constant features by their standard deviation.
For instance, many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the l1 and l2 regularizers of linear models) assume that all features are centered around zero and have variance in the same order. If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected.
The function scale provides a quick and easy way to perform this operation on a single array-like dataset:
```
data = scale(digits.data)
data
```
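The same transformation can be written out by hand, which makes the "center, then divide by the standard deviation" description above explicit (constant features are left alone, mirroring what `scale` does):
```
# Manual standardization: subtract each feature's mean and divide by its std.
std = digits.data.std(axis=0)
std[std == 0] = 1.0          # constant (all-zero) pixels: avoid dividing by zero
manual = (digits.data - digits.data.mean(axis=0)) / std
print(np.allclose(manual, data))   # expected: True
```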
#### Scaled data has zero mean and unit variance:
```
data.mean(axis=0)
data.std(axis=0)
```
### Render the digit images and their associated values
```
def print_digits(images,y,max_n=16):
# set up the figure size in inches
fig = plt.figure(figsize=(12, 12))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1,
hspace=.05, wspace=.5)
i = 0
    while i < max_n and i < len(images):
# plot the images in a matrix of 20x20
p = fig.add_subplot(20, 20, i + 1, xticks=[],yticks=[])
p.imshow(images[i], cmap=plt.cm.bone)
# label the image with the target value
p.text(0, 14, str(y[i]))
i = i + 1
print_digits(digits.images, digits.target, max_n=16)
```
### Training and testing samples
Learning the parameters of a prediction function and testing it on the same data is a methodological mistake: a model that would just repeat the labels of the samples that it has just seen would have a perfect score but would fail to predict anything useful on yet-unseen data. This situation is called overfitting. To avoid it, it is common practice when performing a (supervised) machine learning experiment to hold out part of the available data as a test set X_test, y_test. Note that the word “experiment” is not intended to denote academic use only, because even in commercial settings machine learning usually starts out experimentally.
In scikit-learn a random split into training and test sets can be quickly computed with the [train_test_split](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) helper function.
<img src='scexamp1.png'>
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test, images_train, images_test = train_test_split(
    data,
    digits.target,
    digits.images,
    test_size=0.25,
    random_state=42)
n_samples,n_features = X_train.shape
n_digits = len(np.unique(y_train))
labels = y_train
len(np.unique(y_train))
```
### Create the model and fit the data
And the last thing we do, which we do always
is always the same thing, is we import the cluster algorithm--
the cluster library, a module, and create a k-means clustering
algorithm from that.
We tell it to initialize it by doing some initialization
stuff.
So what it-- k-means works better when you initialize it,
so we initialize it initially.
We run this pre-algorithm, you can think of,
to do the initialization.
And to start with a-- well, what k-means does
is it starts by randomly allocating the digits, right?
So it'll say, all right, I need 10 categories,
and that's the number of clusters, 10.
And it'll randomly assign the data to 10 categories.
But if you can sort of intelligently
start off and use some knowledge about the data,
like maybe the means inside the values, all that kind of stuff,
then you're better off.
So you can use this __k-means-plus-plus__ to actually start the algorithm at a better point.
And so that's the k-means.
The [__KMeans__](http://scikit-learn.org/stable/modules/clustering.html#k-means) algorithm clusters data by trying to separate samples in n groups of equal variance, minimizing a criterion known as the inertia or within-cluster sum-of-squares. This algorithm requires the number of clusters to be specified. It scales well to large number of samples and has been used across a large range of application areas in many different fields.
__The k-means algorithm divides a set of N samples X into K disjoint clusters C__, each described by the mean _mu-j_ of the samples in the cluster. The means are commonly called the cluster “centroids”; note that they are not, in general, points from X, although they live in the same space. The K-means algorithm aims to choose centroids that minimise the inertia, or within-cluster sum of squared criterion
```
from sklearn import cluster
clf = cluster.KMeans(init='k-means++',n_clusters=10,random_state=42)
```
k-means++ runs an initializer before using the k-means algorithm
```
clf.fit(X_train)
#images_train
```
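As a quick check on the "within-cluster sum-of-squares" wording above, the fitted model's `inertia_` can be recomputed by hand from `cluster_centers_` and `labels_`:
```
# Sum of squared distances from each training point to its assigned centroid
manual_inertia = ((X_train - clf.cluster_centers_[clf.labels_]) ** 2).sum()
print(manual_inertia, clf.inertia_)   # the two numbers should agree to numerical precision
```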
## Call print_digits with training images, and computed labels
### Returned labels are cluster numbers
So we've written our model so our model now has predictions
on--
not predictions, but really it's assigned
cluster numbers, to groups of to each individual data points.
So we've told our model to make 10 clusters.
So it's assigned 10 cluster numbers 0, 1, 2, to 9,
to each individual data point.
And those numbers are in this CLF, as our model,
in this attribute called `labels_`.
So what we're going to do now is we're
going to take a look at our training data set
and look at the first 20 cases, and see what cluster numbers
were assigned to each case.
And we'll look at the actual image itself.
```
print_digits(images_train,clf.labels_,max_n=20)
```
So what we want to do then is, we
want to use the test sample now to generate predictions.
So we've got our training sample.
We've trained our model to differentiate
among these things by looking at data points.
And now we give it the testing sample.
And it's going to use the feature values and the trained
clustering that we did before to predict
which cluster things belong to.
```
y_pred=clf.predict(X_test)
#y_pred
def print_cluster(images,y_pred,cluster_number):
img=[]
lbl=[]
for i in range(len(images)):
if y_pred[i]==cluster_number:
img.append(images[i])
lbl.append(y_pred[i])
# images=images[y_pred==cluster_number]
# y_pred = y_pred[y_pred==cluster_number]
print_digits(img,lbl,max_n=6)
for k in range(10):
    print_cluster(images_test, y_pred, k)
```
<h1>Evaluating the model</h1>
So let's evaluate the model.
For evaluating the model, we use something called __Adjusted Rand
Index__.
It's a measure of the __similarity between two groups__.
So the goal here is to-- we have a group
of actual test data, that is the actual numbers that we have.
So we know that each case in our dataset,
we know what the number is.
We know whether it's a 1, or a 2, or a 3.
And we also have the predictions.
That's what our model is predicting.
And we want to see what's the similarity between these two
sets.
Are they reasonably similar?
We don't look at actual values.
We're seeing whether the differentiation is similar
or not.
So if it's 0.0, then there's no similarity at all.
And it's completely random, any overlap in that.
And 1.0 indicates that the two groups are identical.
<li>Adjusted rand index: A measure of the similarity between two groups</li>
<li>We'll use it to see how similar the y_test actuals and predicted groupings are</li>
<li>http://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_rand_score.html</li>
<li>0.0 indicates that there is no similarity and any overlap is explainable as totally random</li>
<li>1.0 indicates that the two groups are identical</li>
```
from sklearn import metrics
print("Adjusted rand score: {0:2}".
format(metrics.adjusted_rand_score(y_test, y_pred)))
```
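Because the adjusted Rand index compares groupings rather than the label values themselves, relabelling the clusters does not change the score; a tiny illustration:
```
# Same grouping under different label names -> perfect score
print(metrics.adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]))   # 1.0
# A grouping that splits every pair apart -> negative, i.e. no better than chance
print(metrics.adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]))   # -0.5
```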
<h2>Confusion matrix</h2>
So what this _confusion matrix_ is doing is,
we have a cluster number and actual prediction.
So what this does is, each row in this corresponds
to a number in the test sample.
So each row is--
so this is Row 0 in the test sample.
And each column is the cluster assigned to that case
by the model.
So what this is saying is that for our actual 0s, none of them
were put in Cluster 0.
None of them were put in Cluster 1, so none of them
into Cluster 0, none of them into Cluster 1.
43 were assigned to Cluster 2, and none of them
into any other clusters.
So if you're looking at 0s, we've
done a great job of identifying them.
If we get a Cluster 2, we are reasonably
confident that it's a 0.
Let me rephrase that.
If our model is saying it's 0, then it's
going to put it inside Cluster 2.
So that's a great number.
That's our testing sample.
It's not the training sample.
Can it tell us confidently the other way?
Well, almost, because if we find that we have Cluster 2, then
the only case in Cluster 2 that's not identified as 0,
is this one here, which is 0, 1, 2, 3, 4, 5 6.
In one case, it took a 6 that was actually a 6,
and identified it as a-- put it in Cluster 2, which is a 0.
So with 0s, we've done a very good job.
So given a 0, we can, with reasonable confidence--
and Cluster Number 2-- we can, with extreme confidence,
actually, say, hey, that's a 0.
<li>Each row corresponds to a digit (y_test)</li>
<li>Each column corresponds to y_pred (the cluster number)</li>
<li>Each entry is the number of times that y_test digit was assigned to that cluster</li>
<li>For example, 0 is assigned entirely to cluster 2 (Row 0, Column 2)</li>
<li>8 is assigned to cluster 0 21 times (Row 8, Column 0)</li>
<li>7 is assigned to cluster 6 34 times (Row 7, Column 6)</li>
```
# Rows are the actual digits (y_test); columns are the predicted cluster ids (y_pred)
print(metrics.confusion_matrix(y_test, y_pred))
```
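Before walking through the matrix by hand, here is a small helper I'm adding (not part of the original notebook; "purity" is just my shorthand) that reports, for each cluster, which digit dominates it and what share of its members that digit accounts for, using the y_test and y_pred computed above:
```
import numpy as np
from sklearn import metrics

cm = metrics.confusion_matrix(y_test, y_pred)
col_totals = cm.sum(axis=0)                          # test cases per cluster
purity = cm.max(axis=0) / np.maximum(col_totals, 1)  # dominant digit's share
for k in range(cm.shape[1]):
    print(f"cluster {k}: dominant digit {cm[:, k].argmax()}, "
          f"purity {purity[k]:.2f} ({cm[:, k].max()}/{col_totals[k]})")
```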
Now let's look at another one: the 3s.
Counting down the rows (0, 1, 2, 3) brings us to the row for the 3s.
The 3s get assigned to Cluster 0 in one case,
to Cluster 3 in one case, and so on,
but to Cluster 8 in 39 cases.
So in 39 cases an actual 3 ends up in Cluster 8,
and in seven cases it ends up in some other cluster.
That means we're capturing 39 out of 39 plus 7, or 46,
of the actual 3s, which is roughly 85 percent.
However, if you look at it the other way around
and go down the column, Column Number 8,
we find that in Cluster 8, 39 of the cases are 3s,
but one is a 2, 16 are 5s, 11 are 8s, and 40 are 9s.
So, given that a digit lands in Cluster 8,
can we say with confidence that it's a 3?
Not really, because there are so many cases where it
could be a 9 or an 8.
In fact, there are more 9s than 3s in that cluster.
So there the clustering has done a very poor job
of separating the digit cleanly.
So this is what you want to do with a confusion matrix:
see how well the model has actually figured out the clusters.
You look at it, decide for each digit how well
the clustering is doing, and maybe go get more data,
or higher-resolution images,
before concluding that you have a good model.
But we can see that our adjusted Rand score of about 0.56
comes from the fact that we're doing well on some digits.
We're doing well on 0, and probably on several others too.
Take 4, for example: counting down to Row 4,
there are 50 cases in Cluster 1
and only 5 cases in other clusters.
Even more important, Cluster 1
doesn't contain anything except 4s.
So if a case lands in Cluster 1, you can say, hey, that's a 4,
even though not every 4 is recognized correctly.
And that's how you analyze
the results of a clustering analysis
by looking at the confusion matrix.
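One way to turn this kind of eyeballing into a single number (again a sketch I'm adding, not something the original notebook does) is to map every cluster to its most common digit and score that mapping on the test set:
```
from sklearn import metrics

cm = metrics.confusion_matrix(y_test, y_pred)
cluster_to_digit = cm.argmax(axis=0)   # most common digit in each cluster (column)
mapped = cluster_to_digit[y_pred]      # translate predicted cluster ids into digits
print("Accuracy of the dominant-digit mapping:",
      metrics.accuracy_score(y_test, mapped))
```
A value well above 10 percent (chance level for ten digits) tells you the clusters really do line up with the digits, even though k-means never saw the labels.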
## Graphical view of the clusters
<li>First reduce the X dimensions to 2 using principal component analysis (PCA)</li>
[PCA](http://scikit-learn.org/stable/modules/decomposition.html#pca) is used to decompose a multivariate dataset into a set of successive orthogonal components that explain a maximum amount of the variance. In scikit-learn, PCA is implemented as a transformer object that learns n components in its fit method, and can be used on new data to project it onto these components.
The optional parameter whiten=True makes it possible to project the data onto the singular space while scaling each component to unit variance. This is often useful if the models downstream make strong assumptions about the isotropy of the signal: this is for example the case for Support Vector Machines with the RBF kernel and the K-Means clustering algorithm. A quick check of how much variance two components actually retain follows this list.
<li>https://en.wikipedia.org/wiki/Principal_component_analysis</li>
<li>Then figure out the range of values and define the grid</li>
<li>Run k-means on the reduced (2 component) data set</li>
<li>Draw a color map and plot the pca points on this map</li>
<li>Find the cluster centroids and plot them on the color map</li>
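As mentioned above, here is a quick check (my addition, not in the original notebook) of how much of the variance in the scaled digit features the first two principal components actually keep; it is typically well under half, which is worth remembering when reading the 2-D picture below:
```
from sklearn import decomposition

# Fit PCA on the same training features used above and report the variance kept
pca_check = decomposition.PCA(n_components=2).fit(X_train)
print(pca_check.explained_variance_ratio_)
print("total variance retained:", pca_check.explained_variance_ratio_.sum())
```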
```
from sklearn import decomposition
pca = decomposition.PCA(n_components=2).fit(X_train)
reduced_X_train = pca.transform(X_train)
# Step size of the mesh; smaller values give a finer background map.
h = .01
# Plot range: pad the PCA-reduced points by 1 on each side,
# covering the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = reduced_X_train[:, 0].min() - 1, reduced_X_train[:, 0].max() + 1
y_min, y_max = reduced_X_train[:, 1].min() - 1, reduced_X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
kmeans = cluster.KMeans(init='k-means++', n_clusters=n_digits,
n_init=10)
kmeans.fit(reduced_X_train)
# Assign every point of the mesh to its nearest cluster
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower')
plt.plot(reduced_X_train[:, 0], reduced_X_train[:, 1], 'k.',
markersize=2)
# Plot the centroids as white dots
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],marker='.',
s=169, linewidths=3, color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA reduced data)\nCentroids are marked with white dots')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
```