# <font color='firebrick'><center>Idx Stats Report</center></font>
### This report summarizes the output of the samtools idxstats tool, which reports the number of mapped reads per chromosome/contig.
<br>
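As background, `samtools idxstats` emits one tab-separated row per reference sequence with four columns: reference name, sequence length, number of mapped read segments, and number of unmapped read segments. The snippet below is a minimal sketch of loading such output directly with pandas; the file name `sample.idxstats.tsv` is hypothetical, and the report itself instead reads the pre-loaded `idxstats_reads_per_chromosome` table from the pipeline database.

```
# Minimal sketch (assumes a hypothetical file produced by
# `samtools idxstats sample.bam > sample.idxstats.tsv`);
# the report below reads the same information from the csvdb SQLite database instead.
import pandas as pd

idx = pd.read_csv(
    "sample.idxstats.tsv",
    sep="\t",
    names=["contig", "length", "mapped", "unmapped"],
)
print(idx.head())
print("Total mapped reads:", idx["mapped"].sum())
```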
```
from IPython.display import display, Markdown
from IPython.display import HTML
import IPython.core.display as di
import csv
import numpy as np
import zlib
import CGAT.IOTools as IOTools
import itertools as ITL
import os
import string
import pandas as pd
import sqlite3
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages # noqa: E402
#mpl.use('Agg') # noqa: E402
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
from matplotlib.colors import ListedColormap
from matplotlib import cm
from matplotlib import rc, font_manager
import CGAT.Experiment as E
import math
from random import shuffle
import matplotlib as mpl
import datetime
import seaborn as sns
import nbformat
%matplotlib inline
##################################################
#Plot customization
#plt.ioff()
plt.style.use('seaborn-white')
#plt.style.use('ggplot')
title_font = {'size':'20','color':'darkblue', 'weight':'bold', 'verticalalignment':'bottom'} # Bottom vertical alignment for more space
axis_font = {'size':'18', 'weight':'bold'}
#For summary page pdf
'''To add description page
plt.figure()
plt.axis('off')
plt.text(0.5,0.5,"my title",ha='center',va='center')
pdf.savefig()
'''
#Pandas data frame customization
pd.options.display.width = 80
pd.set_option('display.max_colwidth', -1)
chr_feature=['total_reads','total_mapped_reads',
'chr1','chr2','chr3','chr4',
'chr5','chr6','chr7','chr8',
'chr9','chr10','chr11','chr12',
'chr13','chr14','chr15','chr16',
'chr17','chr18','chr19','chrX',
'chrY','chrM']
chr_index=['Total reads','Total mapped reads',
'chr1','chr2','chr3','chr4',
'chr5','chr6','chr7','chr8',
'chr9','chr10','chr11','chr12',
'chr13','chr14','chr15','chr16',
'chr17','chr18','chr19','chrX',
'chrY','chrM']
colors_category = ['red','green','darkorange','yellowgreen', 'pink', 'gold', 'lightskyblue',
'orchid','darkgoldenrod','skyblue','b', 'red',
'darkorange','grey','violet','magenta','cyan',
'hotpink','mediumslateblue']
threshold = 5
def hover(hover_color="#ffff99"):
return dict(selector="tr:hover",
props=[("background-color", "%s" % hover_color)])
def y_fmt(y, pos):
decades = [1e9, 1e6, 1e3, 1e0, 1e-3, 1e-6, 1e-9 ]
suffix = ["G", "M", "k", "" , "m" , "u", "n" ]
if y == 0:
return str(0)
for i, d in enumerate(decades):
if np.abs(y) >=d:
val = y/float(d)
signf = len(str(val).split(".")[1])
if signf == 0:
return '{val:d} {suffix}'.format(val=int(val), suffix=suffix[i])
else:
if signf == 1:
#print(val, signf)
if str(val).split(".")[1] == "0":
return '{val:d} {suffix}'.format(val=int(round(val)), suffix=suffix[i])
tx = "{"+"val:.{signf}f".format(signf = signf) +"} {suffix}"
return tx.format(val=val, suffix=suffix[i])
#return y
return y
def getTables(dbname):
'''
Prints the names of all tables in the database.
'''
dbh = sqlite3.connect(dbname)
c = dbh.cursor()
statement = "SELECT name FROM sqlite_master WHERE type='table'"
c.execute(statement)
tables = c.fetchall()
print(tables)
c.close()
dbh.close()
return
def readDBTable(dbname, tablename):
'''
Reads the specified table from the specified database.
Returns a list of tuples representing each row
'''
dbh = sqlite3.connect(dbname)
c = dbh.cursor()
statement = "SELECT * FROM %s" % tablename
c.execute(statement)
allresults = c.fetchall()
c.close()
dbh.close()
return allresults
def getDBColumnNames(dbname, tablename):
dbh = sqlite3.connect(dbname)
res = pd.read_sql('SELECT * FROM %s' % tablename, dbh)
dbh.close()
return res.columns
def plotBar(df,samplename):
fig, ax = plt.subplots()
ax.set_frame_on(True)
ax.xaxis.set_major_formatter(FuncFormatter(y_fmt))
colors=['yellowgreen','darkorange']
for ii in range(0,df.shape[0]):
plt.barh(ii,df['chrX'][ii],color=colors[0], align="center",height=0.6,edgecolor=colors[0])
plt.barh(ii,df['chrY'][ii],color=colors[1], align="center",height=0.6,edgecolor=colors[0])
fig = plt.gcf()
fig.set_size_inches(20,14)
plt.yticks(fontsize =20,weight='bold')
plt.yticks(range(df.shape[0]),df['track'])
plt.xticks(fontsize =20,weight='bold')
ax.grid(which='major', linestyle='-', linewidth='0.3')
plt.ylabel("Sample",labelpad=65,fontsize =25,weight='bold')
plt.xlabel("\nMapped reads",fontsize =25,weight='bold')
plt.title("Reads mapped to X and Y chromosome\n",fontsize =30,weight='bold',color='darkblue')
plt.gca().invert_yaxis()
legend_properties = {'weight':'bold','size':'20'}
leg = plt.legend(chr_feature[21:23],title="Contigs",prop=legend_properties,bbox_to_anchor=(1.14,0.65),frameon=True)
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(2)
leg.get_title().set_fontsize(25)
leg.get_title().set_fontweight('bold')
plt.tight_layout()
#plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6)
plt.show()
return fig
def displayTable(plotdf,name):
# Display table
styles = [
hover(),
dict(selector="th", props=[("font-size", "130%"),
("text-align", "center"),
]),
dict(selector="td", props=[("font-size", "120%"),
("text-align", "center"),
]),
dict(selector="caption", props=[("caption-side", "top"),
("text-align", "center"),
("font-size", "100%")])
]
df1 = (plotdf.style.set_table_styles(styles).set_caption(name))
display(df1)
print("\n\n")
def plot_idxstats(newdf,df,samplename):
fig,ax = plt.subplots()
ax.grid(which='major', linestyle='-', linewidth='0.25')
ax.yaxis.set_major_formatter(FuncFormatter(y_fmt))
index=list(range(newdf.shape[1]))
colors = plt.cm.plasma(np.linspace(0,1,newdf.shape[0]))
for ii in range(0,newdf.shape[0]):
plt.plot(index,newdf.iloc[ii],linewidth=2,color=colors[ii],linestyle="-",marker='o',fillstyle='full',markersize=8)
fig = plt.gcf()
fig.set_size_inches(11,8)
plt.xticks(index,chr_feature[2:24],fontsize = 14,weight='bold')
plt.yticks(fontsize = 14,weight='bold')
labels = ax.get_xticklabels()
plt.setp(labels, rotation=40)
legend_properties = {'weight':'bold','size':'14'}
leg = plt.legend(df['track'],title="Sample",prop=legend_properties,bbox_to_anchor=(1.42,1.01),frameon=True)
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(2)
leg.get_title().set_fontsize(16)
leg.get_title().set_fontweight('bold')
plt.xlabel('\nContigs',**axis_font)
plt.ylabel('Mapped Reads',**axis_font,labelpad=40)
plt.title("Mapped reads per contig", **title_font)
plt.tight_layout()
#plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6)
print("\n\n")
plt.show()
return fig
def idxStatsReport(dbname, tablename):
trans = pd.DataFrame(readDBTable(dbname,tablename))
trans.columns = getDBColumnNames(dbname,tablename)
df=trans
#print(df)
#newdf = df[df.columns[0:25]]
newdf = df[chr_feature[2:24]]
#print(newdf)
plotdf = df[chr_feature]
plotdf.columns = chr_index
plotdf.index = [df['track']]
#del plotdf.index.name
#pdf=PdfPages("idx_stats_summary.pdf")
displayTable(plotdf,"Idx Full Stats")
fig = plot_idxstats(newdf,df,"idx_full_stats")
#pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6)
print("\n\n\n")
fig = plotBar(df,"idxStats_X_Y_mapped_reads")
#pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6)
#pdf.close()
#getTables("csvdb")
idxStatsReport("../csvdb","idxstats_reads_per_chromosome")
```
---
```
# Import lib
# ===========================================================
import csv
import pandas as pd
import numpy as np
import random
import time
import collections
import math
import sys
from tqdm import tqdm
from time import sleep
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
from datascience import *
from scipy import stats
import statsmodels.formula.api as smf
import statsmodels.api as sm
# from statsmodels.genmod.families.links import logit
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve
# Initialize useful data
# ===========================================================
df = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)
df = df.fillna(value=0)
# resample to get a balanced dataset
df_zero = df.loc[df['CLASS'] == 0]
df_zero = df_zero.sample(n=1000)
df_one = df.loc[df['CLASS'] == 1]
df_one = df_one.sample(n=1000)
# concatenate and reallocate all data
df = pd.concat([df_zero, df_one])
df = df.sample(n = df.shape[0])
all_rows = df.values.tolist()
row_num = len(all_rows)
df.head()
# Divide whole dataset into Input and Output
# ===========================================================
# Features - all columns except 'CLASS'
# Target label - 'CLASS' column
X = df.drop('CLASS', axis=1)
y = df['CLASS']
# One hot encoding
X = pd.get_dummies(X, drop_first=True)
y = pd.get_dummies(y, drop_first=True)
# Train/Test split
train_X, test_X, train_y, test_y = train_test_split(X, y)
# Normalize using StandardScaler
scaler = StandardScaler()
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)
# Train Model
# ===========================================================
model = LogisticRegression()
start = time.time()
model.fit(train_X, train_y)
pred_y = model.predict(test_X)
score = accuracy_score(test_y, pred_y)
end = time.time()
print("Logistic Regression Model Trained! Time: %.03fs" % (end - start))
# Compare Actual label and Predicted label
# ===========================================================
pred_score = model.predict_proba(test_X)
fpr, tpr, thresholds = roc_curve(test_y, pred_score[:,1])
final = Table().with_column('IDX', [i for i in range(len(pred_score))])
final = final.with_columns('ACT_CLASS', test_y.transpose().values.tolist()[0], 'PRE_CLASS', pred_score[:, 1])
final.show(5)
# Compute TN, TP, FN, FP, etc.
# ===========================================================
ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))
step_size = 0.05
for cutoff in np.arange(0, 1 + step_size, step_size):
temp_final = final.with_column('INDICATE', final.apply(lambda x, y: (int(x >= cutoff) << 1) + y, 'PRE_CLASS', 'ACT_CLASS'))
# 00(0) -> TN
# 01(1) -> FN
# 10(2) -> FP
# 11(3) -> TP
group = temp_final.group('INDICATE')
indicator = group.column(0)
counts = group.column(1)
# print(indicator, counts)
output = [cutoff]
idx = 0
for i in range(4):
# print(counts[idx])
if i in indicator:
output.append(counts[idx])
idx += 1
else:
output.append(0)
acc = (output[1] + output[4]) / sum(output[1:])
output.append(acc)
ROC = ROC.with_row(output)
ROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))
ROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))
ROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN)) * (TP / (TP + FP)) / (TP / (TP + FN) + TP / (TP + FP)), 'TP', 'FP', 'FN'))
ROC.show()
# Acc Curve by cutoff
# ===========================================================
fig = plt.figure()
plt.xlabel('Cutoff')
plt.ylabel('Accuracy')
plt.title('Accuracy - Cutoff of Logistic Regression')
plt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black')
plt.plot(ROC.column('CUTOFF'), ROC.column('ACC'), color='orange')
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Logistic ACC.png', bbox_inches='tight')
# ROC_CURVE
# ===========================================================
fig = plt.figure()
plt.xlabel('False Positive Rate')
plt.ylabel('Sensitivity')
plt.title('ROC - Curve of Logistic Regression')
plt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), color='black')
plt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange')
plt.legend(['Null', 'Logistic'])
plt.axis([0, 1, 0, 1.1])
plt.show()
fig.savefig('Logistic ROC.png', bbox_inches='tight')
# Compute AUC
# ===========================================================
length = len(ROC.column('FPR'))
auc = 0
for i in range(length - 1):
auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1])
print("auc = %.03f" %auc)
acc, tpr, fpr = ROC.column('ACC'), ROC.column('SENSITIVITY'), ROC.column('FPR')
acc
tpr
fpr
```
---
# Introduction to NumPy
In this chapter we will learn how to use NumPy, a library ([Note 1](#note1)) for fast numerical computation in Python.
The goal of this chapter is to **implement with NumPy** the multiple regression algorithm learned in the [Single and Multiple Regression Analysis](https://tutorials.chainer.org/ja/07_Regression_Analysis.html) chapter.
Knowing how to work with NumPy's **multidimensional arrays** is useful when working with many other libraries.
For example, **scikit-learn**, which offers a unified interface to a wide range of machine learning methods, and **Chainer**, a framework for defining and training neural networks, both become much easier to use once you are familiar with NumPy.
Let's start by going over the basics of using NumPy.
## Getting ready to use NumPy
NumPy comes preinstalled in notebooks on Google Colaboratory (Colab), so we do not explain installation here. If you want to install NumPy on your own computer, see [Installing packages](https://scipy.org/install.html).
Although no installation is needed on Colab, the `numpy` module is not yet imported when a notebook is opened.
To use a library's functionality, you need to import the module that the library provides.
For example, the simplest way to import a module named `A` is `import A`.
If the module name `A` is long, you can give it an alias by writing `import A as B`.
Once an alias is assigned with `as`, the module can be used under that alias from then on.
That is, writing `import A as B` lets you use the module `A` under the name `B`.
This is a Python feature, so it can also be used when importing modules other than NumPy.
By convention, `numpy` is usually given the alias `np`.
Giving short aliases to modules used frequently in code is common practice.
Let's import `numpy` under the name `np`.
```
import numpy as np
```
## 倿¬¡å
é
åãå®çŸ©ãã
ãã¯ãã«ã»è¡åã»ãã³ãœã«ãªã©ã¯ãããã°ã©ãã³ã°äžã¯å€æ¬¡å
é
åã«ãã衚çŸã§ããNumPy ã§ã¯ ndarray ãšããã¯ã©ã¹ã§å€æ¬¡å
é
åã衚çŸããŸãïŒ[泚é2](#note2)ïŒãæ©éããããçšããŠãã¯ãã«ãå®çŸ©ããŠã¿ãŸãããã
```
# Define a vector
a = np.array([1, 2, 3])
a
```
As shown above, passing the Python list `[1, 2, 3]` to `np.array()` creates an ndarray object representing the vector $[1, 2, 3]$.
An ndarray object has an **attribute** called `shape`, which stores the **shape** of the multidimensional array.
Let's check the shape of the ndarray object `a` defined above.
```
a.shape
```
A Python tuple with a single element, `(3,)`, is displayed.
The shape of an ndarray is expressed as a tuple of integers, and the number of elements in that tuple is the array's **dimensionality (number of dimensions)**.
The shape lists the size of each dimension of the multidimensional array in order.
The number of dimensions is stored in the ndarray attribute `ndim`.
```
a.ndim
```
This is the same value as `len(a.shape)`.
Since the ndarray `a` is a one-dimensional array, `a.shape` is a tuple with one element and the value of `ndim` is 1 ([Note 3](#note3)).
Next, let's define a $3 \times 3$ matrix.
```
# Define a matrix
b = np.array(
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
)
b
```
Let's check its shape and number of dimensions.
```
print('Shape:', b.shape)
print('Rank:', b.ndim)
```
Now let's also look at the `size` attribute.
```
b.size
```
This is the number of elements held by the ndarray `b`.
Since `b` is a $3 \times 3$ matrix, it has 9 elements.
**Make sure you understand the difference between the terms "shape", "number of dimensions", and "size".**
Besides building a multidimensional array from a Python list with `np.array()`, NumPy offers a variety of other ways to create an ndarray.
Some representative examples are shown below.
```
# 圢ãæå®ããŠãèŠçŽ ãå
šãŠ 0 ã§åãããã ndarray ãäœã
a = np.zeros((3, 3))
a
# 圢ãæå®ããŠãèŠçŽ ãå
šãŠ 1 ã§åãããã ndarray ãäœã
b = np.ones((2, 3))
b
# 圢ãšå€ãæå®ããŠãèŠçŽ ãæå®ããå€ã§åãããã ndarray ãäœã
c = np.full((3, 2), 9)
c
# æå®ããã倧ããã®åäœè¡åã衚ã ndarray ãäœã
d = np.eye(5)
d
# 圢ãæå®ããŠã 0 ~ 1 ã®éã®ä¹±æ°ã§èŠçŽ ãåãã ndarray ãäœã
e = np.random.random((4, 5))
e
# 3 ããå§ãŸã 10 ã«ãªããŸã§ 1 ãã€å¢å ããæ°åãäœãïŒ10 ã¯å«ãŸãªãïŒ
f = np.arange(3, 10, 1)
f
```
## 倿¬¡å
é
åã®èŠçŽ ãéžæãã
åç¯ã§ã¯ NumPy ã䜿ã£ãŠå€æ¬¡å
é
åãå®çŸ©ããããã€ãã®æ¹æ³ã玹ä»ããŸããã
æ¬ç¯ã§ã¯ãäœæãã ndarray ã®ãã¡ã®ç¹å®ã®èŠçŽ ãéžæããŠãå€ãåãåºãæ¹æ³ã玹ä»ããŸãã
æãããè¡ãããæ¹æ³ã¯ `[]` ã䜿ã£ã**æ·»åè¡šèš ïŒsubscriptionïŒ** ã«ããèŠçŽ ã®éžæã§ãã
### æŽæ°ã«ããèŠçŽ ã®éžæ
äŸãã°ãäžã§äœæãã `e` ãšãã $4 \times 5$ è¡åã衚ã倿¬¡å
é
åããã1 è¡ 2 åç®ã®å€ãåãåºãã«ã¯ã以äžã®ããã«ããŸãã
```
val = e[0, 1]
val
```
To specify "row 1, column 2", the index was `[0, 1]`.
This is because NumPy ndarrays, like Python lists, use **zero-based indexing**, where subscripts start at 0.
In other words, the value in row i, column j of this matrix can be extracted with `[i - 1, j - 1]`.
### Selecting elements with slices
As with Python lists, you can use **slicing** on a NumPy ndarray to specify a range of elements to select.
In addition, an ndarray lets you specify slices for multiple dimensions, separated by commas.
```
# Extract the central 2 x 3 = 6 values of the 4 x 5 matrix e
center = e[1:3, 1:4]
center
```
Looking back at the output of `e` at the end of the previous section, you can see that exactly the central $2 \times 3$ block of numbers has been extracted.
Now let's compare the shape of `center`, which was created by extracting 2 rows and 3 columns of `e` starting from element `[1, 1]`, with the shape of `e`.
```
print('Shape of e:', e.shape)
print('Shape of center:', center.shape)
```
You can also assign values to the part of an ndarray selected by indexing or slicing.
```
# Set the central 6 values from above to 0
e[1:3, 1:4] = 0
e
```
### Selecting elements with an integer array
Besides integers and slices, you can also pass an integer array to an ndarray's `[]`.
By integer array, we mean here a Python list or ndarray whose elements are integers.
Here is a concrete example.
First, define an ndarray `a` representing a $3 \times 3$ matrix.
```
a = np.array(
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
)
a
```
From this ndarray, suppose we want to select the following three elements, arrange them in order, and obtain an ndarray of shape `(3,)`:
1. row 1, column 2: `a[0, 1]`
2. row 3, column 2: `a[2, 1]`
3. row 2, column 1: `a[1, 0]`
Of course, this can be done by specifying the target elements one by one and collecting them into a new ndarray, as follows.
```
np.array([a[0, 1], a[2, 1], a[1, 0]])
```
However, the same thing can also be done by **passing the rows to select and the columns to select as two separate lists, in order**.
```
a[[0, 2, 1], [1, 1, 0]]
```
Focusing only on **which rows the three selected values are in**, they are elements of rows 1, 3, and 2, respectively.
In zero-based indexing, these are rows 0, 2, and 1.
This is the meaning of the first list, `[0, 2, 1]`, passed to `a`'s `[]`.
Similarly, **focusing on the columns**, the elements are in columns 1, 1, and 0 in zero-based indexing.
This is the meaning of the second list, `[1, 1, 0]`, passed to `a`'s `[]`.
## ndarray data types
All elements of a single ndarray have the same type.
NumPy supports a variety of data types, but here we introduce only a few of them.
When creating an ndarray from a Python list, NumPy infers the data type from the values.
The data type of an ndarray is stored in its `dtype` attribute.
```
# Passing a list whose elements are integers (Python int)
x = np.array([1, 2, 3])
x.dtype
# Passing a list whose elements are floating-point numbers (Python float)
x = np.array([1., 2., 3.])
x.dtype
```
As shown above, **Python's int type automatically became NumPy's int64 type**, and **Python's float type automatically became NumPy's float64 type**.
Python's int type corresponds to NumPy's int_ type, and Python's float type corresponds to NumPy's float_ type.
Depending on the platform, int_ may be the same as either int64 or int32.
The same applies to float_: depending on the platform it may be the same as either float64 or float32.
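If you are curious which concrete types these aliases map to on your platform, a quick check like the following works (an illustrative snippet, not part of the original tutorial; `np.int_` and `np.float_` are the NumPy 1.x aliases discussed above):

```
# Print the concrete dtypes behind the platform-dependent aliases
print(np.dtype(np.int_), np.dtype(np.float_))
```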
To create an ndarray with a specific data type, do the following.
```
x = np.array([1, 2, 3], dtype=np.float32)
x.dtype
```
As shown, you pass a NumPy dtype object to the `dtype` argument.
This example specifies a 32-bit floating-point type.
The same thing can also be done by specifying the type as a string.
```
x = np.array([1, 2, 3], dtype='float32')
x.dtype
```
This can be written even more concisely as follows.
```
x = np.array([1, 2, 3], dtype='f')
x.dtype
```
To change the data type of an array that was defined with a different type, convert it with `astype`.
```
x = x.astype(np.float64)
x.dtype
```
## 倿¬¡å
é
åãçšããèšç®
ndarray ã䜿ã£ãŠè¡åããã¯ãã«ãå®çŸ©ããŠãããããçšããŠããã€ãã®èšç®ãè¡ã£ãŠã¿ãŸãããã
ndarray ãšããŠå®çŸ©ããããã¯ãã«ãè¡åå士ã®**èŠçŽ ããšã®å æžä¹é€**ã¯ãPython ã®æ°å€å士ã®ååæŒç®ã«çšãããã `+`ã`-`ã`*`ã`/` ãšããèšå·ã䜿ã£ãŠè¡ããŸãã
ããã§ã¯ãåã圢ã®è¡åã 2 ã€å®çŸ©ãããããã®**èŠçŽ ããšã®**å æžä¹é€ãå®è¡ããŠã¿ãŸãããã
```
# Define two matrices of the same shape (3 x 3)
a = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
])
b = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
# Addition
c = a + b
c
# Subtraction
c = a - b
c
# Multiplication
c = a * b
c
# Division
c = a / b
c
```
NumPy provides many functions that operate element-wise on a given multidimensional array.
A few examples are shown below.
```
# Compute the square root element-wise
c = np.sqrt(b)
c
# Raise each element to the n-th power
n = 2
c = np.power(b, n)
c
```
Raising each element to the n-th power can also be written as follows.
```
c ** n
```
The arithmetic operations introduced at the beginning were performed between two matrices **of the same size**.
Now let's define arrays of different sizes, a $3 \times 3$ matrix `a` and a 3-dimensional vector `b`, and add them together.
```
a = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
])
b = np.array([1, 2, 3])
c = a + b
c
```
圢ãåãè¡åå士ã®å Žåãšåæ§ã«èšç®ããããšãã§ããŸããã
ãã㯠NumPy ãèªåçã«**ãããŒããã£ã¹ãïŒbroadcastïŒ**ãšåŒã°ããæäœãè¡ã£ãŠããããã§ãã
ããã«ã€ããŠæ¬¡ç¯ã§èª¬æããŸãã
## ãããŒããã£ã¹ã
è¡åå士ã®èŠçŽ ããšã®ååæŒç®ã¯ãéåžžã¯è¡åã®åœ¢ãåãã§ãªããã°å®çŸ©ã§ããŸããã
ããããåç¯ã®æåŸã§ã¯ $3 \times 3$ è¡åã« 3 次å
ãã¯ãã«ãè¶³ãèšç®ãå®è¡ã§ããŸããã
ãããèŠçŽ ããšã®èšç®ãšåãããã«å®è¡ã§ããçç±ã¯ãNumPy ãèªåçã« 3 次å
ãã¯ãã« `b` ã 3 ã€äžŠã¹ãŠã§ãã $3 \times 3$ è¡åãæ³å®ãã`a` ãšåãåœ¢ã«æããæäœãæé»ã«è¡ã£ãŠããããã§ãã
ãã®æäœãã**ãããŒããã£ã¹ã**ãšåŒã³ãŸãã
ç®è¡æŒç®ãç°ãªã圢ã®é
åå士ã§è¡ãå ŽåãNumPy ã¯èªåçã«å°ããæ¹ã®é
åã**ãããŒããã£ã¹ã**ãã倧ããæ¹ã®é
åãšåœ¢ãåãããŸãã
ãã ãããã®èªåçã«è¡ããããããŒããã£ã¹ãã§ã¯ãè¡ãããç®è¡æŒç®ãã倧ããæ¹ã®é
åã®äžéšã«å¯ŸããŠ**ç¹°ãè¿ãè¡ããã**ããšã§å®çŸããããããå®éã«å°ããæ¹ã®é
åã®ããŒã¿ãã³ããŒããŠå€§ããé
åãã¡ã¢ãªäžã«äœæããããšã¯å¯èœãªéãé¿ããããŸãã
ãŸãããã®ç¹°ãè¿ãã®èšç®ã¯ NumPy ã®å
éšã® C èšèªã«ãã£ãŠå®è£
ãããã«ãŒãã§è¡ããããããé«éã§ãã
ããã·ã³ãã«ãªäŸã§èããŠã¿ãŸãããã
以äžã®ãããªé
å `a` ãããããã®å
šãŠã®èŠçŽ ã 2 åã«ããããšããŸãã
```
a = np.array([1, 2, 3])
a
```
One way is to define another array of the same shape whose elements are all 2, and compute the element-wise product, as follows.
```
b = np.array([2, 2, 2])
c = a * b
c
```
However, simply multiplying `a` by the scalar 2 gives the same result.
```
c = a * 2
c
```
Because the expression `* 2` does not specify **which of the three elements** it applies to, NumPy interprets it as meaning **apply it to all elements**, stretching the scalar 2 across the 3 elements of `a` before multiplying.
**There is a rule for when computation between arrays of different shapes becomes possible through broadcasting.**
The rule is that **for each dimension, the two arrays must either have the same size or one of them must have size 1.**
If this rule is not satisfied, NumPy raises the error "ValueError: operands could not be broadcast together with shapes (shape of the first array) (shape of the second array)".
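For instance (an illustrative example, not in the original text), adding arrays of shapes `(2, 3)` and `(4,)` violates this rule, because their trailing dimensions are 3 and 4:

```
a_bad = np.zeros((2, 3))
b_bad = np.zeros(4)
try:
    a_bad + b_bad  # trailing dimensions are 3 and 4: neither equal nor 1
except ValueError as e:
    print(e)  # operands could not be broadcast together with shapes (2,3) (4,)
```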
The size of each dimension of the broadcast array ([Note 4](#note4)) equals the maximum of the input arrays' sizes along that dimension.
Each input is broadcast so that the size of each of its dimensions matches the larger of the input sizes, and the computation is carried out at this expanded size.
Let's look at a slightly more concrete example.
Define the following two arrays `a` and `b` and add them.
```
# Create arrays of shape (2, 1, 3) and (3, 1) filled with random values in the range 0 to 9
a = np.random.randint(0, 10, (2, 1, 3))
b = np.random.randint(0, 10, (3, 1))
print('a:\n', a)
print('\na.shape:', a.shape)
print('\nb:\n', b)
print('\nb.shape:', b.shape)
# Addition
c = a + b
print('\na + b:\n', c)
print('\n(a + b).shape:', c.shape)
```
The shape of `a` is `(2, 1, 3)` and the shape of `b` is `(3, 1)`.
The **trailing dimensions** ([Note 5](#note5)) of these two arrays are 3 and 1, respectively, so they satisfy the rule that "the dimensions have the same size or one of them is 1".
Next, look at the second dimension from the end of each array.
They are 1 and 3, respectively.
This also satisfies the rule.
Here, however, `a` is a 3-dimensional array while `b` is a 2-dimensional array.
That is, their numbers of dimensions differ.
In such a case, `b` is treated as if **a dimension of size 1 had been added at the top of its shape**, i.e. as shape `(1, 3, 1)`.
Both arrays are then broadcast to the shape `(2, 3, 3)`, obtained by taking the maximum size of each dimension of the two arrays, and the addition is performed.
In this way, when the ranks of the two arrays differ, new dimensions are added at the front of the lower-dimensional array's shape until it has the same number of dimensions as the other array.
Note that no matter how many dimensions of size 1 are added, the number of elements does not change.
The number of elements (the value returned by the `size` attribute) is the product of the sizes of all dimensions, and multiplying by 1 any number of times does not change that value, which is another way of seeing why this holds.
The insertion of new dimensions that NumPy performs automatically for broadcasting can also be **done manually**, using the following notation with `[]`.
```
print('Original shape:', b.shape)
b_expanded = b[np.newaxis, :, :]
print('Added new axis to the top:', b_expanded.shape)
b_expanded2 = b[:, np.newaxis, :]
print('Added new axis to the middle:', b_expanded2.shape)
```
A new dimension is inserted at the position where `np.newaxis` is specified.
The number of values held by the array does not change.
Therefore, the size of the inserted dimension is always 1.
```
b
b_expanded
b_expanded2
```
Until you are used to it, NumPy broadcasting may sometimes feel counter-intuitive.
However, once mastered, it lets you perform the same computation much faster than with Python loops, so understanding broadcasting is very important.
Let's look at one more concrete example.
We add a vector `b` of 5 elements to a $5 \times 5$ matrix `a`.
First, define `a`, `b`, and an array `c` to store the result.
```
a = np.array([
[0, 1, 2, 1, 0],
[3, 4, 5, 4, 3],
[6, 7, 8, 7, 6],
[3, 4, 5, 4, 4],
[0, 1, 2, 1, 0]
])
b = np.array([1, 2, 3, 4, 5])
# Create the array that will store the result in advance
c = np.empty((5, 5))
```
Using `%%timeit`, a Jupyter Notebook magic that measures the execution time of a cell, let's time code that adds the values of `b` to each row of `a` (along its first dimension), processing one row at a time with a Python loop.
```
%%timeit
for i in range(a.shape[0]):
c[i, :] = a[i, :] + b
c
```
Next, let's perform the same computation using NumPy broadcasting.
```
%%timeit
c = a + b
c
```
The result is of course the same.
However, the execution time is several times shorter.
Thus, understanding and exploiting broadcasting not only simplifies the code but is also advantageous in terms of execution speed.
## Matrix product
The element-wise product of matrices could be computed with `*`.
The ordinary matrix-matrix product (matrix product), on the other hand, is computed not with `*` but in a different way.
There are two ways to do it.
One is to use the `np.dot()` function.
`np.dot()` takes two arguments and returns their matrix product.
Suppose we have a matrix `A` and a matrix `B` and want to compute the matrix product `AB`.
This can be computed by writing `np.dot(A, B)`.
If you want `BA` instead, write `np.dot(B, A)`.
The other way is to use the `dot()` method of the ndarray object.
With it, the same computation can be written as `A.dot(B)`.
```
# Define matrix A
A = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]
])
# Define matrix B
B = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
```
Let's actually compute the matrix product of these two $3 \times 3$ matrices.
```
# Compute the matrix product (1)
C = np.dot(A, B)
C
```
Let's perform the same computation using the other notation.
```
C = A.dot(B)
C
# Check the data type (integer)
a.dtype
```
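As a brief aside (not part of the original text), the `@` operator, available for ndarrays in Python 3.5+ / NumPy 1.10+, computes the same matrix product:

```
C = A @ B
C
```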
## Computing basic statistics
This section shows how to compute statistics such as the mean, variance, standard deviation, maximum, and minimum of the values contained in a multidimensional array.
Let's create an $8 \times 10$ matrix and compute these statistics over all of the values it contains.
```
x = np.random.randint(0, 10, (8, 10))
x
# Mean
x.mean()
# Variance
x.var()
# Standard deviation
x.std()
# Maximum
x.max()
# Minimum
x.min()
```
Since `x` is a 2-dimensional array, these statistics can also be computed along each dimension.
For example, taking the mean within only the last dimension should yield 8 mean values.
The axis along which to compute the mean (that is, which dimension to compute along) is specified with the `axis` argument.
```
x.mean(axis=1)
```
This is the same as computing the mean of the values along dimension 1 for each row and collecting the results, as shown below.
(We are using zero-based indexing here: the shape of `x` is `(8, 10)`, so dimension 0 has size 8 and dimension 1 has size 10.)
```
np.array([
x[0, :].mean(),
x[1, :].mean(),
x[2, :].mean(),
x[3, :].mean(),
x[4, :].mean(),
x[5, :].mean(),
x[6, :].mean(),
x[7, :].mean(),
])
```
## Multiple regression analysis with NumPy
Let's perform the multiple regression analysis explained in the [Single and Multiple Regression Analysis](https://tutorials.chainer.org/ja/07_Regression_Analysis.html) chapter using NumPy.
Suppose we are given the following design matrix, which gathers 4 data points.
```
# Define X
X = np.array([
[2, 3],
[2, 5],
[3, 4],
[5, 9],
])
X
```
As in the explanation in Chapter 4, we include the intercept in the weight vector, so we add a value of 1 as column 0 of the design matrix.
```
# An array of 1s with the same length as the number of data points (X.shape[0])
ones = np.ones((X.shape[0], 1))
# Use concatenate to prepend the column of 1s along axis 1
X = np.concatenate((ones, X), axis=1)
# The design matrix with a leading column of 1s
X
```
Suppose the target values are given as follows.
```
# Define t
t = np.array([1, 5, 6, 8])
t
```
In multiple regression analysis, the optimal weights of the linear model can be determined by solving the normal equations.
The solution of the normal equations is the following:
$$
{\bf w} = ({\bf X}^{\rm T}{\bf X})^{-1}{\bf X}^{\rm T}{\bf t}
$$
We will compute this in four steps.
The first step is to compute ${\bf X}^{\rm T}{\bf X}$. Applying `.T` to an ndarray gives the transposed array.
```
# Step 1
xx = np.dot(X.T, X)
xx
```
Next, compute the inverse of this matrix.
```
# Step 2
xx_inv = np.linalg.inv(xx)
xx_inv
```
The inverse of a matrix can be computed with `np.linalg.inv()`.
Next, compute ${\bf X}^{\rm T}{\bf t}$.
```
# Step 3
xt = np.dot(X.T, t)
xt
```
Finally, multiply the computed `xx_inv` and `xt` together.
```
# Step 4
w = np.dot(xx_inv, xt)
w
```
**The whole computation above can also be done in a single line, as follows.**
```
w_ = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(t)
w_
```
In practice, explicitly computing an inverse matrix is rare; it is better, in terms of both speed and accuracy, to call `numpy.linalg.solve`, which solves the system of linear equations in one step, i.e. performs a computation equivalent to computing the inverse and multiplying it by a vector.
```
w_ = np.linalg.solve(X.T.dot(X), X.T.dot(t))
w_
```
It takes a little while to get used to translating mathematical formulas into NumPy array computations, but once you are used to it, you gain a lot: not only can the computation be written in a small amount of code, it also runs fast.
<hr />
<div class="alert alert-info">
**泚é 1**
ã©ã€ãã©ãªãšã¯ãæ±çšæ§ã®é«ãè€æ°ã®é¢æ°ãã¯ã©ã¹ãªã©ãåå©çšå¯èœãªåœ¢ã§ã²ãšãŸãšãŸãã«ãããã®ã§ãPython ã®äžçã§ã¯**ããã±ãŒãž**ãšãåŒã°ããŸãããŸããPython ã§é¢æ°ãã¯ã©ã¹ã®å®çŸ©ãæãªã©ãæžããããã¡ã€ã«ã®ããšã**ã¢ãžã¥ãŒã«**ãšåŒã³ãããã±ãŒãžã¯ã¢ãžã¥ãŒã«ãéãŸã£ããã®ã§ãã
[â²äžãžæ»ã](#ref_note1)
</div>
<div class="alert alert-info">
**泚é 2**
NumPy ã«ã¯ matrix ãšããã¯ã©ã¹ãååšããŸãããæ¬ãã¥ãŒããªã¢ã«ã§ã¯åºæ¬çã«å€æ¬¡å
é
åã衚ã ndarray ããã¯ãã«ãè¡åã衚ãããã«çšããŸãã
[â²äžãžæ»ã](#ref_note2)
</div>
<div class="alert alert-info">
**泚é 3**
ããã¯ããã®å€æ¬¡å
é
åã衚ããã³ãœã«ã®**éæ°ïŒrankã以äžã©ã³ã¯ïŒ**ãšå¯Ÿå¿ããŸãã
[â²äžãžæ»ã](#ref_note3)
</div>
<div class="alert alert-info">
**泚é 4**
ãæ¬¡å
ã®ãµã€ãºããšèšã£ãå Žåã¯ãã®æ¬¡å
ã®å€§ãããæå³ããé
åã® `size` 屿§ãšã¯ç°ãªããã®ãæããŠããŸãã
[â²äžãžæ»ã](#ref_note4)
</div>
<div class="alert alert-info">
**泚é 5**
æ«å°Ÿæ¬¡å
ïŒtrailing dimensionïŒãšã¯ããã®é
åã®åœ¢ã衚ãã¿ãã«ã®äžçªæåŸã®å€ã®ããšãæããŸãã
[â²äžãžæ»ã](#ref_note5)
</div>
---
# HuberRegressor with StandardScaler
This code template performs regression analysis using a Huber regressor together with the StandardScaler feature-rescaling technique in a pipeline.
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.linear_model import HuberRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features required for model training.
```
#x_values
features= []
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
Feature selection is the process of reducing the number of input variables when developing a predictive model, both to lower the computational cost of modelling and, in some cases, to improve the model's performance.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since most of the machine learning models in the scikit-learn library do not handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below defines functions that remove null values, if any exist, and encode the string-class columns in the dataset numerically (via one-hot encoding).
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
### Model
Linear regression model that is robust to outliers.
The Huber Regressor optimizes the squared loss for the samples where |(y - X'w) / sigma| < epsilon and the absolute loss for the samples where |(y - X'w) / sigma| > epsilon, where w and sigma are parameters to be optimized. The parameter sigma makes sure that if y is scaled up or down by a certain factor, one does not need to rescale epsilon to achieve the same robustness. Note that this does not take into account the fact that the different features of X may be of different scales.
This makes sure that the loss function is not heavily influenced by the outliers while not completely ignoring their effect.
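Concretely, following the description above and the form given in the scikit-learn documentation, the per-sample Huber loss applied to the scaled residual $z = (y - Xw)/\sigma$ can be written as:

$$
H_{\epsilon}(z) =
\begin{cases}
z^2 & \text{if } |z| < \epsilon \\
2\epsilon|z| - \epsilon^2 & \text{otherwise}
\end{cases}
$$

so small residuals are penalized quadratically and large residuals only linearly.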
#### Data Scaling
We use `sklearn.preprocessing.StandardScaler`, which standardizes features by removing the mean and scaling to unit variance.
The standard score of a sample `x` is calculated as `z = (x - u) / s`, where `u` is the mean of the training samples (or zero if `with_mean=False`) and `s` is the standard deviation of the training samples (or one if `with_std=False`).
Read more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
```
Input=[("standard",StandardScaler()),("model",HuberRegressor())]
model = Pipeline(Input)
model.fit(x_train,y_train)
```
#### Model Accuracy
We will use the trained model to make predictions on the test set, then use the predicted values to measure the accuracy of our model.
> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
> **r2_score**: The **r2_score** function computes the proportion of the variability in the target that is explained by our model.
> **mae**: The **mean absolute error** function measures the total error as the average absolute distance between the real and predicted values.
> **mse**: The **mean squared error** function averages the squared errors, penalizing the model more heavily for large errors.
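For reference, with $y_i$ the true values, $\hat{y}_i$ the predictions, $\bar{y}$ the mean of the true values, and $n$ the number of test samples, these metrics are:

$$
R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}, \qquad
\mathrm{MAE} = \frac{1}{n}\sum_i \lvert y_i - \hat{y}_i \rvert, \qquad
\mathrm{MSE} = \frac{1}{n}\sum_i (y_i - \hat{y}_i)^2
$$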
```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### Prediction Plot
First, we plot the actual observations: the record index on the x-axis and the true test-set values (`y_test`) on the y-axis.
We then overlay the model's predictions for the same test records.
```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Snehaan Bhawal , Github: [Profile](https://github.com/Sbhawal)
---
# Signal Autoencoder
```
import numpy as np
import scipy as sp
import scipy.stats
import itertools
import logging
import matplotlib.pyplot as plt
import pandas as pd
import torch.utils.data as utils
import math
import time
import tqdm
import torch
import torch.optim as optim
import torch.nn.functional as F
from argparse import ArgumentParser
from torch.distributions import MultivariateNormal
import torch.nn as nn
import torch.nn.init as init
import sys
sys.path.append("../new_flows")
from flows import RealNVP, Planar, MAF
from models import NormalizingFlowModel
####MAF
class VAE_NF(nn.Module):
def __init__(self, K, D):
super().__init__()
self.dim = D
self.K = K
self.encoder = nn.Sequential(
nn.Linear(12, 50),
nn.LeakyReLU(True),
nn.Linear(50, 30),
nn.LeakyReLU(True),
nn.Linear(30, 20),
nn.LeakyReLU(True),
nn.Linear(20, D * 2)
)
self.decoder = nn.Sequential(
nn.Linear(D, 20),
nn.LeakyReLU(True),
nn.Linear(20, 30),
nn.LeakyReLU(True),
nn.Linear(30, 50),
nn.LeakyReLU(True),
nn.Linear(50, 12)
)
flow_init = MAF(dim=D)
flows_init = [flow_init for _ in range(K)]
prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda())
self.flows = NormalizingFlowModel(prior, flows_init)
def forward(self, x):
# Run Encoder and get NF params
enc = self.encoder(x)
mu = enc[:, :self.dim]
log_var = enc[:, self.dim: self.dim * 2]
# Re-parametrize
sigma = (log_var * .5).exp()
z = mu + sigma * torch.randn_like(sigma)
kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
# Construct more expressive posterior with NF
z_k, _, sum_ladj = self.flows(z)
kl_div = kl_div / x.size(0) - sum_ladj.mean() # mean over batch
# Run Decoder
x_prime = self.decoder(z_k)
return x_prime, kl_div
#prong_2 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5")
#prong_3 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5")
rnd_data = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5")
testprior_data = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/ThreeProng_5000_500_500.h5")
dt = rnd_data.values
dt_prior = testprior_data.values
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
correct = (dt_prior[:,3]>0) &(dt_prior[:,19]>0) & (dt_prior[:,1]>0) & (dt_prior[:,2]>0)
dt_prior = dt_prior[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
for i in range(13,19):
dt_prior[:,i] = dt_prior[:,i]/dt_prior[:,3]
for i in range(29,35):
dt_prior[:,i] = dt_prior[:,i]/(dt_prior[:,19])
#correct = (dt[:,16]>0) & (dt[:,29]>=0) & (dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
#dt = dt[correct]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
#Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#bkg_idx = np.where(idx==0)[0]
##signal_idx = np.where((idx==1)) [0]
#dt = dt[signal_idx]
correct = (dt[:,0]>=2800)
dt = dt[correct]
correct = (dt_prior[:,0]>=2800)
dt_prior = dt_prior[correct]
idx = dt[:,-1]
#sig_idx = np.where((dt_prior[:,3]>450) & (dt_prior[:,3]<550) & (dt_prior[:,19]>100) & (dt_prior[:,19]<200) & (dt_prior[:,0]>4200) & (dt_prior[:,0]<4800))[0]
sig_idx = np.where((dt_prior[:,3]>200) & (dt_prior[:,19]>200) & (dt_prior[:,0]>4000) & (dt_prior[:,0]<6000))[0]
bkg_idx = np.where(idx==0)[0]
#bsmlike = np.where(dt[:,16]>0.9)[0]
#dt = dt[bsmlike]
dt_sig = dt_prior[sig_idx]
dt_bkg = dt[bkg_idx]
#dt = prong_2.values
#correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
#dt = dt[correct]#
#for i in range(13,19):
# dt[:,i] = dt[:,i]/dt[:,3]#
#for i in range(29,35):
# dt[:,i] = dt[:,i]/(dt[:,19])#
##correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
##dt = dt[correct]#
#
##Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
##Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included
##Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#idx = dt[:,-1]
#bkg_idx = np.where(idx==0)[0]
#sig_idx = np.where((idx==1) & (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150))[0]
##signal_idx = np.where((idx==1)) [0]
#dt_sig = dt[sig_idx]
#
#sig_refine_range = (dt_sig[:,0]>3400) & (dt_sig[:,0]<3600)
#dt_sig = dt_sig[sig_refine_range]
dt_sig.shape
plt.hist(dt_sig[:,0],bins=np.arange(0,8000,50));
f.columns[[3,4,5,6,11,12,19,20,21,22,27,28]]
#Y = dt_sig[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]]
#[3,4,5,6,11,12,19,20,21,22,27,28]
Y = dt_sig[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
Y.shape
#if nprong == 3:
# dt = prong_3.values
# correct = (dt[:,3]>20) &(dt[:,19]>20)
# dt = dt[correct]
# for i in range(13,19):
# dt[:,i] = dt[:,i]/dt[:,3]
# for i in range(29,35):
# dt[:,i] = dt[:,i]/(dt[:,19])
# correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
# dt = dt[correct]
# Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]]
# #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
# idx = dt[:,-1]
# bkg_idx = np.where(idx==0)[0]
# signal_idx = np.where((idx==1) & (dt[:,3]>400))[0]
# #signal_idx = np.where((idx==1)) [0]
# Y = Y[signal_idx]
bins = np.linspace(0,1,100)
bins.shape
column = 3
#print(f_rnd.columns[column])
plt.hist(Y[:,0],bins,alpha=0.5,color='b');
#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
#plt.axvline(np.mean(Y[:,column]))
Y.shape
sig_mean = []
sig_std = []
for i in range(12):
mean = np.mean(Y[:,i])
std = np.std(Y[:,i])
sig_mean.append(mean)
sig_std.append(std)
Y[:,i] = (Y[:,i]-mean)/std
sig_mean
sig_std
total_sig = torch.tensor(Y)
total_sig.shape
bins = np.linspace(-3,3,100)
bins.shape
column = 0
#print(f_rnd.columns[column])
plt.hist(Y[:,11],bins,alpha=0.5,color='b');
#plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
#plt.axvline(np.mean(Y[:,column]))
N_EPOCHS = 30
PRINT_INTERVAL = 2000
NUM_WORKERS = 4
LR = 1e-4
#N_FLOWS = 6
#Z_DIM = 8
N_FLOWS = 10
Z_DIM = 8
n_steps = 0
sigmodel = VAE_NF(N_FLOWS, Z_DIM).cuda()
bs = 800
sig_train_iterator = utils.DataLoader(total_sig, batch_size=bs, shuffle=True)
sig_test_iterator = utils.DataLoader(total_sig, batch_size=bs)
sigoptimizer = optim.Adam(sigmodel.parameters(), lr=1e-6)
beta = 1
def sigtrain():
global n_steps
train_loss = []
sigmodel.train()
for batch_idx, x in enumerate(sig_train_iterator):
start_time = time.time()
x = x.float().cuda()
x_tilde, kl_div = sigmodel(x)
mseloss = nn.MSELoss(size_average=False)
huberloss = nn.SmoothL1Loss(size_average=False)
#loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0)
loss_recons = mseloss(x_tilde,x ) / x.size(0)
#loss_recons = huberloss(x_tilde,x ) / x.size(0)
loss = loss_recons + beta* kl_div
sigoptimizer.zero_grad()
loss.backward()
sigoptimizer.step()
train_loss.append([loss_recons.item(), kl_div.item()])
if (batch_idx + 1) % PRINT_INTERVAL == 0:
print('\tIter [{}/{} ({:.0f}%)]\tLoss: {} Time: {:5.3f} ms/batch'.format(
batch_idx * len(x), 50000,
PRINT_INTERVAL * batch_idx / 50000,
np.asarray(train_loss)[-PRINT_INTERVAL:].mean(0),
1000 * (time.time() - start_time)
))
n_steps += 1
def sigevaluate(split='valid'):
global n_steps
start_time = time.time()
val_loss = []
sigmodel.eval()
with torch.no_grad():
for batch_idx, x in enumerate(sig_test_iterator):
x = x.float().cuda()
x_tilde, kl_div = sigmodel(x)
mseloss = nn.MSELoss(size_average=False)
huberloss = nn.SmoothL1Loss(size_average=False)
#loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0)
loss_recons = mseloss(x_tilde,x ) / x.size(0)
#loss_recons = huberloss(x_tilde,x ) / x.size(0)
loss = loss_recons + beta * kl_div
val_loss.append(loss.item())
#writer.add_scalar('loss/{}/ELBO'.format(split), loss.item(), n_steps)
#writer.add_scalar('loss/{}/reconstruction'.format(split), loss_recons.item(), n_steps)
#writer.add_scalar('loss/{}/KL'.format(split), kl_div.item(), n_steps)
print('\nEvaluation Completed ({})!\tLoss: {:5.4f} Time: {:5.3f} s'.format(
split,
np.asarray(val_loss).mean(0),
time.time() - start_time
))
return np.asarray(val_loss).mean(0)
ae_def = {
"type":"sig",
"trainon":"3prong",
"features":"12features",
"architecture":"MAF",
"selection":"mjj4500_nojetmasscut",
"trainloss":"MSELoss",
"beta":"beta1",
"zdimnflow":"z8f10",
}
ae_def
#from torchsummary import summary
sigmodel.load_state_dict(torch.load(f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5"))
N_EPOCHS = 10
BEST_LOSS = 0
LAST_SAVED = -1
PATIENCE_COUNT = 0
PATIENCE_LIMIT = 5
for epoch in range(1, 1000):
print("Epoch {}:".format(epoch))
sigtrain()
cur_loss = sigevaluate()
if cur_loss <= BEST_LOSS:
PATIENCE_COUNT = 0
BEST_LOSS = cur_loss
LAST_SAVED = epoch
print("Saving model!")
torch.save(sigmodel.state_dict(),f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5")
else:
PATIENCE_COUNT += 1
print("Not saving model! Last saved: {}".format(LAST_SAVED))
if PATIENCE_COUNT > 10:
print("Patience Limit Reached")
break
sigmodel.load_state_dict(torch.load(f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5"))
sigout = sigmodel(torch.tensor(Y).float().cuda())[0]
sigout = sigout.data.cpu().numpy()
bins = np.linspace(-3,3,100)
bins.shape
column = 3
#print(f_rnd.columns[column]
plt.hist(Y[:,column],bins,alpha=0.5,color='b');
plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
plt.axvline(np.mean(Y[:,column]))
mjj, j1mass, j2mass = [4000, 150, 150]
f = pd.read_hdf(f"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5")
dt = f.values
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
correct = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300)
dt = dt[correct]
correct = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100)
dt = dt[correct]
Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
for i in range(12):
Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
sigout = sigmodel(torch.tensor(Y).float().cuda())[0]
sigout = sigout.data.cpu().numpy()
bins = np.linspace(-3,3,101)
bins.shape
column = 2
#print(f_rnd.columns[column]
#plt.hist(dt[:,column],bins,alpha=0.5,color='b');
plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
plt.hist(Y[:,column],bins,alpha=0.5,color='g');
#plt.axvline(np.mean(Y[:,column]))
bins = np.linspace(-3,3,100)
bins.shape
column = 5
#print(f_rn.columns[column]
plt.hist(Y[:,column],bins,alpha=0.5,color='b');
plt.hist(sigout[:,column],bins,alpha=0.5,color='r');
#plt.hist(out2[:,column],bins,alpha=0.5,color='g');
plt.axvline(np.mean(Y[:,column]))
varyj1mass_wps = ([4000, 150, 150],[4000, 300, 150],[4000, 450, 150],[4000, 500, 150],[4000, 650, 150],[4000, 700, 150],[4000, 850, 150],[4000, 900, 150])
for mjj, j1mass, j2mass in varyj1mass_wps:
f = pd.read_hdf(f"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5")
dt = f.values
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
correct = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300)
dt = dt[correct]
correct = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100)
dt = dt[correct]
Y = dt[:,[4,5,6,11,12,20,21,22,27,28]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
print(Y.shape)
for i in range(10):
Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
total_bb_test = torch.tensor(Y)
#huberloss = nn.SmoothL1Loss(reduction='none')
sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
waic = sigae_bbloss + bbvar
#sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()
print(waic[0:10])
plt.hist(waic,bins=np.linspace(0,10,1001),density=True);
plt.xlim([0,2])
#np.save(out_file_waic,waic)
np.save(f'sigaetestprior4500500150_wp_{mjj}_{j1mass}_{j2mass}.npy',sigae_bbloss)
losslist = []
for mjj, j1mass, j2mass in varyj1mass_wps:
a = np.load(f'sigae_wp_{mjj}_{j1mass}_{j2mass}.npy')
losslist.append(a)
losslist[1]
plt.hist(losslist[0],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[1],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[2],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[3],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[4],bins = np.arange(0,10,.1),alpha=0.2);
inputlist = [
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB2.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB3.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_background.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/delphes_output_4500_500_150.h5'
]
ae_def
outputlist_waic = [
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb1.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb2.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb3.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_purebkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_rndbkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_2prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_3prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_4500.npy"
]
outputlist_justloss = [
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb1.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb2.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb3.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy"
]
exist_signalflag = [
False,
False,
False,
False,
True,
True,
True,
False,
]
is_signal = [
False,
False,
False,
False,
False,
True,
True,
True
]
nprong = [
None,
None,
None,
None,
None,
'2prong',
'3prong',
'4500'
]
for in_file, out_file_waic, out_file_justloss, sigbit_flag, is_sig, n_prong in zip(inputlist,outputlist_waic,outputlist_justloss,exist_signalflag,is_signal, nprong):
f_bb = pd.read_hdf(in_file)
dt = f_bb.values
#correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) &(dt[:,2]>0) & (dt[:,16]>0) & (dt[:,32]>0)
#dt = dt[correct]
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
#correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
#dt = dt[correct]
#correct = (dt[:,3]>100)
#dt = dt[correct]
#correct = (dt[:,19]>20)
#dt = dt[correct]
correct = (dt[:,0]>=2800)
dt = dt[correct]
#bsmlike = np.where(dt[:,16]>0.9)[0]
#dt = dt[bsmlike]
if sigbit_flag:
idx = dt[:,-1]
sigidx = (idx == 1)
bkgidx = (idx == 0)
if is_sig:
dt = dt[sigidx]
else:
dt = dt[bkgidx]
if n_prong == '2prong':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)
dt = dt[correct]
if n_prong == '3prong':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)
dt = dt[correct]
if n_prong == '4500':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>100) & (dt[:,19]<200) & (dt[:,0]>4200) & (dt[:,0]<4800)
dt = dt[correct]
Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
print(Y.shape)
for i in range(12):
Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
total_bb_test = torch.tensor(Y)
#huberloss = nn.SmoothL1Loss(reduction='none')
sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
waic = sigae_bbloss + bbvar
#sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()
print(waic[0:10])
plt.hist(waic,bins=np.linspace(0,10,1001),density=True);
plt.xlim([0,2])
np.save(out_file_waic,waic)
np.save(out_file_justloss,sigae_bbloss)
print(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy")
loss_prong3 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy")
loss_prong2 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy")
loss_purebkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy")
loss_rndbkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy")
loss_4500 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy")
plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');
#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');
plt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');
plt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong (rnd)sig');
plt.hist(loss_4500,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong 4500');
#plt.yscale('log')
plt.xlabel('Loss (SigAE trained on 2prong sig)')
plt.legend(loc='upper right')
#plt.savefig('sigae_trained_on_2prongsig.png')
plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');
#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');
plt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');
plt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong (rnd)sig');
#plt.yscale('log')
plt.xlabel('Loss (SigAE trained on 2prong sig)')
plt.legend(loc='upper right')
#plt.savefig('sigae_trained_on_2prongsig.png')
len(loss_prong2)
outputlist_waic
outputlist_justloss
sigae_bbloss
ae_def
sigae_bbloss
plt.hist(sigae_bbloss,bins=np.linspace(0,10,1001));
np.save('../data_strings/sigae_2prong_loss_bb3.npy',sigae_bbloss)
X_bkg = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
X_bkg = X_bkg[bkg_idx]
for i in range(12):
X_bkg[:,i] = (X_bkg[:,i]-sig_mean[i])/sig_std[i]
total_bkg_test = torch.tensor(X_bkg)
sigae_bkgloss = torch.mean((sigmodel(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_sigloss = torch.mean((sigmodel(total_sig.float().cuda())[0]- total_sig.float().cuda())**2,dim=1).data.cpu().numpy()
f_3prong = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5")
f_bb1 = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')
dt_bb1 = f_bb1.values
X_bb1 = dt_bb1[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
X_bb1.shape
sig_mean
sig_std
for i in range(12):
X_bb1[:,i] = (X_bb1[:,i]-sig_mean[i])/sig_std[i]
plt.hist(X_bb1[:,0],bins = np.linspace(-2,2,10))
#(torch.tensor(dt[i * chunk_size:(i + 1) * chunk_size]) for i in range(i_max))  # scratch fragment, superseded by get_loss below
def get_loss(dt):
chunk_size=5000
total_size=1000000
i = 0
i_max = total_size // chunk_size
print(i_max)
gen = (torch.tensor(dt[i*chunk_size: (i + 1) * chunk_size]) for i in range(i_max))
with torch.no_grad():
loss = [
n
for total_in_selection in gen
for n in torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy()
]
return loss
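# Note: get_loss is redefined below with an explicit generator function; the second
# definition overrides this one and computes the same chunked, no-grad loss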
def get_loss(dt):
def generator(dt, chunk_size=5000, total_size=1000000):
i = 0
i_max = total_size // chunk_size
print(i_max)
for i in range(i_max):
start=i * chunk_size
stop=(i + 1) * chunk_size
yield torch.tensor(dt[start:stop])
loss = []
with torch.no_grad():
for total_in_selection in generator(dt,chunk_size=5000, total_size=1000000):
loss.extend(torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy())
return loss
bb1_loss_sig = get_loss(X_bb1)
bb1_loss_sig = np.array(bb1_loss_sig, dtype=float)  # np.float is deprecated; plain float keeps the same behaviour
print(bb1_loss_sig)
plt.hist(bb1_loss_sig,bins=np.linspace(0,100,1001));
np.save('../data_strings/sigaeloss_bb1.npy',bb1_loss_sig)
dt_3prong = f_3prong.values
Z = dt_3prong[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
Z.shape
for i in range(12):
Z[:,i] = (Z[:,i]-sig_mean[i])/sig_std[i]
total_3prong = torch.tensor(Z)
bkgae_bkgloss = torch.mean((model(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
bkgae_3prongloss = torch.mean((model(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss = torch.mean((sigmodel(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss.shape
bins = np.linspace(0,10,1001)
plt.hist(sigae_sigloss,bins,weights = np.ones(len(signal_idx))*10,alpha=0.4,color='r',label='2 prong signal');
plt.hist(sigae_3prongloss,bins,weights = np.ones(100000)*10,alpha=0.5,color='g',label='3 prong signal');
plt.hist(sigae_bkgloss,bins,alpha=0.4,color='b',label='background');
#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.legend(loc='upper right')
plt.xlabel('Signal AE Loss',fontsize=15)
def get_tpr_fpr(sigloss,bkgloss,aetype='sig'):
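    # Sweep a loss-threshold cut: for a signal AE, events below the cut count as signal-like;
    # for a background AE, events above the cut count as signal-like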
bins = np.linspace(0,50,1001)
tpr = []
fpr = []
for cut in bins:
if aetype == 'sig':
tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss))
fpr.append(np.where(bkgloss<cut)[0].shape[0]/len(bkgloss))
if aetype == 'bkg':
tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss))
fpr.append(np.where(bkgloss>cut)[0].shape[0]/len(bkgloss))
return tpr,fpr
def get_precision_recall(sigloss,bkgloss,aetype='bkg'):
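    # Sweep the same kind of loss-threshold cut and record precision and recall (TPR) at each cut value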
bins = np.linspace(0,100,1001)
tpr = []
fpr = []
precision = []
for cut in bins:
if aetype == 'sig':
tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss))
precision.append((np.where(sigloss<cut)[0].shape[0])/(np.where(bkgloss<cut)[0].shape[0]+np.where(sigloss<cut)[0].shape[0]))
if aetype == 'bkg':
tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss))
precision.append((np.where(sigloss>cut)[0].shape[0])/(np.where(bkgloss>cut)[0].shape[0]+np.where(sigloss>cut)[0].shape[0]))
return precision,tpr
tpr_2prong, fpr_2prong = get_tpr_fpr(sigae_sigloss,sigae_bkgloss,'sig')
tpr_3prong, fpr_3prong = get_tpr_fpr(sigae_3prongloss,sigae_bkgloss,'sig')
plt.plot(fpr_2prong,tpr_2prong,label='signal AE')
#plt.plot(VAE_bkg_fpr,VAE_bkg_tpr,label='Bkg VAE-Vanilla')
plt.plot(bkg_fpr4,bkg_tpr4,label='Bkg NFlowVAE-Planar')
plt.xlabel(r'$1-\epsilon_{bkg}$',fontsize=15)
plt.ylabel(r'$\epsilon_{sig}$',fontsize=15)
#plt.semilogy()
#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.legend(loc='lower right')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.savefig('ROC_Curve_sigae.png')
precision,recall = get_precision_recall(loss_sig,loss_bkg,aetype='bkg')
np.save('NFLOWVAE_PlanarNEW_22var_sigloss.npy',loss_sig)
np.save('NFLOWVAE_PlanarNEW_22var_bkgloss.npy',loss_bkg)
np.save('NFLOWVAE_PlanarNEW_precision.npy',precision)
np.save('NFLOWVAE_PlanarNEW_recall.npy',recall)
np.save('NFLOWVAE_PlanarNEW_bkgAE_fpr.npy',bkg_fpr)
np.save('NFLOWVAE_PlanarNEW_bkgAE_tpr.npy',bkg_tpr)
np.save('NFLOWVAE_PlanarNEW_sigloss.npy',loss_sig)
np.save('NFLOWVAE_PlanarNEW_bkgloss.npy',loss_bkg)
plt.plot(recall,precision)
flows = [1,2,3,4,5,6]
zdim = [1,2,3,4,5]
for N_flows in flows:
for Z_DIM in zdim:
        model = VAE_NF(N_flows, Z_DIM).cuda()
optimizer = optim.Adam(model.parameters(), lr=LR)
BEST_LOSS = 99999
LAST_SAVED = -1
PATIENCE_COUNT = 0
PATIENCE_LIMIT = 5
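        # Simple early stopping: save whenever the evaluation loss improves,
        # otherwise count a strike and stop once the patience limit is exceeded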
for epoch in range(1, N_EPOCHS):
print("Epoch {}:".format(epoch))
train()
cur_loss = evaluate()
if cur_loss <= BEST_LOSS:
PATIENCE_COUNT = 0
BEST_LOSS = cur_loss
LAST_SAVED = epoch
print("Saving model!")
if mode == 'ROC':
                    torch.save(model.state_dict(),f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_RND_22var_z{Z_DIM}_f{N_flows}.h5")
else:
                    torch.save(model.state_dict(), f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_PureBkg_22var_z{Z_DIM}_f{N_flows}.h5")
else:
PATIENCE_COUNT += 1
print("Not saving model! Last saved: {}".format(LAST_SAVED))
                if PATIENCE_COUNT > PATIENCE_LIMIT:
print("Patience Limit Reached")
break
loss_bkg = get_loss(dt_PureBkg[bkg_idx])
loss_sig = get_loss(dt_PureBkg[signal_idx])
np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_sigloss.npy',loss_sig)
np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_bkgloss.npy',loss_bkg)
```
# BetterReads: Optimizing GoodReads review data
This notebook explores how to achieve the best results with the BetterReads algorithm when using review data scraped from GoodReads. It is a short follow-up to the exploration performed in the `03_optimizing_reviews.ipynb` notebook.
We have two options when scraping review data from GoodReads: For any given book, we can either scrape 1,500 reviews, with 300 reviews for each star rating (1 to 5), or we can scrape just the top 300 reviews, of any rating. (This is due to some quirks in the way that reviews are displayed on the GoodReads website; for more information, see my [GoodReadsReviewsScraper script](https://github.com/williecostello/GoodReadsReviewsScraper).)
There are advantages and disadvantages to both options. If we scrape 1,500 reviews, we obviously have more review data to work with; however, the data is artificially class-balanced, such that, for example, we'll still see a good number of negative reviews even if the vast majority of the book's reviews are positive. If we scrape just the top 300 reviews, we will have a more representative dataset, but much less data to work with.
We saw in the `03_optimizing_reviews.ipynb` notebook that the BetterReads algorithm can achieve meaningful and representative results from a dataset with fewer than 100 reviews. So we should not dismiss the 300-review option simply because it involves less data. We should only dismiss it if its smaller dataset leads to worse results. So let's try these two options out on a particular book and see how the algorithm performs.
```
import numpy as np
import pandas as pd
import random
from sklearn.cluster import KMeans
import tensorflow_hub as hub
# Loads Universal Sentence Encoder locally, from downloaded module
embed = hub.load('../../Universal Sentence Encoder/module/')
# Loads Universal Sentence Encoder remotely, from Tensorflow Hub
# embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
```
## Which set of reviews should we use?
For this notebook we'll work with a new example: Sally Rooney's *Conversations with Friends*.
<img src='https://i.gr-assets.com/images/S/compressed.photo.goodreads.com/books/1500031338l/32187419._SY475_.jpg' width=250 align=center>
We have prepared two datasets, one of 1,500 reviews and another of 300 reviews, as described above. Both datasets were scraped from GoodReads at the same time, so there is some overlap between them. (Note that the total number of reviews in both datasets is less than advertised, since non-English and very short reviews are dropped during data cleaning.)
```
# Set path for processed file
file_path_1500 = 'data/32187419_conversations_with_friends.csv'
file_path_300 = 'data/32187419_conversations_with_friends_top_300.csv'
# Read in processed file as dataframe
df_1500 = pd.read_csv(file_path_1500)
df_300 = pd.read_csv(file_path_300)
print(f'The first dataset consists of {df_1500.shape[0]} sentences from {df_1500["review_index"].nunique()} reviews')
print(f'The second dataset consists of {df_300.shape[0]} sentences from {df_300["review_index"].nunique()} reviews')
```
As we can see above, in comparison to the smaller dataset, the bigger dataset contains approximately three times the number of sentences from four times the number of reviews. And as we can see below, the bigger dataset contains approximately the same number of reviews for each star rating, while the smaller dataset is much more heavily skewed toward 5 star and 4 star reviews.
```
df_1500.groupby('review_index')['rating'].mean().value_counts().sort_index()
df_300.groupby('review_index')['rating'].mean().value_counts().sort_index()
```
On [the book's actual GoodReads page](https://www.goodreads.com/book/show/32187419-conversations-with-friends), its average review rating is listed as 3.82 stars. This is nearly the same as the average review rating of our smaller dataset. The bigger dataset's average review rating, in contrast, is just less than 3. This confirms our earlier suspicion that the smaller dataset presents a more representative sample of the book's full set of reviews.
```
df_300.groupby('review_index')['rating'].mean().mean()
df_1500.groupby('review_index')['rating'].mean().mean()
```
Let's see how these high-level differences affect the output of our algorithm.
```
def load_sentences(file_path):
'''
Function to load and embed a book's sentences
'''
# Read in processed file as dataframe
df = pd.read_csv(file_path)
# Copy sentence column to new variable
sentences = df['sentence'].copy()
# Vectorize sentences
sentence_vectors = embed(sentences)
return sentences, sentence_vectors
def get_clusters(sentences, sentence_vectors, k, n):
'''
Function to extract the n most representative sentences from k clusters, with density scores
'''
# Instantiate the model
kmeans_model = KMeans(n_clusters=k, random_state=24)
# Fit the model
kmeans_model.fit(sentence_vectors);
# Set the number of cluster centre points to look at when calculating density score
centre_points = int(len(sentences) * 0.02)
# Initialize list to store mean inner product value for each cluster
cluster_density_scores = []
# Initialize dataframe to store cluster centre sentences
df = pd.DataFrame()
# Loop through number of clusters
for i in range(k):
# Define cluster centre
centre = kmeans_model.cluster_centers_[i]
# Calculate inner product of cluster centre and sentence vectors
ips = np.inner(centre, sentence_vectors)
# Find the sentences with the highest inner products
top_indices = pd.Series(ips).nlargest(n).index
top_sentences = list(sentences[top_indices])
centre_ips = pd.Series(ips).nlargest(centre_points)
density_score = round(np.mean(centre_ips), 5)
# Append the cluster density score to master list
cluster_density_scores.append(density_score)
# Create new row with cluster's top 10 sentences and density score
new_row = pd.Series([top_sentences, density_score])
# Append new row to master dataframe
        df = pd.concat([df, new_row.to_frame().T], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
# Rename dataframe columns
df.columns = ['sentences', 'density']
# Sort dataframe by density score, from highest to lowest
df = df.sort_values(by='density', ascending=False).reset_index(drop=True)
# Loop through number of clusters selected
for i in range(k):
# Save density / similarity score & sentence list to variables
sim_score = round(df.loc[i]["density"], 3)
sents = df.loc[i]['sentences'].copy()
print(f'Cluster #{i+1} sentences (density score: {sim_score}):\n')
print(*sents, sep='\n')
print('\n')
model_density_score = round(np.mean(cluster_density_scores), 5)
print(f'Model density score: {model_density_score}')
# Load and embed sentences
sentences_1500, sentence_vectors_1500 = load_sentences(file_path_1500)
sentences_300, sentence_vectors_300 = load_sentences(file_path_300)
# Get cluster sentences for bigger dataset
get_clusters(sentences_1500, sentence_vectors_1500, k=6, n=8)
# Get cluster sentences for smaller dataset
get_clusters(sentences_300, sentence_vectors_300, k=6, n=8)
```
Let's summarize our results. The bigger dataset's sentence clusters can be summed up as follows:
1. Fantastic writing
1. Reading experience (?)
1. Unlikeable characters
1. Plot synopsis
1. Not enjoyable
1. Thematic elements: relationships & emotions
The smaller dataset's clusters can be summed up like this:
1. Fantastic writing
1. Plot synopsis
1. Loved it
1. Unlikeable characters
1. Reading experience
1. Thematic elements: Relationships & emotions
As we can see, the two sets of results are broadly similar; there are no radical differences between the two sets of clusters. The only major difference is that the bigger dataset includes a cluster of sentences expressing dislike of the book, whereas the smaller dataset includes a cluster of sentences expressing love of the book. But this was to be expected, given the relative proportions of positive and negative reviews between the two datasets.
Given these results, we feel that the smaller dataset is preferable. Its clusters seem slightly more internally coherent and to better capture the general sentiment toward the book.
# 2.18 Programming for Geoscientists class test 2016
# Test instructions
* This test contains **4** questions each of which should be answered.
* Write your program in a Python cell just under each question.
* You can write an explanation of your solution as comments in your code.
* In each case your solution program must fulfil all of the instructions - please check the instructions carefully and double check that your program fulfils all of the given instructions.
* Save your work regularly.
* At the end of the test you should email your IPython notebook document (i.e. this document) to [Gerard J. Gorman](http://www.imperial.ac.uk/people/g.gorman) at [email protected]
**1.** The following cells contain at least one programming bug each. For each cell add a comment to identify and explain the bug, and correct the program.
```
# Function to calculate wave velocity.
def wave_velocity(k, mu, rho):
vp = sqrt((k+4*mu/3)/rho)
return vp
# Use the function to calculate the velocity of an
# acoustic wave in water.
vp = wave_velocity(k=0, mu=2.29e9, rho=1000)
print "Velocity of acoustic wave in water: %d", vp
data = (3.14, 2.29, 10, 12)
data.append(4)
line = "2015-12-14T06:29:15.740Z,19.4333324,-155.2906647,1.66,2.14,ml,17,248,0.0123,0.36,hv,hv61126056,2015-12-14T06:34:58.500Z,5km W of Volcano, Hawaii,earthquake"
latitude = line.split(',')[1]
longitude = line.split(',')[2]
print "longitude, latitude = (%g, %g)"%(longitude, latitude)
```
**2.** The Ricker wavelet is frequently employed to model seismic data. The amplitude of the Ricker wavelet with peak frequency $f$ at time $t$ is computed as:
$$A = (1-2 \pi^2 f^2 t^2) e^{-\pi^2 f^2 t^2}$$
* Implement a function which calculates the amplitude of the Ricker wavelet for a given peak frequency $f$ and time $t$.
* Use a *for loop* to create a python *list* for time ranging from $-0.5$ to $0.5$, using a peak frequency, $f$, of $10$.
* Using the function created above, calculate a numpy array of the Ricker wavelet amplitudes for these times.
* Plot a graph of time against Ricker wavelet.
**3.** The data file [vp.dat](data/vp.dat) (all of the data files are stored in the sub-folder *data/* of this notebook library) contains a profile of the acoustic velocity with respect to depth. Depth is measured with respect to a reference point; therefore the first few entries contain NaN's indicating that they are actually above ground.
* Write a function to read in the depth and acoustic velocity.
* Ensure you skip the entries that contain NaN's.
* Store depth and velocities in two separate numpy arrays.
* Plot depth against velocity ensuring you label your axis.
**4.** The file [BrachiopodBiometrics.csv](data/BrachiopodBiometrics.csv) contains the biometrics of Brachiopods found in 3 different locations.
* Read the data file into a Python *dictionary*.
* You should use the samples location as the *key*.
* For each key you should form a Python *list* containing tuples of *length* and *width* of each sample.
* For each location, calculate the mean length and width of the samples.
* Print the result for each location using a formatted print statement. The mean values should only be printed to within one decimal place.
# OGGM flowlines: where are they?
In this notebook we show how to access the OGGM flowlines location before, during, and after a run.
Some of the code shown here will make it to the OGGM codebase [eventually](https://github.com/OGGM/oggm/issues/1111).
```
from oggm import cfg, utils, workflow, tasks, graphics
from oggm.core import flowline
import salem
import xarray as xr
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
cfg.initialize(logging_level='WARNING')
```
## Get ready
```
# Where to store the data
cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-flowlines', reset=True)
# Which glaciers?
rgi_ids = ['RGI60-11.00897']
# We start from prepro level 3 with all data ready
gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=40)
gdir = gdirs[0]
gdir
```
## Where is the terminus of the RGI glacier?
There are several ways to get the terminus, depending on what you want. They also do not necessarily give exactly the same point:
### Terminus as the lowest point on the glacier
```
# Get the topo data and the glacier mask
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
topo = ds.topo
# Glacier outline raster
mask = ds.glacier_ext
topo.plot();
topo_ext = topo.where(mask==1)
topo_ext.plot();
# Get the terminus
terminus = topo_ext.where(topo_ext==topo_ext.min(), drop=True)
# Project its coordinates from the local UTM to WGS-84
t_lon, t_lat = salem.transform_proj(gdir.grid.proj, 'EPSG:4326', terminus.x[0], terminus.y[0])
print('lon, lat:', t_lon, t_lat)
print('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')
```
### Terminus as the lowest point on the main centerline
```
# Get the centerlines
cls = gdir.read_pickle('centerlines')
# Get the coord of the last point of the main centerline
cl = cls[-1]
i, j = cl.line.coords[-1]
# These coords are in glacier grid coordinates. Let's convert them to lon, lat:
t_lon, t_lat = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326')
print('lon, lat:', t_lon, t_lat)
print('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')
```
### Terminus as the lowest point on the main flowline
"centerline" in the OGGM jargon is not the same as "flowline". Flowlines have a fixed dx and their terminus is not necessarily exact on the glacier outline. Code-wise it's very similar though:
```
# Get the flowlines
cls = gdir.read_pickle('inversion_flowlines')
# Get the coord of the last point of the main centerline
cl = cls[-1]
i, j = cl.line.coords[-1]
# These coords are in glacier grid coordinates. Let's convert them to lon, lat:
t_lon, t_lat = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326')
print('lon, lat:', t_lon, t_lat)
print('google link:', f'https://www.google.com/maps/place/{t_lat},{t_lon}')
```
### Bonus: convert the centerlines to a shapefile
```
output_dir = utils.mkdir('outputs')
utils.write_centerlines_to_shape(gdirs, path=f'{output_dir}/centerlines.shp')
sh = gpd.read_file(f'{output_dir}/centerlines.shp')
sh.plot();
```
Remember: the "centerlines" are not the same things as "flowlines" in OGGM. The latter objects undergo further quality checks, such as forbidding ice to "climb", i.e. ruling out negative slopes along the flowline. The flowlines are therefore sometimes shorter than the centerlines:
```
utils.write_centerlines_to_shape(gdirs, path=f'{output_dir}/flowlines.shp', flowlines_output=True)
sh = gpd.read_file(f'{output_dir}/flowlines.shp')
sh.plot();
```
## Flowline geometry after a run: with the new flowline diagnostics (new in v1.6.0!!)
```
# TODO!!! Based on https://github.com/OGGM/oggm/pull/1308
```
## Flowline geometry after a run: with `FileModel`
Let's do a run first:
```
cfg.PARAMS['store_model_geometry'] = True # We want to get back to it later
tasks.init_present_time_glacier(gdir)
tasks.run_constant_climate(gdir, nyears=100, y0=2000);
```
We use a `FileModel` to read the model output:
```
fmod = flowline.FileModel(gdir.get_filepath('model_geometry'))
```
A `FileModel` behaves like an OGGM `FlowlineModel`:
```
fmod.run_until(0) # Point the file model to year 0 in the output
graphics.plot_modeloutput_map(gdir, model=fmod) # plot it
fmod.run_until(100) # Point the file model to year 100 in the output
graphics.plot_modeloutput_map(gdir, model=fmod) # plot it
# Bonus - get back to e.g. the volume timeseries
fmod.volume_km3_ts().plot();
```
OK, now create a table of the main flowline's grid points location and bed altitude (this does not change with time):
```
fl = fmod.fls[-1] # Main flowline
i, j = fl.line.xy # xy flowline on grid
lons, lats = gdir.grid.ij_to_crs(i, j, crs='EPSG:4326') # to WGS84
df_coords = pd.DataFrame(index=fl.dis_on_line*gdir.grid.dx)
df_coords.index.name = 'Distance along flowline'
df_coords['lon'] = lons
df_coords['lat'] = lats
df_coords['bed_elevation'] = fl.bed_h
df_coords.plot(x='lon', y='lat');
df_coords['bed_elevation'].plot();
```
Now store time-varying arrays of ice thickness and surface elevation along this line:
```
years = np.arange(0, 101)
df_thick = pd.DataFrame(index=df_coords.index, columns=years, dtype=np.float64)
df_surf_h = pd.DataFrame(index=df_coords.index, columns=years, dtype=np.float64)
df_bed_h = pd.DataFrame()
for year in years:
fmod.run_until(year)
fl = fmod.fls[-1]
df_thick[year] = fl.thick
df_surf_h[year] = fl.surface_h
df_thick[[0, 50, 100]].plot();
plt.title('Ice thickness at three points in time');
f, ax = plt.subplots()
df_surf_h[[0, 50, 100]].plot(ax=ax);
df_coords['bed_elevation'].plot(ax=ax, color='k');
plt.title('Glacier elevation at three points in time');
```
### Location of the terminus over time
Let's find the indices where the terminus is (i.e. the last point where ice is thicker than 1m), and link these to the lon, lat positions along the flowlines.
The first method uses fancy pandas functions but may be more cryptic for less experienced pandas users:
```
# Nice trick from https://stackoverflow.com/questions/34384349/find-index-of-last-true-value-in-pandas-series-or-dataframe
dis_term = (df_thick > 1)[::-1].idxmax()
# Select the terminus coordinates at these locations
loc_over_time = df_coords.loc[dis_term].set_index(dis_term.index)
# Plot them over time
loc_over_time.plot.scatter(x='lon', y='lat', c=loc_over_time.index, colormap='viridis');
plt.title('Location of the terminus over time');
# Plot them on a google image - you need an API key for this
# api_key = ''
# from motionless import DecoratedMap, LatLonMarker
# dmap = DecoratedMap(maptype='satellite', key=api_key)
# for y in [0, 20, 40, 60, 80, 100]:
# tmp = loc_over_time.loc[y]
# dmap.add_marker(LatLonMarker(tmp.lat, tmp.lon, ))
# print(dmap.generate_url())
```
<img src='https://maps.googleapis.com/maps/api/staticmap?key=AIzaSyDWG_aTgfU7CeErtIzWfdGxpStTlvDXV_o&maptype=satellite&format=png&scale=1&size=400x400&sensor=false&language=en&markers=%7C46.818796056851475%2C10.802746777546085%7C46.81537664036365%2C10.793672904092187%7C46.80792268953582%2C10.777563608554978%7C46.7953190811109%2C10.766412086223571%7C46.79236232808986%2C10.75236937607986%7C46.79236232808986%2C10.75236937607986'>
And now, method 2: less fancy but maybe easier to read?
```
for yr in [0, 20, 40, 60, 80, 100]:
# Find the last index of the terminus
p_term = np.nonzero(df_thick[yr].values > 1)[0][-1]
# Print the location of the terminus
print(f'Terminus pos at year {yr}', df_coords.iloc[p_term][['lon', 'lat']].values)
```
## Comments on "elevation band flowlines"
If you use elevation band flowlines, the location of the flowlines is not known: indeed, the glacier is an even more simplified representation of the real-world one. In this case, if you are interested in tracking the terminus position, you may need to use workarounds, such as tracking how far the terminus retreats along the flowline over time, or similar.
## What's next?
- return to the [OGGM documentation](https://docs.oggm.org)
- back to the [table of contents](welcome.ipynb)
# Introduction to Machine Learning Nanodegree
## Project: Finding Donors for *CharityML*
In this project, we employ several supervised algorithms to accurately model individuals' income using data collected from the 1994 U.S. Census. The best candidate algorithm is then chosen from preliminary results and is further optimized to best model the data. The goal with this implementation is to construct a model that accurately predicts whether an individual makes more than \$50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publicly available features.
The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The dataset was donated by Ron Kohavi and Barry Becker, after being published in the article _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. You can find the article by Ron Kohavi [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.
----
## Exploring the Data
Run the code cell below to load necessary Python libraries and load the census data. Note that the last column from this dataset, `'income'`, will be our target label (whether an individual makes more than, or at most, $50,000 annually). All other columns are features about each individual in the census database.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the Census dataset
data = pd.read_csv("census.csv")
# Display the first record
display(data.head(5))
```
### Implementation: Data Exploration
A cursory investigation of the dataset will determine how many individuals fit into either group, and will tell us about the percentage of these individuals making more than \$50,000. In the code cell below, the following information is computed:
- The total number of records, `'n_records'`
- The number of individuals making more than \$50,000 annually, `'n_greater_50k'`.
- The number of individuals making at most \$50,000 annually, `'n_at_most_50k'`.
- The percentage of individuals making more than \$50,000 annually, `'greater_percent'`.
```
# Total number of records
n_records = data.shape[0]
# Number of records where individual's income is more than $50,000
n_greater_50k = data['income'].value_counts()[1]
# Number of records where individual's income is at most $50,000
n_at_most_50k = data['income'].value_counts()[0]
# Percentage of individuals whose income is more than $50,000
greater_percent = 100 * (n_greater_50k / (n_greater_50k + n_at_most_50k))
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# Check whether records are consistent
if n_records == (n_greater_50k + n_at_most_50k):
print('Records are consistent!')
```
**Featureset Exploration**
* **age**: continuous.
* **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
* **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
* **education-num**: continuous.
* **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
* **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
* **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
* **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other.
* **sex**: Female, Male.
* **capital-gain**: continuous.
* **capital-loss**: continuous.
* **hours-per-week**: continuous.
* **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
----
## Preparing the Data
Before data can be used as input for machine learning algorithms, it often must be cleaned, formatted, and restructured; this is typically known as **preprocessing**. Fortunately, for this dataset, there are no invalid or missing entries we must deal with; however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
### Transforming Skewed Continuous Features
A dataset may sometimes contain at least one feature whose values tend to lie near a single number, but will also have a non-trivial number of vastly larger or smaller values than that single number. Algorithms can be sensitive to such distributions of values and can underperform if the range is not properly normalized. With the census dataset two features fit this description: `'capital-gain'` and `'capital-loss'`.
Run the code cell below to plot a histogram of these two features. Note the range of the values present and how they are distributed.
```
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
```
For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a <a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">logarithmic transformation</a> on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. Care must be taken when applying this transformation however: The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
Run the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
```
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
```
### Normalizing Numerical Features
In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Applying a scaling to the data does not change the shape of each feature's distribution (such as `'capital-gain'` or `'capital-loss'` above); however, normalization ensures that each feature is treated equally when applying supervised learners. Note that once scaling is applied, observing the data in its raw form will no longer have the same original meaning, as exampled below.
Run the code cell below to normalize each numerical feature. We will use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) for this.
```
# Import sklearn.preprocessing.MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
display(features_log_minmax_transform.head(n = 5))
```
### Data Preprocessing
From the table in **Exploring the Data** above, we can see there are several features for each record that are non-numeric. Typically, learning algorithms expect input to be numeric, which requires that non-numeric features (called *categorical variables*) be converted. One popular way to convert categorical variables is by using the **one-hot encoding** scheme. One-hot encoding creates a _"dummy"_ variable for each possible category of each non-numeric feature. For example, assume `someFeature` has three possible entries: `A`, `B`, or `C`. We then encode this feature into `someFeature_A`, `someFeature_B` and `someFeature_C`.
| | someFeature | | someFeature_A | someFeature_B | someFeature_C |
| :-: | :-: | | :-: | :-: | :-: |
| 0 | B | | 0 | 1 | 0 |
| 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 |
| 2 | A | | 1 | 0 | 0 |
Additionally, as with the non-numeric features, we need to convert the non-numeric target label, `'income'`, to numerical values for the learning algorithm to work. Since there are only two possible categories for this label ("<=50K" and ">50K"), we can avoid using one-hot encoding and simply encode these two categories as `0` and `1`, respectively. In the code cell below, you will need to implement the following:
- Use [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html?highlight=get_dummies#pandas.get_dummies) to perform one-hot encoding on the `'features_log_minmax_transform'` data.
- Convert the target label `'income_raw'` to numerical entries.
- Set records with "<=50K" to `0` and records with ">50K" to `1`.
```
# One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# Encode the 'income_raw' data to numerical values
income = income_raw.replace(to_replace = {'<=50K': 0, '>50K': 1})
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
#print(encoded)
```
### Shuffle and Split Data
Now all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. As always, we will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.
Run the code cell below to perform this split.
```
# Import train_test_split
from sklearn.model_selection import train_test_split
# Split the 'features' and 'income' data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features_final,
income,
test_size = 0.2,
random_state = 0)
# Show the results of the split
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
```
----
## Evaluating Model Performance
In this section, we will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of our choice, and the fourth algorithm is known as a *naive predictor*.
### Metrics and the Naive Predictor
*CharityML*, equipped with their research, knows individuals that make more than \$50,000 are most likely to donate to their charity. Because of this, *CharityML* is particularly interested in predicting who makes more than \$50,000 accurately. It would seem that using **accuracy** as a metric for evaluating a particular model's performance would be appropriate. Additionally, identifying someone that *does not* make more than \$50,000 as someone who does would be detrimental to *CharityML*, since they are looking to find individuals willing to donate. Therefore, a model's ability to precisely predict those that make more than \$50,000 is *more important* than the model's ability to **recall** those individuals. We can use **F-beta score** as a metric that considers both precision and recall:
$$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$
In particular, when $\beta = 0.5$, more emphasis is placed on precision. This is called the **F$_{0.5}$ score** (or F-score for simplicity).
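For concreteness, here is a small sketch of how the F$_{0.5}$ score follows from the formula above; the precision and recall values are made up purely for illustration.
```
# Illustrative example with made-up precision and recall values
precision_example = 0.8
recall_example = 0.6
beta = 0.5

# F-beta score, following the formula above
f_beta_example = (1 + beta**2) * (precision_example * recall_example) / \
                 (beta**2 * precision_example + recall_example)
print("Example F_0.5 score: {:.4f}".format(f_beta_example))
```
When working with label arrays rather than precomputed precision and recall, the same quantity can be obtained from `sklearn.metrics.fbeta_score(y_true, y_pred, beta=0.5)`, which is what the training pipeline below uses.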
Looking at the distribution of classes (those who make at most 50,000, and those who make more), it's clear most individuals do not make more than 50,000. This can greatly affect accuracy, since we could simply say "this person does not make more than 50,000" and generally be right, without ever looking at the data! Making such a statement would be called naive, since we have not considered any information to substantiate the claim. It is always important to consider the naive prediction for your data, to help establish a benchmark for whether a model is performing well. That being said, using that prediction would be pointless: if we predicted all people made less than 50,000, CharityML would identify no one as donors.
#### Note: Recap of accuracy, precision, recall
**Accuracy** measures how often the classifier makes the correct prediction. It's the ratio of the number of correct predictions to the total number of predictions (the number of test data points).
**Precision** tells us what proportion of messages we classified as spam, actually were spam.
It is a ratio of true positives (words classified as spam, and which are actually spam) to all positives (all words classified as spam, irrespective of whether that was the correct classification), in other words it is the ratio of
`[True Positives/(True Positives + False Positives)]`
**Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam.
It is a ratio of true positives (words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of
`[True Positives/(True Positives + False Negatives)]`
For classification problems that are skewed in their classification distributions like in our case, for example if we had 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam (including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam (all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is the weighted average (harmonic mean) of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score (we take the harmonic mean as we are dealing with ratios).
### Naive Predictor Performace
If we chose a model that always predicted an individual made more than $50,000, what would that model's accuracy and F-score be on this dataset? You must use the code cell below and assign your results to `'accuracy'` and `'fscore'` to be used later.
**Please note** that the purpose of generating a naive predictor is simply to show what a base model without any intelligence would look like. In the real world, ideally your base model would be either the results of a previous model or could be based on a research paper upon which you are looking to improve. When there is no benchmark model set, getting a result better than random choice is a place you could start from.
**Notes:**
* When we have a model that always predicts '1' (i.e. the individual makes more than 50k) then our model will have no True Negatives(TN) or False Negatives(FN) as we are not making any negative('0' value) predictions. Therefore our Accuracy in this case becomes the same as our Precision(True Positives/(True Positives + False Positives)) as every prediction that we have made with value '1' that should have '0' becomes a False Positive; therefore our denominator in this case is the total number of records we have in total.
* Our Recall score(True Positives/(True Positives + False Negatives)) in this setting becomes 1 as we have no False Negatives.
```
'''
TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data
encoded to numerical values done in the data preprocessing step.
FP = income.count() - TP # Specific to the naive case
TN = 0 # No predicted negatives in the naive case
FN = 0 # No predicted negatives in the naive case
'''
# Calculate accuracy, precision and recall
TP = np.sum(income)
FP = income.count() - TP
TN, FN = 0, 0
accuracy = (TP + TN) / (TP + TN + FP + FN)
recall = TP / (TP + FN)
precision = TP / (TP + FP)
# Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.
beta = 0.5 # Define beta
fscore = (1 + beta**2) * (precision * recall) / (beta**2 * precision + recall)
# Print the results
print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))
```
### Supervised Learning Models
**The following are some of the supervised learning models that are currently available in** [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html) **that you may choose from:**
- Gaussian Naive Bayes (GaussianNB)
- Decision Trees
- Ensemble Methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)
- K-Nearest Neighbors (KNeighbors)
- Stochastic Gradient Descent Classifier (SGDC)
- Support Vector Machines (SVM)
- Logistic Regression
### Model Application
List three of the supervised learning models above that are appropriate for this problem that you will test on the census data. For each model chosen
- Describe one real-world application in industry where the model can be applied.
- What are the strengths of the model; when does it perform well?
- What are the weaknesses of the model; when does it perform poorly?
- What makes this model a good candidate for the problem, given what you know about the data?
### Decision Trees
**Describe one real-world application in industry where the model can be applied.**
Decision trees can be used for "Identifying Defective Products in the
Manufacturing Process". [1]
In this regard, decision trees are used as a classification algorithm that is trained on data with features of products that the company manufactures, as well as labels "Defective" and "Non-defective".
After training process, the model should be able to group products into "Defective" and "Non-defective" categories and predict whether a manufactured product is defective or not.
**What are the strengths of the model; when does it perform well?**
1. The data pre-processing step for decision trees requires less effort compared to other algorithms (e.g. no need to normalize/scale data or impute missing values). [2]
2. The way the algorithm works is very intuitive, and thus easier to understand and explain. In addition, they can be used as a white box model. [3]
**What are the weaknesses of the model; when does it perform poorly?**
1. Because decision trees are so simple there is often a need for more complex algorithms (e.g. Random Forest) to achieve a higher accuracy. [3]
2. Decision trees have the tendency to overfit the training set. [3]
3. Decision trees are unstable. The reproducibility of a decision tree model is unreliable since the structure is sensitive to even to small changes in the data. [3]
4. Decision trees can get complex and computationally expensive. [3]
**What makes this model a good candidate for the problem, given what you know about the data?**
I think this model is a good candidate in this situation because, as a white-box model trained on well-defined features, it might provide further insights that CharityML can rely on.
For example, CharityML identified that the most relevant parameter when it comes to determining donation likelihood is individual income.
A decision tree model may find highly accurate predictors of income that can simplify the current process and help draw more valuable conclusions such as this one.
Moreover, due to the algorithm's simplicity, the charity members will have the capacity to intuitively understand its basic internal processes.
**References**
[[1]](http://www.kpubs.org/article/articleDownload.kpubs?downType=pdf&articleANo=E1CTBR_2017_v13n2_57)
[[2]](https://medium.com/@dhiraj8899/top-5-advantages-and-disadvantages-of-decision-tree-algorithm-428ebd199d9a)
[[3]](https://botbark.com/2019/12/19/top-6-advantages-and-disadvantages-of-decision-tree-algorithm/)
### Ensemble Methods (AdaBoost)
**Describe one real-world application in industry where the model can be applied.**
The AdaBoost algorithm can be applied for "Telecommunication Fraud Detection". [1]
The model is trained using features of past telecommunication messages (features) along with whether they ended up being fraudulent or not (labels).
Then, the AdaBoost model should be able to predict whether future telecommunication material is fraudulent or not.
**What are the strengths of the model; when does it perform well?**
1. High flexibility. Different classification algorithms (decision trees, SVMs, etc.) can be used as weak learners to finally constitute a strong learner (final model). [2]
2. High precision. Experiments have shown AdaBoost models to achieve relatively high precision when making predictions. [3]
3. Simple preprocessing. AdaBoost algorithms are not too demanding when it comes to preprocessed data, thus more time is saved during the pre-processing step. [4]
**What are the weaknesses of the model; when does it perform poorly?**
1. Sensitive to noise data and outliers. [4]
2. Requires quality data because the boosting technique learns progressively and is prone to error. [4]
3. Low Accuracy when Data is Imbalanced. [3]
4. Training is mildly computationally expensive, and thus it can be time-consuming. [3]
**What makes this model a good candidate for the problem, given what you know about the data?**
AdaBoost will be tried as an alternative to decision trees with stronger predictive capacity.
An AdaBoost model is a good candidate because it can provide improvements over decision trees to valuable metrics such as accuracy and precision.
Since it has been shown that this algorithm can achieve relatively high precision (which is what we are looking for in this problem), this aspect of it will also benefit us.
**References**
[[1]](https://download.atlantis-press.com/article/25896505.pdf)
[[2]](https://www.educba.com/adaboost-algorithm/)
[[3]](https://easyai.tech/en/ai-definition/adaboost/#:~:text=AdaBoost%20is%20adaptive%20in%20a,problems%20than%20other%20learning%20algorithms.)
[[4]](https://blog.paperspace.com/adaboost-optimizer/)
### Support Vector Machines
**Describe one real-world application in industry where the model can be applied.**
SVM's can be applied in bioinformatics. [1]
For example, an SVM model can be trained on data involving features of cancer tumours and then be able to identify whether a tumour is benign or malignant (labels).
**What are the strengths of the model; when does it perform well?**
1. Effective in high dimensional spaces (i.e. when there are numerous features). [2]
2. Generally good algorithm. SVMs are good when we have almost no information about the data. [3]
3. Relatively low risk of overfitting. This is due to its L2 Regularisation feature. [4]
4. High flexibility. Can handle linear & non-linear data due to variety added by different kernel functions. [3]
5. Stability. Since a small change to the data does not greatly affect the hyperplane. [4]
6. SVM is defined by a convex optimisation problem (i.e. no local minima) [4]
**What are the weaknesses of the model; when does it perform poorly?**
1. Training is very computationally expensive (high memory requirement) and thus it can be time-consuming, especially for large datasets [3]
2. Sensitive to noisy data, i.e. when the target classes are overlapping [2]
3. Hyperparameters can be difficult to tune. (Kernel, C parameter, gamma)
e.g. when choosing a Kernel, if you always go with high-dimensional ones you might generate too many support vectors and reduce training speed drastically. [4]
4. Difficult to understand and interpret, particularly with high dimensional data. Also, the final model is not easy to see, so we cannot do small calibrations based on business intuition. [3]
5. Requires feature scaling. [4]
**What makes this model a good candidate for the problem, given what you know about the data?**
Given what we know about the data, SVM would be a good choice since it can handle its multiple dimensions.
It will also add variety when compared to decision trees and AdaBoost, potentially yielding better results due to its vastly different mechanism.
**References**
[[1]](https://data-flair.training/blogs/applications-of-svm/)
[[2]](https://medium.com/@dhiraj8899/top-4-advantages-and-disadvantages-of-support-vector-machine-or-svm-a3c06a2b107)
[[3]](https://statinfer.com/204-6-8-svm-advantages-disadvantages-applications/)
[[4]](http://theprofessionalspoint.blogspot.com/2019/03/advantages-and-disadvantages-of-svm.html)
### Creating a Training and Predicting Pipeline
To properly evaluate the performance of each model you've chosen, it's important that you create a training and predicting pipeline that allows you to quickly and effectively train models using various sizes of training data and perform predictions on the testing data. Your implementation here will be used in the following section.
In the code block below, you will need to implement the following:
- Import `fbeta_score` and `accuracy_score` from [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics).
- Fit the learner to the sampled training data and record the training time.
- Perform predictions on the test data `X_test`, and also on the first 300 training points `X_train[:300]`.
- Record the total prediction time.
- Calculate the accuracy score for both the training subset and testing set.
- Calculate the F-score for both the training subset and testing set.
- Make sure that you set the `beta` parameter!
```
# Import two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score, accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
results = {}
# Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
start = time() # Get start time
learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
end = time() # Get end time
# Calculate the training time
results['train_time'] = end - start
# Get the predictions on the test set(X_test),
# then get predictions on the first 300 training samples(X_train) using .predict()
start = time() # Get start time
predictions_test = learner.predict(X_test)
predictions_train = learner.predict(X_train[:300])
end = time() # Get end time
# Calculate the total prediction time
results['pred_time'] = end - start
# Compute accuracy on the first 300 training samples
results['acc_train'] = accuracy_score(y_train[:300], predictions_train)
# Compute accuracy on test set using accuracy_score()
results['acc_test'] = accuracy_score(y_test, predictions_test)
# Compute F-score on the the first 300 training samples using fbeta_score()
results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=beta)
# Compute F-score on the test set which is y_test
results['f_test'] = fbeta_score(y_test, predictions_test, beta=beta)
# Success
print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
# Return the results
return results
```
### Initial Model Evaluation
In the code cell, you will need to implement the following:
- Import the three supervised learning models you've discussed in the previous section.
- Initialize the three models and store them in `'clf_A'`, `'clf_B'`, and `'clf_C'`.
- Use a `'random_state'` for each model you use, if provided.
- **Note:** Use the default settings for each model; you will tune one specific model in a later section.
- Calculate the number of records equal to 1%, 10%, and 100% of the training data.
- Store those values in `'samples_1'`, `'samples_10'`, and `'samples_100'` respectively.
**Note:** Depending on which algorithms you chose, the following implementation may take some time to run!
```
# Import the three supervised learning models from sklearn
# Import Algorithms
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
# Initialize the three models
clf_A = DecisionTreeClassifier(random_state=42)
clf_B = AdaBoostClassifier(random_state=42)
clf_C = SVC(random_state=42)
# Calculate the number of samples for 1%, 10%, and 100% of the training data
samples_100 = len(y_train)
samples_10 = int(0.1*len(y_train))
samples_1 = int(0.01*len(y_train))
# Collect results on the learners
results = {}
for clf in [clf_A, clf_B, clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test)
# Run metrics visualization for the three supervised learning models chosen
vs.evaluate(results, accuracy, fscore)
```
----
## Improving Results
In this final section, you will choose from the three supervised learning models the *best* model to use on the student data. You will then perform a grid search optimization for the model over the entire training set (`X_train` and `y_train`) by tuning at least one parameter to improve upon the untuned model's F-score.
### Choosing the Best Model
Based on the evaluation you performed earlier, in one to two paragraphs, explain to *CharityML* which of the three models you believe to be most appropriate for the task of identifying individuals that make more than \$50,000.
##### AdaBoost
According to the analysis, the most appropriate model for identifying individuals who make more than \$50,000 is the AdaBoost model. This is because of the following reasons:
- AdaBoost yields the best accuracy and F-score on the testing data, meaning that to maximise the number of true potential donors, it is the ideal model to choose.
- The 2nd best competitor (namely, SVM) has a slightly higher tendency to overfit, and is significantly more time-consuming to train.
- AdaBoost is suitable for the given dataset because it yields high precision (i.e. few false positives, which is what we want), and will allow us to interpret the results for potential calibration more readily than an SVM model would.
### Describing the Model in Layman's Terms
In one to two paragraphs, explain to *CharityML*, in layman's terms, how the final model chosen is supposed to work. Be sure that you are describing the major qualities of the model, such as how the model is trained and how the model makes a prediction. Avoid using advanced mathematical jargon, such as describing equations.
##### Introduction
AdaBoost is a model that belongs to a group of models called "Ensemble Methods".
As the name suggests, the model trains weaker models on the data (also known as "weak learners"), and then combines them into a single, more powerful model (which we call a "strong learner").
##### Training the AdaBoost Model
In our case, we feed the model the training data from our dataset, and it fits a simple "weak learner" to the data. Then, it increases the weight of the examples that the first learner got wrong, and fits a second learner that focuses on correcting those mistakes. A third weak learner does the same for the second one, and this process repeats until enough learners have been trained.
Then, the algorithm assigns a weight to each weak learner based on its performance, and combines all the weak learners into a single **Strong Learner**.
When combining the weak learners, the ones with the stronger weights (i.e. the more successful ones) will get more of a say on how the final model is structured.
##### AdaBoost Predictions
After training the model, we will be able to feed it unseen examples (i.e. new individuals), and the model will use what it learned from the previous individuals to predict whether or not they make more than \$50,000 per year.
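As a rough illustration of this idea (not the tuned model used later), here is a minimal sketch that builds an AdaBoost ensemble of decision stumps on the training split defined earlier and inspects the per-learner weights:
```
# Minimal sketch of the "weak learners + weights" idea described above.
# Assumes X_train and y_train from the earlier preprocessing cells.
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
# Each weak learner is a one-level decision tree (a "stump").
stump = DecisionTreeClassifier(max_depth=1)
# AdaBoost fits 50 stumps in sequence, re-weighting the training points that the
# previous stumps got wrong. (base_estimator is named estimator in newer scikit-learn.)
boosted = AdaBoostClassifier(base_estimator=stump, n_estimators=50, random_state=42)
boosted.fit(X_train, y_train)
# The per-stump weights: more accurate stumps get more say in the final vote.
print(boosted.estimator_weights_[:5])
```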
### Model Tuning
Fine tune the chosen model. Use grid search (`GridSearchCV`) with at least one important parameter tuned with at least 3 different values. You will need to use the entire training set for this. In the code cell below, you will need to implement the following:
- Import [`sklearn.grid_search.GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) and [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html).
- Initialize the classifier you've chosen and store it in `clf`.
- Set a `random_state` if one is available to the same state you set before.
- Create a dictionary of parameters you wish to tune for the chosen model.
- Example: `parameters = {'parameter' : [list of values]}`.
- **Note:** Avoid tuning the `max_features` parameter of your learner if that parameter is available!
- Use `make_scorer` to create an `fbeta_score` scoring object (with $\beta = 0.5$).
- Perform grid search on the classifier `clf` using the `'scorer'`, and store it in `grid_obj`.
- Fit the grid search object to the training data (`X_train`, `y_train`), and store it in `grid_fit`.
**Note:** Depending on the algorithm chosen and the parameter list, the following implementation may take some time to run!
```
# Import 'GridSearchCV', 'make_scorer', and any other necessary libraries
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
# Initialize the classifier
clf = AdaBoostClassifier(random_state=42)
# Create the parameters list you wish to tune, using a dictionary if needed.
parameters = {'n_estimators': [500, 1000, 1500, 2000], 'learning_rate': np.linspace(0.001, 1, 10)}
# Make an fbeta_score scoring object using make_scorer()
scorer = make_scorer(fbeta_score, beta=beta)
# Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV()
grid_obj = GridSearchCV(clf, parameters, scoring=scorer, n_jobs = -1)
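# Note: GridSearchCV refits the best parameter combination on the full training
# set by default (refit=True), so best_estimator_ below can be used directly.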
# Fit the grid search object to the training data and find the optimal parameters using fit()
start = time()
grid_fit = grid_obj.fit(X_train, y_train)
end = time()
print('Time to tune: ', end - start)
# Get the estimator
best_clf = grid_fit.best_estimator_
# Make predictions using the unoptimized and optimized models
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Check hyperparameters
print(clf)
print(best_clf)
# Report the before-and-after scores
print("Unoptimized model\n------")
print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5)))
print("\nOptimized Model\n------")
print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
```
### Final Model Evaluation
* What is your optimized model's accuracy and F-score on the testing data?
* Are these scores better or worse than the unoptimized model?
* How do the results from your optimized model compare to the naive predictor benchmarks you found earlier in **Question 1**?
#### Results:
| Metric | Unoptimized Model | Optimized Model |
| :------------: | :---------------: | :-------------: |
| Accuracy Score | 0.8576 | 0.8676 |
| F-score | 0.7246 | 0.7456 |
**Discussion**
My optimised model's accuracy on the testing data is 86.76% and its F-score (beta = 0.5) is 0.7456, as shown in the table above.
These scores are slightly better than the unoptimised model's: accuracy improved by ~1.2% and the F-score by ~2.9%.
The scores are also significantly better than the naive predictor's: accuracy is roughly 3.5 times higher and the F-score roughly 2.5 times higher.
----
## Feature Importance
An important task when performing supervised learning on a dataset like the census data we study here is determining which features provide the most predictive power. By focusing on the relationship between only a few crucial features and the target label we simplify our understanding of the phenomenon, which is almost always a useful thing to do. In the case of this project, that means we wish to identify a small number of features that most strongly predict whether an individual makes at most or more than \$50,000.
Here, we choose a scikit-learn classifier (e.g., AdaBoost, random forests) that has a `feature_importances_` attribute, which ranks the importance of the features according to the chosen classifier. In a code cell further below, we fit this classifier to the training set and use this attribute to determine the top 5 most important features for the census dataset.
### Feature Relevance Observation
When **Exploring the Data**, it was shown there are thirteen available features for each individual on record in the census data. Of these thirteen records, which five features do you believe to be most important for prediction, and in what order would you rank them and why?
**Answer:**
1. **Occupation**. I would expect the job that a person has to be a good predictor of income.
2. **Hours per week**. The more hours you work, the more you earn.
3. **Education Number**. Because of the positive correlation between education level and income.
4. **Age**. Older people who've had longer careers usually have a higher income.
5. **Native Country**. Because a US worker typically earns significantly more than, say, a worker in Argentina.
### Feature Importance
Choose a `scikit-learn` supervised learning algorithm that has a `feature_importances_` attribute available for it. This attribute ranks the importance of each feature when making predictions based on the chosen algorithm.
In the code cell below, you will need to implement the following:
- Import a supervised learning model from sklearn if it is different from the three used earlier.
- Train the supervised model on the entire training set.
- Extract the feature importances using `'.feature_importances_'`.
```
# Import a supervised learning model that has 'feature_importances_'
from sklearn.ensemble import AdaBoostClassifier
# Train the supervised model on the training set using .fit(X_train, y_train)
model = AdaBoostClassifier().fit(X_train, y_train)
# Extract the feature importances using .feature_importances_
importances = model.feature_importances_
# Plot
vs.feature_plot(importances, X_train, y_train)
```
### Extracting Feature Importance
Observe the visualization created above which displays the five most relevant features for predicting if an individual makes at most or above \$50,000.
* How do these five features compare to the five features you discussed in **Question 6**?
* If you were close to the same answer, how does this visualization confirm your thoughts?
* If you were not close, why do you think these features are more relevant?
**Answer:**
* *How do these five features compare to the five features you discussed in **Question 6**?*
These five features are significantly different to what I predicted in question 6. While I did mention age, hours-per-week and education-num, I failed to mention two of the most significant features: capital-loss and capital-gain, which together amount to about 37% cumulative feature weight.
* *If you were close to the same answer, how does this visualization confirm your thoughts?*
This visualisation confirms that age plays a large role and that hours-per-week and education-num are among the most relevant features.
This is because of the direct and strong correlation between these variables and individual income.
* *If you were not close, why do you think these features are more relevant?*
I was genuinely surprised that occupation did not make it into the top 5. I suppose that is because the occupations in the dataset just do not have a large discrepancy in income, whereas capital-loss and capital-gain vary more among individuals and more directly affect their income. Similarly, regarding native-country, I suppose most people were from the US or a similarly developed country, so the feature didn't have great predictive power.
### Feature Selection
How does a model perform if we only use a subset of all the available features in the data? With fewer features required to train, the expectation is that training and prediction times are much lower, at the cost of performance metrics. From the visualization above, we see that the top five most important features contribute more than half of the importance of **all** features present in the data. This hints that we can attempt to *reduce the feature space* and simplify the information required for the model to learn. The code cell below will use the same optimized model you found earlier, and train it on the same training set *with only the top five important features*.
```
# Import functionality for cloning a model
from sklearn.base import clone
# Reduce the feature space
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]
# Train on the "best" model found from grid search earlier
clf = (clone(best_clf)).fit(X_train_reduced, y_train)
# Make new predictions
reduced_predictions = clf.predict(X_test_reduced)
# Report scores from the final model using both versions of data
print("Final Model trained on full data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
print("\nFinal Model trained on reduced data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5)))
```
### Effects of Feature Selection
* How does the final model's F-score and accuracy score on the reduced data using only five features compare to those same scores when all features are used?
* If training time was a factor, would you consider using the reduced data as your training set?
**Answer:**
The model trained on the reduced data gets roughly 2% more of the testing examples wrong, and its F-score is about 0.04 lower.
If training time were a factor, I would probably still not use the reduced data as my training set.
However, if adding more training examples yielded a significant improvement, I would recommend the lower-dimensional data so that we could afford to train on more examples.
----
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.linear_model import SGDRegressor
#
# load the data
#
df = pd.read_csv('../Datasets/synth_temp.csv')
#
# slice 1902 and forward
#
df = df.loc[df.Year > 1901]
#
# roll up by year
#
df_group_year = df.groupby(['Year']).agg({'RgnAvTemp' : 'mean'})
#
# add the Year column so we can use that in a model
#
df_group_year['Year'] = df_group_year.index
df_group_year = df_group_year.rename(columns = {'RgnAvTemp' : 'AvTemp'})
#
# scale the data
#
X_min = df_group_year.Year.min()
X_range = df_group_year.Year.max() - df_group_year.Year.min()
Y_min = df_group_year.AvTemp.min()
Y_range = df_group_year.AvTemp.max() - df_group_year.AvTemp.min()
scale_X = (df_group_year.Year - X_min) / X_range
#
train_X = scale_X.ravel()
train_Y = ((df_group_year.AvTemp - Y_min) / Y_range).ravel()
#
# create the model object
#
np.random.seed(42)
model = SGDRegressor(
loss = 'squared_loss',
max_iter = 100,
learning_rate = 'constant',
eta0 = 0.0005,
tol = 0.00009,
penalty = 'none')
#
# fit the model
#
model.fit(train_X.reshape((-1, 1)), train_Y)
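#
# convert the fitted coefficients from the scaled space back to the original units:
# the model fits Y_scaled = b0 + b1 * X_scaled with X_scaled = (Year - X_min) / X_range
# and Y_scaled = (AvTemp - Y_min) / Y_range, so substituting back gives the
# intercept (Beta0) and slope (Beta1, degrees C per year) in the original units
#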
Beta0 = (Y_min + Y_range * model.intercept_[0] -
Y_range * model.coef_[0] * X_min / X_range)
Beta1 = Y_range * model.coef_[0] / X_range
print(Beta0)
print(Beta1)
#
# generate predictions
#
pred_X = df_group_year['Year']
pred_Y = model.predict(train_X.reshape((-1, 1)))
#
# calculate the r squared value
#
r2 = r2_score(train_Y, pred_Y)
print('r squared = ', r2)
#
# scale predictions back to real values
#
pred_Y = (pred_Y * Y_range) + Y_min
fig = plt.figure(figsize=(10, 7))
ax = fig.add_axes([1, 1, 1, 1])
#
# Raw data
#
raw_plot_data = df
ax.scatter(raw_plot_data.Year,
raw_plot_data.RgnAvTemp,
label = 'Raw Data',
c = 'red',
s = 1.5)
#
# Annual averages
#
ax.scatter(df_group_year.Year,
df_group_year.AvTemp,
label = 'Annual average',
c = 'k',
s = 10)
#
# linear fit
#
ax.plot(pred_X, pred_Y,
c = "blue",
linestyle = '-.',
linewidth = 4,
label = 'linear fit')
#
# put the model on the plot
#
ax.text(1902, 20,
'Temp = ' +
str(round(Beta0, 2)) +
' + ' +
str(round(Beta1, 4)) +
' * Year',
fontsize = 16)
#
ax.set_title('Mean Air Temperature Measurements',
fontsize = 16)
#
# make the ticks include the first and last years
#
tick_years = [1902] + list(range(1910, 2011, 10))
ax.set_xlabel('Year',
fontsize = 14)
ax.set_ylabel('Temperature ($^\circ$C)',
fontsize = 14)
ax.set_ylim(15, 21)
ax.set_xticks(tick_years)
ax.tick_params(labelsize = 12)
ax.legend(fontsize = 12)
plt.show()
```
----
[View in Colaboratory](https://colab.research.google.com/github/ArunkumarRamanan/Exercises-Machine-Learning-Crash-Course-Google-Developers/blob/master/validation.ipynb)
#### Copyright 2017 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Validation
**Learning Objectives:**
* Use multiple features, instead of a single feature, to further improve the effectiveness of a model
* Debug issues in model input data
* Use a test data set to check if a model is overfitting the validation data
As in the prior exercises, we're working with the [California housing data set](https://developers.google.com/machine-learning/crash-course/california-housing-data-description), to try and predict `median_house_value` at the city block level from 1990 census data.
## Setup
First off, let's load up and prepare our data. This time, we're going to work with multiple features, so we'll modularize the logic for preprocessing the features a bit:
```
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://dl.google.com/mlcc/mledu-datasets/california_housing_train.csv", sep=",")
# california_housing_dataframe = california_housing_dataframe.reindex(
# np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
"""Prepares input features from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the features to be used for the model, including
synthetic features.
"""
selected_features = california_housing_dataframe[
["latitude",
"longitude",
"housing_median_age",
"total_rooms",
"total_bedrooms",
"population",
"households",
"median_income"]]
processed_features = selected_features.copy()
# Create a synthetic feature.
processed_features["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] /
california_housing_dataframe["population"])
return processed_features
def preprocess_targets(california_housing_dataframe):
"""Prepares target features (i.e., labels) from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the target feature.
"""
output_targets = pd.DataFrame()
# Scale the target to be in units of thousands of dollars.
output_targets["median_house_value"] = (
california_housing_dataframe["median_house_value"] / 1000.0)
return output_targets
```
For the **training set**, we'll choose the first 12000 examples, out of the total of 17000.
```
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_examples.describe()
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
training_targets.describe()
```
For the **validation set**, we'll choose the last 5000 examples, out of the total of 17000.
```
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_examples.describe()
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
validation_targets.describe()
```
## Task 1: Examine the Data
Okay, let's look at the data above. We have `9` input features that we can use.
Take a quick skim over the table of values. Everything look okay? See how many issues you can spot. Don't worry if you don't have a background in statistics; common sense will get you far.
After you've had a chance to look over the data yourself, check the solution for some additional thoughts on how to verify data.
### Solution
Click below for the solution.
Let's check our data against some baseline expectations:
* For some values, like `median_house_value`, we can check to see if these values fall within reasonable ranges (keeping in mind this was 1990 data, not today!).
* For other values, like `latitude` and `longitude`, we can do a quick check to see if these line up with expected values from a quick Google search.
If you look closely, you may see some oddities:
* `median_income` is on a scale from about 3 to 15. It's not at all clear what this scale refers to; it looks like maybe some log scale? It's not documented anywhere; all we can assume is that higher values correspond to higher income.
* The maximum `median_house_value` is 500,001. This looks like an artificial cap of some kind.
* Our `rooms_per_person` feature is generally on a sane scale, with a 75th percentile value of about 2. But there are some very large values, like 18 or 55, which may show some amount of corruption in the data.
We'll use these features as given for now. But hopefully these kinds of examples can help to build a little intuition about how to check data that comes to you from an unknown source.
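One quick way to run these checks yourself is to look at the summary statistics of the prepared features (a small sketch, assuming the `training_examples` DataFrame created above):
```
# Quick numeric sanity check on two of the features discussed above:
# median_income should sit on its odd 3-to-15 scale, and rooms_per_person
# should mostly be small, with a long tail of suspiciously large values.
training_examples[["median_income", "rooms_per_person"]].describe()
```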
## Task 2: Plot Latitude/Longitude vs. Median House Value
Let's take a close look at two features in particular: **`latitude`** and **`longitude`**. These are geographical coordinates of the city block in question.
This might make a nice visualization; let's plot `latitude` and `longitude`, and use color to show the `median_house_value`.
```
plt.figure(figsize=(13, 8))
ax = plt.subplot(1, 2, 1)
ax.set_title("Validation Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(validation_examples["longitude"],
validation_examples["latitude"],
cmap="coolwarm",
c=validation_targets["median_house_value"] / validation_targets["median_house_value"].max())
ax = plt.subplot(1,2,2)
ax.set_title("Training Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(training_examples["longitude"],
training_examples["latitude"],
cmap="coolwarm",
c=training_targets["median_house_value"] / training_targets["median_house_value"].max())
_ = plt.plot()
```
Wait a second... this should have given us a nice map of the state of California, with red showing up in expensive areas like San Francisco and Los Angeles.
The training set sort of does, compared to a [real map](https://www.google.com/maps/place/California/@37.1870174,-123.7642688,6z/data=!3m1!4b1!4m2!3m1!1s0x808fb9fe5f285e3d:0x8b5109a227086f55), but the validation set clearly doesn't.
**Go back up and look at the data from Task 1 again.**
Do you see any other differences in the distributions of features or targets between the training and validation data?
### Solution
Click below for the solution.
Looking at the tables of summary stats above, it's easy to wonder how anyone would do a useful data check. What's the right 75<sup>th</sup> percentile value for total_rooms per city block?
The key thing to notice is that for any given feature or column, the distribution of values between the train and validation splits should be roughly equal.
The fact that this is not the case is a real worry, and shows that we likely have a fault in the way that our train and validation split was created.
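One way to see this mismatch numerically (a small sketch using the DataFrames defined above) is to put the summary statistics of the same column from both splits side by side:
```
# If the split were random, these two columns would look very similar;
# a large gap in latitude is a sign that the split is not random.
pd.concat([training_examples["latitude"].describe(),
           validation_examples["latitude"].describe()],
          axis=1, keys=["train", "validation"])
```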
## Task 3: Return to the Data Importing and Pre-Processing Code, and See if You Spot Any Bugs
If you do, go ahead and fix the bug. Don't spend more than a minute or two looking. If you can't find the bug, check the solution.
When you've found and fixed the issue, re-run `latitude` / `longitude` plotting cell above and confirm that our sanity checks look better.
By the way, there's an important lesson here.
**Debugging in ML is often *data debugging* rather than code debugging.**
If the data is wrong, even the most advanced ML code can't save things.
### Solution
Click below for the solution.
Take a look at how the data is randomized when it's read in.
If we don't randomize the data properly before creating training and validation splits, then we may be in trouble if the data is given to us in some sorted order, which appears to be the case here.
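Concretely, the fix is to re-enable the shuffling that was commented out in the loading cell, then rebuild the four DataFrames (a sketch; the helper functions are the ones defined above):
```
# Shuffle the rows before slicing, so head() and tail() both cover all of California.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
```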
## Task 4: Train and Evaluate a Model
**Spend 5 minutes or so trying different hyperparameter settings. Try to get the best validation performance you can.**
Next, we'll train a linear regressor using all the features in the data set, and see how well we do.
Let's define the same input function we've used previously for loading the data into a TensorFlow model.
```
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a linear regression model of multiple features.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
```
Because we're now working with multiple input features, let's modularize our code for configuring feature columns into a separate function. (For now, this code is fairly simple, as all our features are numeric, but we'll build on this code as we use other types of features in future exercises.)
```
def construct_feature_columns(input_features):
"""Construct the TensorFlow Feature Columns.
Args:
input_features: The names of the numerical input features to use.
Returns:
A set of feature columns
"""
return set([tf.feature_column.numeric_column(my_feature)
for my_feature in input_features])
```
Next, go ahead and complete the `train_model()` code below to set up the input functions and calculate predictions.
**NOTE:** It's okay to reference the code from the previous exercises, but make sure to call `predict()` on the appropriate data sets.
Compare the losses on training data and validation data. With a single raw feature, our best root mean squared error (RMSE) was about 180.
See how much better you can do now that we can use multiple features.
Check the data using some of the methods we've looked at before. These might include:
* Comparing distributions of predictions and actual target values
* Creating a scatter plot of predictions vs. target values
* Creating two scatter plots of validation data using `latitude` and `longitude`:
* One plot mapping color to actual target `median_house_value`
* A second plot mapping color to predicted `median_house_value` for side-by-side comparison.
```
def train_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear regression model of multiple features.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for training.
training_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for training.
validation_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for validation.
validation_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for validation.
Returns:
A `LinearRegressor` object trained on the training data.
"""
periods = 10
steps_per_period = steps / periods
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=construct_feature_columns(training_examples),
optimizer=my_optimizer
)
# 1. Create input functions.
training_input_fn = # YOUR CODE HERE
predict_training_input_fn = # YOUR CODE HERE
predict_validation_input_fn = # YOUR CODE HERE
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("RMSE (on training data):")
training_rmse = []
validation_rmse = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period,
)
# 2. Take a break and compute predictions.
training_predictions = # YOUR CODE HERE
validation_predictions = # YOUR CODE HERE
# Compute training and validation loss.
training_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(training_predictions, training_targets))
validation_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(validation_predictions, validation_targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
# Add the loss metrics from this period to our list.
training_rmse.append(training_root_mean_squared_error)
validation_rmse.append(validation_root_mean_squared_error)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor
linear_regressor = train_model(
# TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
learning_rate=0.00001,
steps=100,
batch_size=1,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
### Solution
Click below for a solution.
```
def train_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear regression model of multiple features.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for training.
training_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for training.
validation_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for validation.
validation_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for validation.
Returns:
A `LinearRegressor` object trained on the training data.
"""
periods = 10
steps_per_period = steps / periods
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=construct_feature_columns(training_examples),
optimizer=my_optimizer
)
# Create input functions.
training_input_fn = lambda: my_input_fn(
training_examples,
training_targets["median_house_value"],
batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(
training_examples,
training_targets["median_house_value"],
num_epochs=1,
shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(
validation_examples, validation_targets["median_house_value"],
num_epochs=1,
shuffle=False)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("RMSE (on training data):")
training_rmse = []
validation_rmse = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period,
)
# Take a break and compute predictions.
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
# Compute training and validation loss.
training_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(training_predictions, training_targets))
validation_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(validation_predictions, validation_targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
# Add the loss metrics from this period to our list.
training_rmse.append(training_root_mean_squared_error)
validation_rmse.append(validation_root_mean_squared_error)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor
linear_regressor = train_model(
learning_rate=0.00003,
steps=500,
batch_size=5,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## Task 5: Evaluate on Test Data
**In the cell below, load in the test data set and evaluate your model on it.**
We've done a lot of iteration on our validation data. Let's make sure we haven't overfit to the peculiarities of that particular sample.
Test data set is located [here](https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv).
How does your test performance compare to the validation performance? What does this say about the generalization performance of your model?
```
california_housing_test_data = pd.read_csv("https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv", sep=",")
#
# YOUR CODE HERE
#
```
### Solution
Click below for the solution.
```
california_housing_test_data = pd.read_csv("https://dl.google.com/mlcc/mledu-datasets/california_housing_test.csv", sep=",")
test_examples = preprocess_features(california_housing_test_data)
test_targets = preprocess_targets(california_housing_test_data)
predict_test_input_fn = lambda: my_input_fn(
test_examples,
test_targets["median_house_value"],
num_epochs=1,
shuffle=False)
test_predictions = linear_regressor.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['predictions'][0] for item in test_predictions])
root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(test_predictions, test_targets))
print("Final RMSE (on test data): %0.2f" % root_mean_squared_error)
```
----
```
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from hawkes import hawkes, sampleHawkes, plotHawkes, iterative_sampling, extract_samples, sample_counterfactual_superposition, check_monotonicity_hawkes
sys.path.append(os.path.abspath('../'))
from sampling_utils import thinning_T
```
This notebook contains an example of running Algorithm 3 (in the paper) for two cases: (1) where we have both observed and unobserved events, and (2) where we have only the observed events.
# 1. Sampling From Lambda_max
```
# required parameters
mu0 = 1
alpha = 1
w = 1
lambda_max = 3
T = 5
def constant1(x): return mu0
# sampling from hawkes using the superposition property
initial_sample, indicators = thinning_T(0, constant1, lambda_max, T)
events = {initial_sample[i]: indicators[i] for i in range(len(initial_sample))}
all_events = {}
all_events[mu0] = events
iterative_sampling(all_events, events, mu0, alpha, w, lambda_max, T)
# plotting hawkes
sampled_events = list(all_events.keys())[1:]
sampled_events.sort()
sampled_events = np.array(sampled_events)
sampled_lambdas = hawkes(sampled_events, mu0, alpha, w)
plt.figure(figsize=(10, 8))
tvec, l_t = plotHawkes(sampled_events, constant1, alpha, w, T, 10000.0, label= 'intensity', color = 'r+', legend= 'accepted')
plt.plot(sampled_events, sampled_lambdas, 'r^')
plt.legend()
plt.show()
# extract all sampled events from all_events dictionary.
all_samples, all_lambdas = extract_samples(all_events, sampled_events, mu0, alpha, w)
# plots all events, both accepted and rejected with their intensities.
plt.figure(figsize=(10, 8))
plt.plot(tvec, l_t, label = 'Original Intensity')
plt.plot(all_samples, all_lambdas, 'oy', label = 'events')
plt.plot(sampled_events,sampled_lambdas, 'r+', label = 'accepted')
plt.xlabel('time')
plt.ylabel('intensity')
plt.legend()
# sampling from the counterfactual intensity.
new_mu0 = 3
new_alpha = 0.1
real_counterfactuals = sample_counterfactual_superposition(mu0, alpha, new_mu0, new_alpha, all_events, lambda_max, w, T)
```
**The green +s are the counterfactuals; the red markers show the originally accepted events.**
```
plt.figure(figsize=(15, 6))
plotHawkes(np.array(real_counterfactuals), lambda t: new_mu0, new_alpha, w, T, 10000.0, label= 'counterfactual intensity', color = 'g+', legend= 'accepted in counterfactual')
plt.plot(tvec, l_t, label = 'Original Intensity')
plt.plot(all_samples, all_lambdas, 'oy', label = 'events')
plt.plot(sampled_events,sampled_lambdas, 'r^')
plt.plot(sampled_events,np.full(len(sampled_events), -0.1), 'r+', label = 'originally accepted')
for xc in real_counterfactuals:
plt.axvline(x=xc, color = 'k', ls = '--', alpha = 0.2)
plt.xlabel('time')
plt.ylabel('intensity')
plt.legend()
```
In the following cell, we will check monotonicity property. Note that this property should hold in **each exponential created by superposition** (please have a look at `check_monotonicity_hawkes` in `hawkes.py` for more details.).
```
check_monotonicity_hawkes(mu0, alpha, new_mu0, new_alpha, all_events, sampled_events, real_counterfactuals, w)
```
# 2. Real-World Scenario
```
# First, we sample from the hawkes process using the Ogata's algorithm (or any other sampling method), but only store the accepted events.
plt.figure(figsize=(10, 8))
mu0 = 1
alpha = 1
w = 1
lambda_max = 3
T = 5
tev, tend, lambdas_original = sampleHawkes(mu0, alpha, w, T, Nev= 100)
tvec, l_t = plotHawkes(tev, lambda t: mu0, alpha, w, T, 10000.0, label = 'Original Intensity', color= 'r+', legend= 'samples')
plt.plot(tev, lambdas_original, 'r^')
plt.legend()
# this list stores functions corresponding to each exponential.
exponentials = []
all_events = {}
exponentials.append(lambda t: mu0)
all_events[mu0] = {}
for i in range(len(tev)):
exponentials.append(lambda t: alpha * np.exp(-w * (t - tev[i])))
all_events[tev[i]] = {}
# we should assign each accepted event to some exponential. (IMPORTANT)
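# Under superposition, an accepted event at time tev[i] is attributed to exponential j
# with probability proportional to that exponential's intensity at tev[i]
# (exponentials[j](tev[i]) normalised over j); np.random.choice below draws the
# parent index with exactly these probabilities, index 0 being the baseline mu0.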
for i in range(len(tev)):
if i == 0:
all_events[mu0][tev[i]] = True
else:
probabilities = [exponentials[j](tev[i]) for j in range(0, i + 1)]
probabilities = [float(i)/sum(probabilities) for i in probabilities]
a = np.random.choice(i + 1, 1, p = probabilities)
if a == 0:
all_events[mu0][tev[i]] = True
else:
all_events[tev[a[0] - 1]][tev[i]] = True
# using the superposition to calculate the difference between lambda_max and the exponentials, and sample from it.
differences = []
differences.append(lambda t: lambda_max - mu0)
for k in range(len(tev)):
f = lambda t: lambda_max - alpha * np.exp(-w * (t - tev[k]))
differences.append(f)
for i in range(len(differences)):
    if i == 0:
        rejected, indicators = thinning_T(0, differences[i], lambda_max, T)
    else:
        rejected, indicators = thinning_T(tev[i - 1], differences[i], lambda_max, T)
    rejected = {rejected[j]: False for j in range(len(rejected)) if indicators[j] == True}
    if i == 0:
        all_events[mu0].update(rejected)
        all_events[mu0] = {k:v for k,v in sorted(all_events[mu0].items())}
    else:
        all_events[tev[i - 1]].update(rejected)
        all_events[tev[i - 1]] = {k:v for k,v in sorted(all_events[tev[i - 1]].items())}
all_samples, all_lambdas = extract_samples(all_events, tev, mu0, alpha, w)
plt.figure(figsize=(10, 8))
plt.plot(tvec, l_t, label = 'Original Intensity')
plt.plot(all_samples, all_lambdas, 'oy', label = 'events')
plt.plot(tev,lambdas_original, 'r+', label = 'accepted')
plt.xlabel('time')
plt.ylabel('intensity')
plt.legend()
new_mu0 = 0.1
new_alpha = 1.7
real_counterfactuals = sample_counterfactual_superposition(mu0, alpha, new_mu0, new_alpha, all_events, lambda_max, w, T)
```
**The green +s are the counterfactuals; the red markers show the originally accepted events.**
```
plt.figure(figsize=(15, 8))
plotHawkes(np.array(real_counterfactuals), lambda t: new_mu0, new_alpha, w, T, 10000.0, label= 'counterfactual intensity', color= 'g+', legend= 'accepted in counterfactual')
plt.plot(tvec, l_t, label = 'Original Intensity')
plt.plot(all_samples, all_lambdas, 'oy', label = 'events')
plt.plot(tev,lambdas_original, 'r^')
plt.plot(tev,np.full(len(tev), -0.1), 'r+', label = 'originally accepted')
for xc in real_counterfactuals:
plt.axvline(x=xc, color = 'k', ls = '--', alpha = 0.2)
plt.xlabel('time')
plt.ylabel('intensity')
plt.legend()
check_monotonicity_hawkes(mu0, alpha, new_mu0, new_alpha, all_events, tev, real_counterfactuals, w)
```
----
# Two Layer QG Model Example #
Here is a quick overview of how to use the two-layer model. See the
:py:class:`pyqg.QGModel` api documentation for further details.
First import numpy, matplotlib, and pyqg:
```
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
import pyqg
```
## Initialize and Run the Model ##
Here we set up a model which will run for 10 years and start averaging
after 5 years. There are lots of parameters that can be specified as
keyword arguments but we are just using the defaults.
```
year = 24*60*60*360.
m = pyqg.QGModel(tmax=10*year, twrite=10000, tavestart=5*year)
m.run()
```
## Convert Model Output to an xarray Dataset ##
Model variables, coordinates, attributes, and metadata can be stored conveniently as an xarray Dataset. (Notice that this feature requires xarray to be installed on your machine. See here for installation instructions: http://xarray.pydata.org/en/stable/getting-started-guide/installing.html#instructions)
```
m_ds = m.to_dataset()
m_ds
```
## Visualize Output ##
Let's assign a new data variable, ``q_upper``, as the **upper layer PV anomaly**. We access the PV values in the Dataset as ``m_ds.q``, which has two levels and a corresponding background PV gradient, ``m_ds.Qy``.
```
m_ds['q_upper'] = m_ds.q.isel(lev=0, time=0) + m_ds.Qy.isel(lev=0)*m_ds.y
m_ds['q_upper'].attrs = {'long_name': 'upper layer PV anomaly'}
m_ds.q_upper.plot.contourf(levels=18, cmap='RdBu_r');
```
## Plot Diagnostics ##
The model automatically accumulates averages of certain diagnostics. We can
find out what diagnostics are available by calling
```
m.describe_diagnostics()
```
To look at the wavenumber energy spectrum, we plot the `KEspec` diagnostic.
(Note that summing along the l-axis, as in this example, does not give us
a true *isotropic* wavenumber spectrum.)
```
kespec_upper = m_ds.KEspec.isel(lev=0).sum('l')
kespec_lower = m_ds.KEspec.isel(lev=1).sum('l')
kespec_upper.plot.line( 'b.-', x='k', xscale='log', yscale='log', label='upper layer')
kespec_lower.plot.line( 'g.-', x='k', xscale='log', yscale='log', label='lower layer')
plt.legend(loc='lower left')
plt.ylim([1e-9, 1e-3]);
plt.xlabel(r'k (m$^{-1}$)'); plt.grid()
plt.title('Kinetic Energy Spectrum');
```
We can also plot the spectral fluxes of energy.
```
ebud = [ m_ds.APEgenspec.sum('l'),
m_ds.APEflux.sum('l'),
m_ds.KEflux.sum('l'),
-m_ds.attrs['pyqg:rek']*m.del2*m_ds.KEspec.isel(lev=1).sum('l')*m.M**2 ]
ebud.append(-np.vstack(ebud).sum(axis=0))
ebud_labels = ['APE gen','APE flux','KE flux','Diss.','Resid.']
[plt.semilogx(m_ds.k, term) for term in ebud]
plt.legend(ebud_labels, loc='upper right')
plt.xlim([m_ds.k.min(), m_ds.k.max()])
plt.xlabel(r'k (m$^{-1}$)'); plt.grid()
plt.title('Spectral Energy Transfers');
```
----
```
import numpy as np
import matplotlib.pyplot as plt
```
This notebook provides a basic example of using the `blg_strain` package to calculate the magnetoelectric susceptibility for strained bilayer graphene.
# Strained Lattice
```
from blg_strain.lattice import StrainedLattice
sl = StrainedLattice(eps=0.01, theta=0)
sl.calculate()
```
Below is a plot of the Brillouin zone (black hexagon) and location of the K/K' points (red markers), which do not coincide with the high-symmetry points of the Brillouin zone.
```
fig = plt.figure()
axes = [fig.add_subplot(x) for x in (121, 222, 224)]
for ax in axes:
sl.plot_bz(ax)
ax.set_aspect(1)
w = 0.02
axes[1].set_xlim(sl.K[0] - w, sl.K[0] + w)
axes[1].set_ylim(sl.K[1] - w, sl.K[1] + w)
axes[2].set_xlim(sl.Kp[0] - w, sl.Kp[0] + w)
axes[2].set_ylim(sl.Kp[1] - w, sl.Kp[1] + w)
```
# Band Structure
```
from blg_strain.bands import BandStructure
bs = BandStructure(sl=sl, window=0.1, Delta=0.01)
bs.calculate(Nkx=200, Nky=200)
```
Below are plots of the energy, one component of the wavefunction, Berry curvature, and orbital magnetic moment in regions of momentum space surrounding the K and K' valleys.
```
fig, axes = plt.subplots(2, 4, figsize=(14, 7))
pcolormesh_kwargs = dict(cmap='cividis', shading='gouraud')
contour_kwargs = dict(colors='k', linewidths=0.5, linestyles='solid')
n = 2 # Band index
m = 1 # component of wavefunction
for i, (axK, axKp, A) in enumerate(zip(axes[0,:],
axes[1,:],
[bs.E[n], bs.Psi[n,m,:,:].real, bs.Omega[n], bs.Mu[n]])):
# K
axK.pcolormesh(bs.Kxa, bs.Kya, A, **pcolormesh_kwargs)
axK.contour(bs.Kxa, bs.Kya, A, **contour_kwargs)
# K'
if i >= 2: # Omega and Mu
A = -A
axKp.pcolormesh(-bs.Kxa, -bs.Kya, A, **pcolormesh_kwargs)
axKp.contour(-bs.Kxa, -bs.Kya, A, **contour_kwargs)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect(1)
axes[0,0].set_title('Conduction band energy')
axes[0,1].set_title(f'Component {m} of wavefunction')
axes[0,2].set_title('Berry curvature')
axes[0,3].set_title('Orbital magnetic moment')
axes[0,0].set_ylabel('$K$', rotation=0, labelpad=30, fontsize=16, va='center')
axes[1,0].set_ylabel('$K\'$', rotation=0, labelpad=30, fontsize=16, va='center')
```
# Filled bands
```
from blg_strain.bands import FilledBands
fb = FilledBands(bs=bs, EF=0.01)
fb.calculate(Nkx=500, Nky=500)
```
Below is a plot of the $x$ component of magnetoelectric susceptibility as a function of doping (carrier density) for the band structure illustrated above.
```
EFs = np.linspace(0, 0.015, 100)
ns = np.empty_like(EFs)
alphas = np.empty_like(EFs)
for i, EF in enumerate(EFs):
fb = FilledBands(bs=bs, EF=EF)
fb.calculate(500, 500)
ns[i] = fb.n
alphas[i] = fb.alpha[0]
fig, ax = plt.subplots()
ax.plot(ns/1e16, alphas)
ax.set_xlabel('Carrier density ($10^{12}$ cm$^{-2}$)')
ax.set_ylabel('Magnetoelectric coefficient (a.u.)')
```
# Saving and Loading
```
base_path = 'example'
sl.save(base_path)
bs.save()
fb.save()
sl_path = '/'.join((base_path, 'StrainedLattice_eps0.010_theta0.000_Run0'))
sl = StrainedLattice.load(sl_path + '.h5')
bs_path = '/'.join((sl_path, 'BandStructure_Nkx200_Nky200_Delta10.000'))
bs = BandStructure.load(bs_path + '.h5')
fb_path = '/'.join((bs_path, 'FilledBands_Nkx500_Nky500_EF15.000'))
fb = FilledBands.load(fb_path + '.h5')
```
## Create and load "summary" file
```
from blg_strain.utils.saver import load
Deltas, EFs, ns, Ds, alphas = load(sl_path)
Deltas, EFs, ns, Ds, alphas
```
----
# Transfer Learning
Most of the time you won't want to train a whole convolutional network yourself. Training modern ConvNets on huge datasets like ImageNet takes weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture.
<img src="assets/cnnarchitecture.jpg" width=700px>
VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.
You can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf).
## Pretrained VGGNet
We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. This code is already included in the 'tensorflow_vgg' directory, so you don't have to clone it.
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained, and the parameter file can be downloaded using the next cell.
```
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
```
## Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).
```
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
```
## ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the `vgg16` module from `tensorflow_vgg`. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use
```
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
```
This creates the `vgg` object, then builds the graph with `vgg.build(input_)`. Then to get the values from the layer,
```
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict)
```
```
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
```
Below I'm running images through the VGG network in batches.
> **Exercise:** Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).
```
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
feed_dict = {input_: images}
codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
```
## Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.
```
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes', 'rb') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
```
### Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
> **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels.
```
labels[0]
codes.shape
from sklearn import preprocessing
unique_labels = list(set(labels))
lb = preprocessing.LabelBinarizer()
labels_vecs = lb.fit(unique_labels).transform(labels)
```
Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.
You can create the splitter like so:
```
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
```
Then split the data with
```
splitter = ss.split(x, y)
```
`ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices. Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).
> **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.
```
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(ss.split(codes, labels_vecs))
half_val_len = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
```
If you did it right, you should see these sizes for the training sets:
```
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
```
### Classifier layers
Once you have the convolutional codes, you just need to build a classifier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
> **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs; each of them is a 4096-dimensional vector. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
```
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.float32, shape=[None, labels_vecs.shape[1]])
# TODO: Classifier layers and operations
dense1 = tf.layers.dense(inputs_, 256, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(dense1, 0.2)
dense2 = tf.layers.dense(dropout1, 64, activation=tf.nn.relu)
dropout2 = tf.layers.dropout(dense2, 0.2)
logits = tf.layers.dense(dropout2, len(unique_labels), activation=None)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits))
optimizer = tf.train.AdamOptimizer(0.005).minimize(cost)
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
```
### Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.
```
def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
```
### Training
Here, we'll train the network.
> **Exercise:** So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the `get_batches` function I wrote before to get your batches like `for x, y in get_batches(train_x, train_y)`. Or write your own!
```
epochs = 10
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
feed = {inputs_: x,
labels_: y}
loss, _ = sess.run([cost, optimizer], feed_dict=feed)
print("Epoch: {}/{}".format(e+1, epochs),
"Iteration: {}".format(iteration),
"Training loss: {:.5f}".format(loss))
iteration += 1
if iteration % 5 == 0:
feed = {inputs_: val_x,
labels_: val_y}
val_acc = sess.run(accuracy, feed_dict=feed)
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Validation Acc: {:.4f}".format(val_acc))
saver.save(sess, "checkpoints/flowers.ckpt")
```
### Testing
Below you see the test accuracy. You can also see the predictions returned for images.
```
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
```
Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
```
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
```
```
%matplotlib inline
import sys
import os
import json
from glob import glob
from collections import defaultdict, OrderedDict
import dinopy
import yaml
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import seaborn
import numpy
import pandas as pd
import networkx
from scipy.special import binom
from scipy import stats
from IPython.display import Image, display
from phasm.io import gfa
from phasm.alignments import AlignmentType
from phasm.assembly_graph import AssemblyGraph
from phasm.bubbles import find_superbubbles
BASE_DIR = os.path.realpath(os.path.join(os.getcwd(), '..'))
with open(os.path.join(BASE_DIR, "config.yml")) as f:
config = yaml.load(f)
seaborn.set_style('whitegrid')
spanning_read_stats = []
candidate_prob_stats = []
bubble_map = defaultdict(dict)
for assembly, asm_config in config['assemblies'].items():
parts = assembly.split('-')
ploidy = int(parts[0].replace("ploidy", ""))
coverage = int(parts[1].replace("x", ""))
asm_folder = os.path.join(BASE_DIR, "assemblies", assembly)
for debugdata in glob("{}/04_phase/component[0-9].bubblechain[0-9]-debugdata.json".format(asm_folder)):
print(debugdata)
graphml = debugdata.replace("04_phase", "03_chain").replace("-debugdata.json", ".graphml")
g = AssemblyGraph(networkx.read_graphml(graphml))
curr_bubble = None
bubble_num = 0
num_candidates = -1
with open(debugdata) as f:
for line in f:
data = json.loads(line)
if data['type'] == "new_bubble":
curr_bubble = data
bubble_map[ploidy, coverage][(data['entrance'], data['exit'])] = data
if data['start_of_block'] == True:
bubble_num = 1
else:
dist_between_bubbles = (
min(e[2] for e in g.out_edges_iter(data['entrance'], data=g.edge_len))
)
spanning_read_stats.append({
'dist': dist_between_bubbles,
'spanning_reads': len(data['rel_read_info']),
'ploidy': ploidy
})
bubble_num += 1
if data['type'] == "candidate_set":
p_sr = data['p_sr']
prior = data['prior']
prob = 10**(p_sr + prior)
entrance = curr_bubble['entrance']
exit = curr_bubble['exit']
candidate_prob_stats.append({
'bubble': (entrance, exit),
'bubble_num': bubble_num,
'candidate_prob': prob,
'ploidy': ploidy,
'coverage': coverage
})
srdf = pd.DataFrame(spanning_read_stats)
srdf['spanning_reads_norm'] = srdf['spanning_reads'] / srdf['ploidy']
g = seaborn.JointGrid(x="dist", y="spanning_reads_norm", data=srdf, size=7)
x_bin_size = 2500
g.ax_marg_x.hist(srdf['dist'], alpha=0.6, bins=numpy.arange(0, srdf['dist'].max()+x_bin_size, x_bin_size))
y_bin_size = 10
g.ax_marg_y.hist(srdf['spanning_reads_norm'], alpha=0.6, orientation="horizontal",
bins=numpy.arange(0, srdf['spanning_reads_norm'].max()+y_bin_size, y_bin_size))
g.plot_joint(seaborn.regplot)
g.annotate(stats.pearsonr)
seaborn.plt.suptitle("Number of spanning reads against the distance between two bubbles,\n normalised for ploidy")
plt.ylim(ymin=0)
plt.xlabel("Distance between two bubbles [bases]")
plt.ylabel("Number of spanning reads")
plt.subplots_adjust(top=0.9)
plt.savefig(os.path.join(BASE_DIR, 'figures', 'spanning-reads.png'), transparent=True, dpi=256)
candidate_df = pd.DataFrame(candidate_prob_stats)
candidate_df.set_index('bubble')
plt.figure()
seaborn.distplot(candidate_df['candidate_prob'], kde=False, hist_kws={"alpha": 0.8})
plt.title("Distribution of candidate extension relative likelihoods")
plt.xlabel("Relative likelihood of an extension")
plt.ylabel("Count")
# plt.xlim(xmax=1.0)
plt.axvline(1e-3, linestyle='--', color='black')
plt.savefig(os.path.join(BASE_DIR, 'figures', 'rel-likelihood-abs.png'), transparent=True, dpi=256)
grouped = candidate_df.groupby(['bubble', 'ploidy'])['candidate_prob']
max_probs = grouped.max()
for bubble, ploidy in grouped.groups.keys():
candidate_df.loc[grouped.groups[bubble, ploidy], 'max_prob'] = max_probs[bubble, ploidy]
candidate_df['relative_prob'] = candidate_df['candidate_prob'] / candidate_df['max_prob']
candidate_df
plt.figure()
seaborn.distplot(candidate_df[candidate_df['relative_prob'] < 1.0]['relative_prob'], kde=False, hist_kws={"alpha": 0.8})
plt.title("Distribution of relative probabilities for each candidate extension\n"
"at each superbubble")
plt.xlabel(r"$RL[E|H]\ /\ \omega$")
plt.ylabel("Count")
plt.savefig(os.path.join(BASE_DIR, "figures", "rl-relative-dist.png"), transparent=True, dpi=256)
c1, c2, c3, c4, c5 = seaborn.color_palette(n_colors=5)
pruning_stats = []
for assembly, asm_config in config['assemblies'].items():
parts = assembly.split('-')
ploidy = int(parts[0].replace("ploidy", ""))
coverage = int(parts[1].replace("x", ""))
if coverage != 60:
continue
asm_folder = os.path.join(BASE_DIR, "assemblies", assembly)
for chain_num, graphml in enumerate(glob("{}/03_chain/component[0-9].bubblechain[0-9].graphml".format(asm_folder))):
print(graphml)
# Calculate effect of pruning
g = AssemblyGraph(networkx.read_graphml(graphml))
bubbles = OrderedDict(find_superbubbles(g, report_nested=False))
bubble_num = 0
for i, bubble in enumerate(reversed(bubbles.items())):
entrance, exit = bubble
num_paths = len(list(networkx.all_simple_paths(g, entrance, exit)))
if not bubble in bubble_map[ploidy, coverage]:
continue
bubble_data = bubble_map[ploidy, coverage][bubble]
if bubble_data['start_of_block']:
bubble_num = 1
else:
bubble_num += 1
kappa = 0.0
pruned = 0
num_candidates_left = sys.maxsize
while num_candidates_left > 500 and kappa < 1.0:
kappa += 0.1
num_candidates_left = len(
candidate_df.query('(bubble == @bubble) and (ploidy == @ploidy) and (relative_prob >= @kappa)')
)
pruned = len(
candidate_df.query('(bubble == @bubble) and (ploidy == @ploidy) and (relative_prob < @kappa)')
)
pruning_stats.append({
'ploidy': ploidy,
'coverage': coverage,
'bubble_num': bubble_num,
'pruned': pruned,
'kappa': kappa
})
pruning_df = pd.DataFrame(pruning_stats)
agg_df = pd.DataFrame(pruning_df.groupby(['bubble_num', 'kappa']).size().rename('counts'))
agg_df.reset_index(level=agg_df.index.names, inplace=True)
agg_df = agg_df.query('kappa <= 1.0')
sum_df = pd.DataFrame(agg_df.groupby('bubble_num')['counts'].sum()).reset_index()
sum_df
for i in sum_df['bubble_num'].unique():
agg_df.loc[agg_df['bubble_num'] == i, 'total'] = int(sum_df['counts'].loc[sum_df['bubble_num'] == i].values[0])
agg_df['fraction'] = agg_df['counts'] / agg_df['total']
agg_df
plt.figure()
g = seaborn.factorplot(x="kappa", y="fraction", col="bubble_num",
kind="bar", col_wrap=3, sharex=False, color=c1,
data=agg_df.query('(bubble_num < 7) and (kappa <= 1.0)'))
plt.suptitle('The maximum pruning factor $\kappa$ at different stages of the phasing process')
plt.subplots_adjust(top=0.9, hspace=0.3)
for i, ax in enumerate(g.axes):
ax.set_xlabel("$\kappa$")
if i % 3 == 0:
ax.set_ylabel("Fraction")
ax.set_title("Superbubble {}".format(i+1))
plt.savefig(os.path.join(BASE_DIR, 'figures', 'pruning.png'), transparent=True, dpi=256)
```
# LAB 4c: Create Keras Wide and Deep model.
**Learning Objectives**
1. Set CSV Columns, label column, and column defaults
1. Make dataset of features and label from CSV files
1. Create input layers for raw features
1. Create feature columns for inputs
1. Create wide layer, deep dense hidden layers, and output layer
1. Create custom evaluation metric
1. Build wide and deep model tying all of the pieces together
1. Train and evaluate
## Introduction
In this notebook, we'll be using Keras to create a wide and deep model to predict the weight of a baby before it is born.
We'll start by defining the CSV column names, label column, and column defaults for our data inputs. Then, we'll construct a tf.data Dataset of features and the label from the CSV files and create inputs layers for the raw features. Next, we'll set up feature columns for the model inputs and build a wide and deep neural network in Keras. We'll create a custom evaluation metric and build our wide and deep model. Finally, we'll train and evaluate our model.
Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/4c_keras_wide_and_deep_babyweight.ipynb).
## Load necessary libraries
```
import datetime
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
print(tf.__version__)
```
## Verify CSV files exist
In the seventh lab of this series [4a_sample_babyweight](../solutions/4a_sample_babyweight.ipynb), we sampled from BigQuery our train, eval, and test CSV files. Verify that they exist, otherwise go back to that lab and create them.
```
%%bash
ls *.csv
%%bash
head -5 *.csv
```
## Create Keras model
### Lab Task #1: Set CSV Columns, label column, and column defaults.
Now that we have verified that our CSV files exist, we need to set a few things that we will be using in our input function.
* `CSV_COLUMNS` are going to be our header names of our columns. Make sure that they are in the same order as in the CSV files
* `LABEL_COLUMN` is the header name of the column that is our label. We will need to know this to pop it from our features dictionary.
* `DEFAULTS` is a list with the same length as `CSV_COLUMNS`, i.e. there is a default for each column in our CSVs. Each element is a list itself with the default value for that CSV column.
```
# Determine CSV, label, and key columns
# TODO: Create list of string column headers, make sure order matches.
CSV_COLUMNS = [""]
# TODO: Add string name for label column
LABEL_COLUMN = ""
# Set default values for each CSV column as a list of lists.
# Treat is_male and plurality as strings.
DEFAULTS = []
```
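If you want to check your work against one possibility, here is a sketch of how these values might be filled in. The column names below are assumed from the babyweight CSVs created in lab 4a (in particular, using `weight_pounds` as the label is an assumption); make sure they match your own file's header order.
```
# A possible completion (sketch). Column names are assumed from the
# babyweight CSVs created in lab 4a; adjust to your file's header order.
CSV_COLUMNS = ["weight_pounds", "is_male", "mother_age",
               "plurality", "gestation_weeks"]

# Assumed label column.
LABEL_COLUMN = "weight_pounds"

# One default per column: floats for numeric fields, strings for
# is_male and plurality.
DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]]
```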
### Lab Task #2: Make dataset of features and label from CSV files.
Next, we will write an input_fn to read the data. Since we are reading from CSV files we can save ourselves from reinventing the wheel and use `tf.data.experimental.make_csv_dataset`. This will create a CSV dataset object. However, we will need to divide the columns up into features and a label. We can do this by applying the map method to our dataset and popping our label column off of our dictionary of feature tensors.
```
def features_and_labels(row_data):
"""Splits features and labels from feature dictionary.
Args:
row_data: Dictionary of CSV column names and tensor values.
Returns:
Dictionary of feature tensors and label tensor.
"""
label = row_data.pop(LABEL_COLUMN)
return row_data, label # features, label
def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
"""Loads dataset using the tf.data API from CSV files.
Args:
pattern: str, file pattern to glob into list of files.
batch_size: int, the number of examples per batch.
mode: tf.estimator.ModeKeys to determine if training or evaluating.
Returns:
`Dataset` object.
"""
# TODO: Make a CSV dataset
dataset = tf.data.experimental.make_csv_dataset()
# TODO: Map dataset to features and label
dataset = dataset.map() # features, label
# Shuffle and repeat for training
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=1000).repeat()
    # Prefetch one batch to overlap the input pipeline with training; tf.data.experimental.AUTOTUNE can tune this automatically
dataset = dataset.prefetch(buffer_size=1)
return dataset
```
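For reference, a possible completion of the two TODOs above is sketched below as an alternative function; it simply wires `tf.data.experimental.make_csv_dataset` and the `features_and_labels` mapper into the same skeleton (a sketch, not the official solution; it relies on the `CSV_COLUMNS` and `DEFAULTS` defined earlier).
```
def load_dataset_sketch(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
    """A possible completion of load_dataset (sketch, not the official solution)."""
    # Build a batched dataset of dicts keyed by the CSV column names.
    dataset = tf.data.experimental.make_csv_dataset(
        file_pattern=pattern,
        batch_size=batch_size,
        column_names=CSV_COLUMNS,
        column_defaults=DEFAULTS)

    # Split each row dict into (features, label).
    dataset = dataset.map(map_func=features_and_labels)

    # Shuffle and repeat for training.
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.shuffle(buffer_size=1000).repeat()

    # Prefetch to overlap the input pipeline with training.
    dataset = dataset.prefetch(buffer_size=1)
    return dataset
```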
### Lab Task #3: Create input layers for raw features.
We'll need to get the data read in by our input function to our model function, but just how do we go about connecting the dots? We can use Keras input layers [(tf.Keras.layers.Input)](https://www.tensorflow.org/api_docs/python/tf/keras/Input) by defining:
* shape: A shape tuple (integers), not including the batch size. For instance, shape=(32,) indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known.
* name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided.
* dtype: The data type expected by the input, as a string (float32, float64, int32...)
```
def create_input_layers():
"""Creates dictionary of input layers for each feature.
Returns:
Dictionary of `tf.Keras.layers.Input` layers for each feature.
"""
# TODO: Create dictionary of tf.keras.layers.Input for each dense feature
deep_inputs = {}
# TODO: Create dictionary of tf.keras.layers.Input for each sparse feature
wide_inputs = {}
inputs = {**wide_inputs, **deep_inputs}
return inputs
```
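A possible completion is sketched below, assuming the babyweight feature names used later in this lab (`mother_age`, `gestation_weeks`, `is_male`, `plurality`); it is a sketch, not the official solution.
```
def create_input_layers_sketch():
    """A possible completion (sketch), assuming the babyweight feature names."""
    # Numeric features feed the deep side as float32 scalars.
    deep_inputs = {
        colname: tf.keras.layers.Input(name=colname, shape=(), dtype="float32")
        for colname in ["mother_age", "gestation_weeks"]}

    # Categorical features feed the wide side as string scalars.
    wide_inputs = {
        colname: tf.keras.layers.Input(name=colname, shape=(), dtype="string")
        for colname in ["is_male", "plurality"]}

    return {**wide_inputs, **deep_inputs}
```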
### Lab Task #4: Create feature columns for inputs.
Next, define the feature columns. `mother_age` and `gestation_weeks` should be numeric. The others, `is_male` and `plurality`, should be categorical. Remember, only dense feature columns can be inputs to a DNN.
```
def create_feature_columns(nembeds):
"""Creates wide and deep dictionaries of feature columns from inputs.
Args:
nembeds: int, number of dimensions to embed categorical column down to.
Returns:
Wide and deep dictionaries of feature columns.
"""
# TODO: Create deep feature columns for numeric features
deep_fc = {}
# TODO: Create wide feature columns for categorical features
wide_fc = {}
# TODO: Bucketize the float fields. This makes them wide
# TODO: Cross all the wide cols, have to do the crossing before we one-hot
# TODO: Embed cross and add to deep feature columns
return wide_fc, deep_fc
```
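One way the feature columns might be assembled is sketched below. The vocabularies and bucket boundaries are assumptions for illustration, not values taken from this lab.
```
def create_feature_columns_sketch(nembeds):
    """A possible completion (sketch); vocabularies and bucket boundaries
    are assumptions, not values taken from this lab."""
    fc = tf.feature_column

    # Deep side: raw numeric features.
    deep_fc = {
        "mother_age": fc.numeric_column("mother_age"),
        "gestation_weeks": fc.numeric_column("gestation_weeks")}

    # Wide side: one-hot encoded categorical features.
    is_male = fc.categorical_column_with_vocabulary_list(
        "is_male", ["True", "False", "Unknown"])
    plurality = fc.categorical_column_with_vocabulary_list(
        "plurality", ["Single(1)", "Twins(2)", "Triplets(3)",
                      "Quadruplets(4)", "Quintuplets(5)", "Multiple(2+)"])

    # Bucketize the float fields; bucketized columns are already one-hot,
    # so they can go straight onto the wide side.
    age_buckets = fc.bucketized_column(
        deep_fc["mother_age"], boundaries=list(range(15, 45)))
    gestation_buckets = fc.bucketized_column(
        deep_fc["gestation_weeks"], boundaries=list(range(17, 47)))

    wide_fc = {
        "is_male": fc.indicator_column(is_male),
        "plurality": fc.indicator_column(plurality),
        "age_buckets": age_buckets,
        "gestation_buckets": gestation_buckets}

    # Cross the sparse columns and embed the cross on the deep side.
    crossed = fc.crossed_column(
        [is_male, plurality, age_buckets, gestation_buckets],
        hash_bucket_size=20000)
    deep_fc["crossed_embeds"] = fc.embedding_column(crossed, dimension=nembeds)

    return wide_fc, deep_fc
```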
### Lab Task #5: Create wide and deep model and output layer.
So we've figured out how to get our inputs ready for machine learning but now we need to connect them to our desired output. Our model architecture is what links the two together. We need to create a wide and deep model now. The wide side will just be a linear regression or dense layer. For the deep side, let's create some hidden dense layers. All of this will end with a single dense output layer. This is regression so make sure the output layer activation is correct and that the shape is right.
```
def get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units):
"""Creates model architecture and returns outputs.
Args:
wide_inputs: Dense tensor used as inputs to wide side of model.
deep_inputs: Dense tensor used as inputs to deep side of model.
dnn_hidden_units: List of integers where length is number of hidden
layers and ith element is the number of neurons at ith layer.
Returns:
Dense tensor output from the model.
"""
# Hidden layers for the deep side
layers = [int(x) for x in dnn_hidden_units]
deep = deep_inputs
# TODO: Create DNN model for the deep side
deep_out =
# TODO: Create linear model for the wide side
wide_out =
# Concatenate the two sides
both = tf.keras.layers.concatenate(
inputs=[deep_out, wide_out], name="both")
# TODO: Create final output layer
return output
```
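A possible completion of the architecture is sketched below; the number of units on the wide side and the layer names are arbitrary choices, not the official solution.
```
def get_model_outputs_sketch(wide_inputs, deep_inputs, dnn_hidden_units):
    """A possible completion (sketch); unit counts and names are arbitrary."""
    # Deep side: a stack of dense hidden layers.
    deep = deep_inputs
    for layerno, numnodes in enumerate([int(x) for x in dnn_hidden_units]):
        deep = tf.keras.layers.Dense(
            units=numnodes, activation="relu",
            name="dnn_{}".format(layerno + 1))(deep)
    deep_out = deep

    # Wide side: a single linear layer.
    wide_out = tf.keras.layers.Dense(
        units=10, activation=None, name="linear")(wide_inputs)

    # Concatenate the two sides and map to a single linear output (regression).
    both = tf.keras.layers.concatenate(inputs=[deep_out, wide_out], name="both")
    output = tf.keras.layers.Dense(units=1, activation="linear", name="weight")(both)
    return output
```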
### Lab Task #6: Create custom evaluation metric.
We want to make sure that we have some useful way to measure model performance for us. Since this is regression, we would like to know the RMSE of the model on our evaluation dataset, however, this does not exist as a standard evaluation metric, so we'll have to create our own by using the true and predicted labels.
```
def rmse(y_true, y_pred):
"""Calculates RMSE evaluation metric.
Args:
y_true: tensor, true labels.
y_pred: tensor, predicted labels.
Returns:
Tensor with value of RMSE between true and predicted labels.
"""
# TODO: Calculate RMSE from true and predicted labels
pass
```
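A minimal sketch of the metric, using the usual RMSE formula:
```
def rmse_sketch(y_true, y_pred):
    """A minimal RMSE metric (sketch)."""
    # Square root of the mean squared difference between labels and predictions.
    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
```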
### Lab Task #7: Build wide and deep model tying all of the pieces together.
Excellent! We've assembled all of the pieces, now we just need to tie them all together into a Keras Model. This is NOT a simple feedforward model with no branching, side inputs, etc. so we can't use Keras' Sequential Model API. We're instead going to use Keras' Functional Model API. Here we will build the model using [tf.keras.models.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics.
```
def build_wide_deep_model(dnn_hidden_units=[64, 32], nembeds=3):
"""Builds wide and deep model using Keras Functional API.
Returns:
`tf.keras.models.Model` object.
"""
# Create input layers
inputs = create_input_layers()
# Create feature columns
wide_fc, deep_fc = create_feature_columns(nembeds)
# The constructor for DenseFeatures takes a list of numeric columns
# The Functional API in Keras requires: LayerConstructor()(inputs)
# TODO: Add wide and deep feature colummns
wide_inputs = tf.keras.layers.DenseFeatures(
feature_columns=#TODO, name="wide_inputs")(inputs)
deep_inputs = tf.keras.layers.DenseFeatures(
feature_columns=#TODO, name="deep_inputs")(inputs)
# Get output of model given inputs
output = get_model_outputs(wide_inputs, deep_inputs, dnn_hidden_units)
# Build model and compile it all together
model = tf.keras.models.Model(inputs=inputs, outputs=output)
# TODO: Add custom eval metrics to list
model.compile(optimizer="adam", loss="mse", metrics=["mse"])
return model
print("Here is our wide and deep architecture so far:\n")
model = build_wide_deep_model()
print(model.summary())
```
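For reference, here is how the pieces could be tied together using the sketch helpers from the previous cells (again a sketch, not the official solution).
```
def build_wide_deep_model_sketch(dnn_hidden_units=[64, 32], nembeds=3):
    """A possible completion (sketch) wiring together the sketch helpers above."""
    inputs = create_input_layers_sketch()
    wide_fc, deep_fc = create_feature_columns_sketch(nembeds)

    # DenseFeatures turns raw inputs + feature columns into dense tensors.
    wide = tf.keras.layers.DenseFeatures(
        feature_columns=list(wide_fc.values()), name="wide_inputs")(inputs)
    deep = tf.keras.layers.DenseFeatures(
        feature_columns=list(deep_fc.values()), name="deep_inputs")(inputs)

    output = get_model_outputs_sketch(wide, deep, dnn_hidden_units)
    model = tf.keras.models.Model(inputs=inputs, outputs=output)
    model.compile(optimizer="adam", loss="mse", metrics=[rmse_sketch, "mse"])
    return model
```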
We can visualize the wide and deep network using the Keras plot_model utility.
```
tf.keras.utils.plot_model(
model=model, to_file="wd_model.png", show_shapes=False, rankdir="LR")
```
## Run and evaluate model
### Lab Task #8: Train and evaluate.
We've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data. Also, don't forget to add the callback to TensorBoard.
```
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around
NUM_EVALS = 5 # how many times to evaluate
# Enough to get a reasonable sample, but not so much that it slows down
NUM_EVAL_EXAMPLES = 10000
# TODO: Load training dataset
trainds = load_dataset()
# TODO: Load evaluation dataset
evalds = load_dataset().take(count=NUM_EVAL_EXAMPLES // 1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
logdir = os.path.join(
"logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=logdir, histogram_freq=1)
# TODO: Fit model on training dataset and evaluate every so often
history = model.fit()
```
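A possible completion of the training cell is sketched below. The `train*.csv`/`eval*.csv` file patterns are assumptions and should match the CSVs created in lab 4a, and the sketch assumes the TODOs in `load_dataset` above have been filled in.
```
# A possible completion (sketch). The train*/eval* file patterns are
# assumptions; they should match the CSVs created in lab 4a.
trainds = load_dataset(
    pattern="train*.csv",
    batch_size=TRAIN_BATCH_SIZE,
    mode=tf.estimator.ModeKeys.TRAIN)

evalds = load_dataset(
    pattern="eval*.csv",
    batch_size=1000,
    mode=tf.estimator.ModeKeys.EVAL).take(count=NUM_EVAL_EXAMPLES // 1000)

history = model.fit(
    trainds,
    validation_data=evalds,
    epochs=NUM_EVALS,
    steps_per_epoch=steps_per_epoch,
    callbacks=[tensorboard_callback])
```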
### Visualize loss curve
```
# Plot
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(["loss", "rmse"]):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history["val_{}".format(key)])
plt.title("model {}".format(key))
plt.ylabel(key)
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left");
```
### Save the model
```
OUTPUT_DIR = "babyweight_trained_wd"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(
OUTPUT_DIR, datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
tf.saved_model.save(
obj=model, export_dir=EXPORT_PATH) # with default serving function
print("Exported trained model to {}".format(EXPORT_PATH))
!ls $EXPORT_PATH
```
## Lab Summary:
In this lab, we started by defining the CSV column names, label column, and column defaults for our data inputs. Then, we constructed a tf.data Dataset of features and the label from the CSV files and created inputs layers for the raw features. Next, we set up feature columns for the model inputs and built a wide and deep neural network in Keras. We created a custom evaluation metric and built our wide and deep model. Finally, we trained and evaluated our model.
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
# Hive Command Note
**Outline**
* [Introduction](#intro)
* [Syntax](#syntax)
* [Reference](#refer)
---
Hive is a data warehouse infrastructure tool to process structured data in Hadoop. It resides on top of Hadoop to
summarize Big Data, and makes querying and analyzing easy.
* **Access Hive**: in cmd, type *`hive`*
* **Run hive script**: hive -f xxx.hql
> **Database in HIVE**
Each database is a collection of tables.
[link](http://www.tutorialspoint.com/hive/hive_create_database.htm)
```
# create database
CREATE DATABASE [IF NOT EXISTS] userdb;
# show all the databases
show databases;
# use a certain database, every table we create afterwards will be within the database
use databaseName;
# drop database
DROP DATABASE IF EXISTS userdb;
```
> **Create Table**
1. employees.csv -> HDFS
2. create table & load employees.csv
3. drop employees table (be careful: by dropping the table, HIVE will actually delete the original csv, not just the table itself). Instead, we can create an external table.
* External tables: if you drop them, data in hdfs will NOT be deleted.
**Data Types**
* **Integers**
    * *TINYINT*: 1 byte integer
    * *SMALLINT*: 2 byte integer
    * *INT*: 4 byte integer
    * *BIGINT*: 8 byte integer
* **Boolean type**
    * *BOOLEAN*: TRUE/FALSE
* **Floating point numbers**
    * *FLOAT*: single precision
    * *DOUBLE*: double precision
* **Fixed point numbers**
    * *DECIMAL*: a fixed point value of user defined scale and precision
* **String types**
    * *STRING*: sequence of characters in a specified character set
    * *VARCHAR*: sequence of characters in a specified character set with a maximum length
    * *CHAR*: sequence of characters in a specified character set with a defined length
* **Date and time types**
    * *TIMESTAMP*: a specific point in time, up to nanosecond precision
    * *DATE*: a date
* **Binary types**
    * *BINARY*: a sequence of bytes
**Complex Types**
* **Structs**: the elements within the type can be accessed using the DOT (.) notation. For example, for a column c of type STRUCT {a INT; b INT}, the a field is accessed by the expression c.a
    * format: `<first, second>`
    * access: mystruct.first
* **Maps (key-value tuples)**: The elements are accessed using ['element name'] notation. For example, in a map M comprising a mapping from 'group' -> gid, the gid value can be accessed using M['group']
    * format: key based
    * access: myMap['KEY']
* **Arrays (indexable lists)**: The elements in the array have to be of the same type. Elements can be accessed using the [n] notation where n is an index (zero-based) into the array. For example, for an array A having the elements ['a', 'b', 'c'], A[1] returns 'b'.
    * format: index based
    * access: myarray[0]
* **ROW FORMAT DELIMITED**: one row per line
* **FIELDS TERMINATED BY ','**: split column by comma
```
# use external table in this example
CREATE EXTERNAL TABLE movies(
userid INT,
movieid INT,
rating INT,
timestamp TIMESTAMP)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t';
CREATE TABLE myemployees(
name STRING,
salary FLOAT,
subordinates ARRAY<STRING>,
deductions MAP<STRING, FLOAT>,
address STRUCT<street:STRING, city:STRING, state:STRING,zip:INT>)
ROW FORMAT DELIMITED # This line tells Hive to expect one row per line: a newline character marks a new record.
FIELDS TERMINATED BY ',' # split columns by comma
COLLECTION ITEMS TERMINATED BY '#' # split collection (array/struct) items by `#`
MAP KEYS TERMINATED BY '-' # split map keys from values by `-`
LINES TERMINATED BY '\n'; # separate lines by `\n`
```
> **load file from hdfs into hive**
[StackOverFlow: Which is the difference between LOAD DATA INPATH and LOAD DATA LOCAL INPATH in HIVE](https://stackoverflow.com/questions/43204716/which-is-the-difference-between-load-data-inpath-and-load-data-local-inpath-in-h/43205970)
```
# load data into table movie. Note that the path is an HDFS path
# note that the original file in hdfs://hw5/ will be moved to ''hdfs://wolf.xxx.ooo.edu:8000/user/hive/warehouse/jchiu.db/movie/u.data'' after this command
LOAD DATA INPATH 'hw5/u.data' into table movie;
# load data into table movie. Note that the path is a local path
# LOCAL is an identifier to specify the local path. It is optional.
# when using LOCAL, the file is copied to the hive directory
LOAD DATA LOCAL INPATH 'localpath' into table movie;
LOAD DATA LOCAL INPATH '/home/public/course/recommendationEngine/u.data' into table movies;
# create an external table
CREATE EXTERNAL TABLE myemployees
LOAD DATA INPATH '...' INTO TABLE employees
```
> **see column name; describe table**
```
# method 1
describe database.tablename;
# method 2
use database;
describe tablename;
```
> **Query**
```
SELECT [ALL | DISTINCT] select_expr, select_expr, ...
FROM table_reference
[WHERE where_condition]
[GROUP BY col_list]
[HAVING having_condition]
[ORDER BY col_list]]
[LIMIT number];
select address.city from employees
```
> **show tables**
```
# if already use database, it'll show tables in this database; if not, it'll show all the tables
show tables;
```
> **drop tables**
Square brackets [] mark optional clauses; they can be omitted.
```
DROP TABLE [IF EXISTS] table_name;
```
> **create view in hive**
```
CREATE VIEW [IF NOT EXISTS] emp_30000 AS
SELECT * FROM employee
WHERE salary>30000;
```
> **drop a view**
```
DROP VIEW view_name
```
> **join**
[tutorialspoint: hiveql join](https://www.tutorialspoint.com/hive/hiveql_joins.htm)
Syntax-wise, it is essentially the same as SQL
> **hive built in aggregation functions**
[treasuredata: hive-aggregate-functions](https://docs.treasuredata.com/articles/hive-aggregate-functions)
> **hive built in operators**
[tutorialspoint: built-in operators](https://www.tutorialspoint.com/hive/hive_built_in_operators.htm)
deal with NULL/NA, equal...etc
> **writing data into the filesystem from queries**
[hive doc](https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-Writingdataintothefilesystemfromqueries)
[Hive INSERT OVERWRITE DIRECTORY command output is not separated by a delimiter. Why?](https://stackoverflow.com/questions/16459790/hive-insert-overwrite-directory-command-output-is-not-separated-by-a-delimiter)
The discussion happened in 2013; not sure if it's still valid or not.
* If LOCAL keyword is used, Hive will write data to the directory on the local file system.
* Data written to the filesystem is serialized as text with columns separated by ^A and rows separated by newlines. If any of the columns are not of primitive type, then those columns are serialized to JSON format.
```
INSERT OVERWRITE [LOCAL] DIRECTORY directory1
SELECT ... FROM ...
```
* **STORED AS TEXTFILE**: Stored as plain text files. TEXTFILE is the default file format, unless the configuration parameter hive.default.fileformat has a different setting.
```
# in a newer hive version, this should work just fine
INSERT OVERWRITE [LOCAL] DIRECTORY directory1
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
SELECT ... FROM ...
# another way to work around this
# concat_ws: concat column together as string
INSERT OVERWRITE DIRECTORY '/user/hadoop/output'
SELECT concat_ws(',', col1, col2)
FROM graph_edges;
```
> **Create User Defined Functions (UDF)**
**Steps**
* write in java
* jar file
* import jar file
* use UDF as query
# Lab Material
```
### sample code from lab
CREATE EXTERNAL TABLE employees(
    name STRING,
    salary FLOAT)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA INPATH 'employees.csv' into table employees;
CREATE DATABASE msia;
SHOW DATABASES;
DROP DATABASE msia;
USE msia;
SHOW TABLES;
CREATE TABLE employees(
    name STRING,
    salary FLOAT,
    subordinates ARRAY<STRING>,
    deductions MAP<STRING, FLOAT>,
    address STRUCT<street:STRING, city:STRING, state:STRING, zip:INT>);
CREATE TABLE t (
    s STRING,
    f FLOAT,
    a ARRAY<MAP<STRING, STRUCT<p1:INT, p2:INT>>>)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '#'
MAP KEYS TERMINATED BY '-'
LINES TERMINATED BY '\n';
LOAD DATA INPATH 'employees.csv' into table employees;
```
---
# <a id='refer'>Reference</a>
* [Tutorialspoint Hive Tutorial](https://www.tutorialspoint.com/hive/index.htm)
* [Hive tutorial doc](https://cwiki.apache.org/confluence/display/Hive/Tutorial)
# Loading and working with data in sktime
Python provides a variety of useful ways to represent data, but NumPy arrays and pandas DataFrames are commonly used for data analysis. When using NumPy 2d-arrays or pandas DataFrames to analyze tabular data the rows are commonly used to represent each instance (e.g. case or observation) of the data, while the columns are used to represent a given feature (e.g. variable or dimension) for an observation. Since timeseries data also has a time dimension for a given instance and feature, several alternative data formats could be used to represent this data, including nested pandas DataFrame structures, NumPy 3d-arrays, or multi-indexed pandas DataFrames.
Sktime is designed to work with timeseries data stored as nested pandas DataFrame objects. Similar to working with pandas DataFrames with tabular data, this allows instances to be represented by rows and the feature data for each dimension of a problem (e.g. variables or features) to be stored in the DataFrame columns. To accomplish this the timepoints for each instance-feature combination are stored in a single cell in the input Pandas DataFrame ([see Sktime pandas DataFrame format](#sktime_df_format) for more details).
Users can load or convert data into sktime's format in a variety of ways. Data can be loaded directly from a bespoke sktime file format (.ts) ([see Representing data with .ts files](#ts_files)) or supported file formats provided by [other existing data sources](#other_file_types) (such as Weka ARFF and .tsv). Sktime also provides functions to convert data to and from sktime's nested pandas DataFrame format and several other common ways for representing timeseries data using NumPy arrays or pandas DataFrames. [see Converting between sktime and alternative timeseries formats](#convert).
The rest of this sktime tutorial will provide a more detailed description of the sktime pandas DataFrame format, a brief description of the .ts file format, how to load data from other supported formats, and how to convert between other common ways of representing timeseries data in NumPy arrays or pandas DataFrames.
<a id="sktime_df_format"></a>
## Sktime pandas DataFrame format
The core data structure for storing datasets in sktime is a _nested_ pandas DataFrame, where rows of the dataframe correspond to instances (cases or observations), and columns correspond to dimensions of the problem (features or variables). The multiple timepoints and their corresponding values for each instance-feature pair are stored as pandas Series object _nested_ within the applicable DataFrame cell.
For example, for a problem with n cases that each have data across c timeseries dimensions:
DataFrame:
index | dim_0 | dim_1 | ... | dim_c-1
0 | pd.Series | pd.Series | pd.Series | pd.Series
1 | pd.Series | pd.Series | pd.Series | pd.Series
... | ... | ... | ... | ...
n | pd.Series | pd.Series | pd.Series | pd.Series
Representing timeseries data in this way makes it easy to align the timeseries features for a given instance with non-timeseries information. For example, in a classification problem, it is easy to align the timeseries features for an observation with its (index-aligned) target class label:
index | class_val
0 | int
1 | int
... | ...
n | int
While sktime's format uses pandas Series objects in its nested DataFrame structure, other data structures like NumPy arrays could be used to hold the timeseries values in each cell. However, the use of pandas Series objects helps to facilitate simple storage of sparse data and makes it easy to accommodate series with non-integer timestamps (such as dates).
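To make the nested structure concrete, here is a minimal sketch that builds such a DataFrame by hand, with each cell holding an entire pandas Series of timepoints (the sizes and labels are arbitrary illustration values):
```
import numpy as np
import pandas as pd

n_instances, n_timepoints = 3, 4
rng = np.random.default_rng(0)

# Each column is a problem dimension; each cell holds an entire
# pandas Series of timepoint values for one instance.
X_nested = pd.DataFrame({
    "dim_0": [pd.Series(rng.standard_normal(n_timepoints))
              for _ in range(n_instances)],
    "dim_1": [pd.Series(rng.standard_normal(n_timepoints))
              for _ in range(n_instances)],
})

# Index-aligned class labels for a classification problem.
y = pd.Series([0, 1, 0], name="class_val")

print(X_nested.shape)             # (3, 2): instances x dimensions
print(type(X_nested.iloc[0, 0]))  # each cell is a pandas Series
```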
<a id="ts_files"></a>
## The .ts file format
One common use case is to load locally stored data. To make this easy, the .ts file format has been created for representing problems in a standard format for use with sktime.
### Representing data with .ts files
A .ts file includes two main parts:
* header information
* data
The header information is used to facilitate simple representation of the data by including metadata about the structure of the problem. The header contains the following:
@problemName <problem name>
@timeStamps <true/false>
@univariate <true/false>
@classLabel <true/false> <space delimited list of possible class values>
@data
The data for the problem should begin after the @data tag. In the simplest case where @timestamps is false, values for a series are expressed in a comma-separated list and the index of each value is relative to its position in the list (0, 1, ..., m). An _instance_ may contain 1 to many dimensions, where instances are line-delimited and dimensions within an instance are colon (:) delimited. For example:
2,3,2,4:4,3,2,2
13,12,32,12:22,23,12,32
4,4,5,4:3,2,3,2
This example data has 3 _instances_, corresponding to the three lines shown above. Each instance has 2 _dimensions_ with 4 observations per dimension. For example, the initial instance's first dimension has the timepoint values of 2, 3, 2, 4 and the second dimension has the values 4, 3, 2, 2.
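To make the colon/comma structure concrete, here is a tiny dependency-free sketch that splits one such line into its dimensions and timepoint values (an illustration only, not sktime's actual parser):
```
line = "2,3,2,4:4,3,2,2"

# Dimensions are colon-delimited; timepoint values within a dimension
# are comma-delimited, indexed by their position (0, 1, ..., m).
dimensions = [[float(v) for v in dim.split(",")] for dim in line.split(":")]

print(dimensions)  # [[2.0, 3.0, 2.0, 4.0], [4.0, 3.0, 2.0, 2.0]]
```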
Missing readings can be specified using ?. For example,
2,?,2,4:4,3,2,2
13,12,32,12:22,23,12,32
4,4,5,4:3,2,3,2
would indicate the second timepoint value of the initial instance's first dimension is missing.
Alternatively, for sparse datasets, readings can be specified by setting @timestamps to true in the header and representing the data with tuples in the form of (timestamp, value) just for the observed values. For example, the first instance in the example above could be specified in this representation as:
    (0,2),(1,3),(2,2),(3,4):(0,4),(1,3),(2,2),(3,2)
Equivalently, the sparser example
2,5,?,?,?,?,?,5,?,?,?,?,4
could be represented with just the non-missing timestamps as:
    (0,2),(1,5),(7,5),(12,4)
When using the .ts file format to store data for timeseries classification problems, the class label for an instance should be specified in the last dimension and @classLabel should be set to true in the header information and be followed by the set of possible class values. For example, if a case consists of a single dimension and has a class value of 1 it would be specified as:
1,4,23,34:1
### Loading from .ts file to pandas DataFrame
A dataset can be loaded from a .ts file using the following method in sktime.utils.data_io.py:
load_from_tsfile_to_dataframe(full_file_path_and_name, replace_missing_vals_with='NaN')
This can be demonstrated using the Arrow Head problem that is included in sktime under sktime/datasets/data
```
import os
import sktime
from sktime.utils.data_io import load_from_tsfile_to_dataframe
DATA_PATH = os.path.join(os.path.dirname(sktime.__file__), "datasets/data")
train_x, train_y = load_from_tsfile_to_dataframe(
os.path.join(DATA_PATH, "ArrowHead/ArrowHead_TRAIN.ts")
)
test_x, test_y = load_from_tsfile_to_dataframe(
os.path.join(DATA_PATH, "ArrowHead/ArrowHead_TEST.ts")
)
```
Train and test partitions of the ArrowHead problem have been loaded into nested dataframes with an associated array of class values. As an example, below are the first 5 rows from the train_x and train_y:
```
train_x.head()
train_y[0:5]
```
<a id="other_file_types"></a>
## Loading other file formats
Researchers who have made timeseries data available have used two other common formats, including:
+ Weka ARFF files
+ UCR .tsv files
### Loading from Weka ARFF files
It is also possible to load data from Weka's attribute-relation file format (ARFF) files. Data for timeseries problems are made available in this format by researchers at the University of East Anglia (among others) at www.timeseriesclassification.com. The `load_from_arff_to_dataframe` method in `sktime.utils.data_io` supports reading data for both univariate and multivariate timeseries problems.
The univariate functionality is demonstrated below using data on the ArrowHead problem again (this time loading from ARFF file).
```
from sktime.utils.data_io import load_from_arff_to_dataframe
X, y = load_from_arff_to_dataframe(
os.path.join(DATA_PATH, "ArrowHead/ArrowHead_TRAIN.arff")
)
X.head()
```
The multivariate BasicMotions problem is used below to illustrate the ability to read multivariate timeseries data from ARFF files into the sktime format.
```
X, y = load_from_arff_to_dataframe(
os.path.join(DATA_PATH, "BasicMotions/BasicMotions_TRAIN.arff")
)
X.head()
```
### Loading from UCR .tsv Format Files
A further option is to load data into sktime from tab-separated value (.tsv) files. Researchers at the University of California, Riverside make a variety of timeseries data available in this format at https://www.cs.ucr.edu/~eamonn/time_series_data_2018.
The `load_from_ucr_tsv_to_dataframe` method in `sktime.utils.data_io` supports reading univariate problems. An example with ArrowHead is given below to demonstrate equivalence with loading from the .ts and ARFF file formats.
```
from sktime.utils.data_io import load_from_ucr_tsv_to_dataframe
X, y = load_from_ucr_tsv_to_dataframe(
os.path.join(DATA_PATH, "ArrowHead/ArrowHead_TRAIN.tsv")
)
X.head()
```
<a id="convert"></a>
## Converting between other NumPy and pandas formats
It is also possible to use data from sources other than .ts and .arff files by manually shaping the data into the format described above.
Functions to convert from and to these types to sktime's nested DataFrame format are provided in `sktime.utils.data_processing`
### Using tabular data with sktime
One approach to representing timeseries data is a tabular DataFrame. As usual, each row represents an instance. In the tabular setting each timepoint of the univariate timeseries being measured for each instance is treated as a feature and stored as a primitive data type in the DataFrame's cells.
In a univariate setting, where there are `n` instances of the series and each univariate timeseries has `t` timepoints, this would yield a pandas DataFrame with shape (n, t). In practice, this could be used to represent sensors measuring the same signal over time (features) on different machines (instances) or the same economic variable over time (features) for different countries (instances).
The function `from_2d_array_to_nested` converts a (n, t) tabular DataFrame to nested DataFrame with shape (n, 1). To convert from a nested DataFrame to a tabular array the function `from_nested_to_2d_array` can be used.
The example below uses 50 instances with 20 timepoints each.
```
from numpy.random import default_rng
from sktime.utils.data_processing import (
from_2d_array_to_nested,
from_nested_to_2d_array,
is_nested_dataframe,
)
rng = default_rng()
X_2d = rng.standard_normal((50, 20))
print(f"The tabular data has the shape {X_2d.shape}")
```
The `from_2d_array_to_nested` function makes it easy to convert this to a nested DataFrame.
```
X_nested = from_2d_array_to_nested(X_2d)
print(f"X_nested is a nested DataFrame: {is_nested_dataframe(X_nested)}")
print(f"The cell contains a {type(X_nested.iloc[0,0])}.")
print(f"The nested DataFrame has shape {X_nested.shape}")
X_nested.head()
```
This nested DataFrame can also easily be converted back to a tabular array using `from_nested_to_2d_array`.
```
X_2d = from_nested_to_2d_array(X_nested)
print(f"The tabular data has the shape {X_2d.shape}")
```
### Using long-format data with sktime
Timeseries data can also be represented in _long_ format where each row identifies the value for a single timepoint for a given dimension for a given instance.
This format may be encountered in a database where each row stores a single value measurement identified by several identification columns. For example, where `case_id` is an id to identify a specific instance in the data, `dimension_id` is an integer between 0 and d-1 for d dimensions in the data, `reading_id` is the index of timepoints for the associated `case_id` and `dimension_id`, and `value` is the actual value of the observation. E.g.:
    index | case_id | dim_id | reading_id | value
    ---------------------------------------------
    0     | int     | int    | int        | double
    1     | int     | int    | int        | double
    2     | int     | int    | int        | double
    3     | int     | int    | int        | double
Sktime provides functions to convert to and from the long data format in `sktime.utils.data_processing`.
The `from_long_to_nested` function converts from a long format DataFrame to sktime's nested format (with assumptions made on how the data is initially formatted). Conversely, `from_nested_to_long` converts from a sktime nested DataFrame into a long format DataFrame.
To demonstrate this functionality the method below creates a dataset with 50 instances (cases), 5 dimensions and 20 timepoints per dimension.
```
from sktime.utils.data_io import generate_example_long_table
X = generate_example_long_table(num_cases=50, series_len=20, num_dims=5)
X.head()
X.tail()
```
As shown below, applying the `from_long_to_nested` method returns a sktime-formatted dataset with individual dimensions represented by columns of the output dataframe.
```
from sktime.utils.data_processing import from_long_to_nested, from_nested_to_long
X_nested = from_long_to_nested(X)
X_nested.head()
```
As expected the result is a nested DataFrame and the cells include nested pandas Series objects.
```
print(f"X_nested is a nested DataFrame: {is_nested_dataframe(X_nested)}")
print(f"The cell contains a {type(X_nested.iloc[0,0])}.")
print(f"The nested DataFrame has shape {X_nested.shape}")
X_nested.iloc[0, 0].head()
```
As shown below, the `from_nested_to_long` function can be used to convert the resulting nested DataFrame (or any nested DataFrame) to a long format DataFrame.
```
X_long = from_nested_to_long(
X_nested,
instance_column_name="case_id",
time_column_name="reading_id",
dimension_column_name="dim_id",
)
X_long.head()
X_long.tail()
```
### Using multi-indexed pandas DataFrames
Pandas deprecated its Panel object in version 0.20.1. Since that time pandas has recommended representing 3-dimensional data using a multi-indexed DataFrame.
Storing timeseries data in a Pandas multi-indexed DataFrame is a natural option since many timeseries problems include data over the instance, feature and time dimensions.
Sktime provides the functions `from_multi_index_to_nested` and `from_nested_to_multi_index` in `sktime.utils.data_processing` to easily convert between pandas multi-indexed DataFrames and sktime's nested DataFrame structure.
The example below illustrates how these functions can be used to convert to and from the nested structure given data with 50 instances, 5 features (columns) and 20 timepoints per feature. In the multi-indexed DataFrame a row represents a unique combination of the instance and timepoint indices. Therefore, the resulting multi-indexed DataFrame should have the shape (1000, 5).
```
from sktime.utils.data_io import make_multi_index_dataframe
from sktime.utils.data_processing import (
from_multi_index_to_nested,
from_nested_to_multi_index,
)
X_mi = make_multi_index_dataframe(n_instances=50, n_columns=5, n_timepoints=20)
print(f"The multi-indexed DataFrame has shape {X_mi.shape}")
print(f"The multi-index names are {X_mi.index.names}")
X_mi.head()
```
The multi-indexed DataFrame can be easily converted to a nested DataFrame with shape (50, 5). Note that the conversion to the nested DataFrame has preserved the column names (it has also preserved the values of the instance index and the pandas Series objects nested in each cell have preserved the time index).
```
X_nested = from_multi_index_to_nested(X_mi, instance_index="case_id")
print(f"X_nested is a nested DataFrame: {is_nested_dataframe(X_nested)}")
print(f"The cell contains a {type(X_nested.iloc[0,0])}.")
print(f"The nested DataFrame has shape {X_nested.shape}")
X_nested.head()
```
Nested DataFrames can also be converted to a multi-indexed Pandas DataFrame
```
X_mi = from_nested_to_multi_index(
X_nested, instance_index="case_id", time_index="reading_id"
)
X_mi.head()
```
### Using NumPy 3d-arrays with sktime
Another common approach for representing timeseries data is to use a 3-dimensional NumPy array with shape (n_instances, n_columns, n_timepoints).
Sktime provides the functions `from_3d_numpy_to_nested` and `from_nested_to_3d_numpy` in `sktime.utils.data_processing` to let users easily convert between NumPy 3d-arrays and nested pandas DataFrames.
This is demonstrated using a 3d-array with 50 instances, 5 features (columns) and 20 timepoints, resulting in a 3d-array with shape (50, 5, 20).
```
from sktime.utils.data_processing import (
from_3d_numpy_to_nested,
from_multi_index_to_3d_numpy,
from_nested_to_3d_numpy,
)
X_mi = make_multi_index_dataframe(n_instances=50, n_columns=5, n_timepoints=20)
X_3d = from_multi_index_to_3d_numpy(
X_mi, instance_index="case_id", time_index="reading_id"
)
print(f"The 3d-array has shape {X_3d.shape}")
```
The 3d-array can be easily converted to a nested DataFrame with shape (50, 5). Note that since NumPy arrays don't have indices, the instance index is the numerical range over the number of instances and the column names are automatically assigned. Users can optionally supply their own column names via the columns_names parameter.
```
X_nested = from_3d_numpy_to_nested(X_3d)
print(f"X_nested is a nested DataFrame: {is_nested_dataframe(X_nested)}")
print(f"The cell contains a {type(X_nested.iloc[0,0])}.")
print(f"The nested DataFrame has shape {X_nested.shape}")
X_nested.head()
```
Nested DataFrames can also be converted to NumPy 3d-arrays.
```
X_3d = from_nested_to_3d_numpy(X_nested)
print(f"The resulting object is a {type(X_3d)}")
print(f"The shape of the 3d-array is {X_3d.shape}")
```
### Converting between NumPy 3d-arrays and pandas multi-indexed DataFrame
Although an example is not provided here, sktime lets users convert data between NumPy 3d-arrays and a multi-indexed pandas DataFrame formats using the functions `from_3d_numpy_to_multi_index` and `from_multi_index_to_3d_numpy` in `sktime.utils.data_processing`.
This notebook will show an example of text preprocessing applied to the RTL-Wiki dataset.
This dataset was introduced in [1] and later recreated in [2]. You can download it from http://139.18.2.164/mroeder/palmetto/datasets/rtl-wiki.tar.gz
--------
[1] "Reading Tea Leaves: How Humans Interpret Topic Models" (NIPS 2009)
[2] "Exploring the Space of Topic Coherence Measures" (WSDM 2015)
```
# download corpus and unpack it:
! wget http://139.18.2.164/mroeder/palmetto/datasets/rtl-wiki.tar.gz -O rtl-wiki.tar.gz
! tar xzf rtl-wiki.tar.gz
```
The corpus is a sample of 10000 articles from English Wikipedia in a MediaWiki markup format.
Hence, we need to strip specific wiki formatting. We advise using a `mwparserfromhell` fork optimized to deal with the English Wikipedia.
```
git clone --branch images_and_interwiki https://github.com/bt2901/mwparserfromhell.git
```
```
! git clone --branch images_and_interwiki https://github.com/bt2901/mwparserfromhell.git
```
The Wikipedia dataset is too heterogeneous. Building a good topic model here requires a lot of topics or a lot of documents.
To make the collection more focused, we will filter out everything which isn't about people. We will use the following criteria to distinguish between people and non-people:
```
import re
# all infoboxes related to persons, according to https://en.wikipedia.org/wiki/Wikipedia:List_of_infoboxes
person_infoboxes = {'infobox magic: the gathering player', 'infobox architect', 'infobox mountaineer', 'infobox scientist', 'infobox chess biography', 'infobox racing driver', 'infobox saint', 'infobox snooker player', 'infobox figure skater', 'infobox theological work', 'infobox gaelic athletic association player', 'infobox professional wrestler', 'infobox noble', 'infobox pelotari', 'infobox native american leader', 'infobox pretender', 'infobox amateur wrestler', 'infobox college football player', 'infobox buddha', 'infobox cfl biography', 'infobox playboy playmate', 'infobox cyclist', 'infobox martial artist', 'infobox motorcycle rider', 'infobox motocross rider', 'infobox bandy biography', 'infobox video game player', 'infobox dancer', 'infobox nahua officeholder', 'infobox criminal', 'infobox squash player', 'infobox go player', 'infobox bullfighting career', 'infobox engineering career', 'infobox pirate', 'infobox latter day saint biography', 'infobox sumo wrestler', 'infobox youtube personality', 'infobox national hockey league coach', 'infobox rebbe', 'infobox football official', 'infobox aviator', 'infobox pharaoh', 'infobox classical composer', 'infobox fbi ten most wanted', 'infobox chef', 'infobox engineer', 'infobox nascar driver', 'infobox medical person', 'infobox jewish leader', 'infobox horseracing personality', 'infobox poker player', 'infobox economist', 'infobox peer', 'infobox war on terror detainee', 'infobox philosopher', 'infobox professional bowler', 'infobox champ car driver', 'infobox golfer', 'infobox le mans driver', 'infobox alpine ski racer', 'infobox boxer (amateur)', 'infobox bodybuilder', 'infobox college coach', 'infobox speedway rider', 'infobox skier', 'infobox medical details', 'infobox field hockey player', 'infobox badminton player', 'infobox sports announcer details', 'infobox academic', 'infobox f1 driver', 'infobox ncaa athlete', 'infobox biathlete', 'infobox comics creator', 'infobox rugby league biography', 'infobox fencer', 'infobox theologian', 'infobox religious biography', 'infobox egyptian dignitary', 'infobox curler', 'infobox racing driver series section', 'infobox afl biography', 'infobox speed skater', 'infobox climber', 'infobox rugby biography', 'infobox clergy', 'infobox equestrian', 'infobox member of the knesset', 'infobox pageant titleholder', 'infobox lacrosse player', 'infobox tennis biography', 'infobox gymnast', 'infobox sport wrestler', 'infobox sports announcer', 'infobox surfer', 'infobox darts player', 'infobox christian leader', 'infobox presenter', 'infobox gunpowder plotter', 'infobox table tennis player', 'infobox sailor', 'infobox astronaut', 'infobox handball biography', 'infobox volleyball biography', 'infobox spy', 'infobox wrc driver', 'infobox police officer', 'infobox swimmer', 'infobox netball biography', 'infobox model', 'infobox comedian', 'infobox boxer'}
# is page included in a category with demography information?
demography_re = re.compile("([0-9]+ (deaths|births))|(living people)")
dir_name = "persons"
! mkdir $dir_name
import glob
from bs4 import BeautifulSoup
from mwparserfromhell import mwparserfromhell
from tqdm import tqdm_notebook as tqdm
for filename in tqdm(glob.glob("documents/*.html")):
doc_id = filename.partition("/")[-1]
doc_id = doc_id.rpartition(".")[0] + ".txt"
is_about_person = False
with open(filename, "r") as f:
soup = BeautifulSoup("".join(f.readlines()))
text = soup.findAll('textarea', id="wpTextbox1")[0].contents[0]
        text = text.replace("&amp;", "&").replace('&lt;', '<').replace('&gt;', '>')
wikicode = mwparserfromhell.parse(text)
if dir_name == "persons":
for node in wikicode.nodes:
entry_type = str(type(node))
if "Wikilink" in entry_type:
special_link_name, _, cat_name = node.title.lower().strip().partition(":")
if special_link_name == "category":
if demography_re.match(cat_name):
is_about_person = True
if "Template" in entry_type:
name = str(node.name).lower().strip()
if name in person_infoboxes:
is_about_person = True
should_be_saved = is_about_person
else:
should_be_saved = True
if should_be_saved:
with open(f"{dir_name}/{doc_id}", "w") as f2:
stripped_text = wikicode.strip_code()
f2.write(stripped_text)
```
Now we have a folder `persons` which contains 1201 documents. Let's take a look at one of them:
```
! head $dir_name/Eusebius.txt
```
We need to lemmatize the texts, remove stopwords, and extract informative n-grams.
There's no single "correct" way to do this, but a reasonable baseline is the well-known `nltk` library.
```
import nltk
import string
import pandas as pd
from glob import glob
nltk.data.path.append('/home/evgenyegorov/nltk_data/')
files = glob(dir_name + '/*.txt')
data = []
for path in files:
entry = {}
entry['id'] = path.split('/')[-1].rpartition(".")[0]
with open(path, 'r') as f:
entry['raw_text'] = " ".join(line.strip() for line in f.readlines())
data.append(entry)
wiki_texts = pd.DataFrame(data)
from tqdm import tqdm
tokenized_text = []
for text in tqdm(wiki_texts['raw_text'].values):
tokens = nltk.wordpunct_tokenize(text.lower())
tokenized_text.append(nltk.pos_tag(tokens))
wiki_texts['tokenized'] = tokenized_text
from nltk.corpus import wordnet
def nltk2wn_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return ''
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop = set(stopwords.words('english'))
lemmatized_text = []
wnl = WordNetLemmatizer()
for text in wiki_texts['tokenized'].values:
lemmatized = [wnl.lemmatize(word,nltk2wn_tag(pos))
if nltk2wn_tag(pos) != ''
else wnl.lemmatize(word)
for word, pos in text ]
lemmatized = [word for word in lemmatized
if word not in stop and word.isalpha()]
lemmatized_text.append(lemmatized)
wiki_texts['lemmatized'] = lemmatized_text
```
N-grams are a powerful feature, and BigARTM is able to take advantage of them (the technical term is 'multimodal topic modeling': our topic model can model many different kinds of features linked to a specific document, not just words).
```
from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder
bigram_measures = BigramAssocMeasures()
finder = BigramCollocationFinder.from_documents(wiki_texts['lemmatized'])
finder.apply_freq_filter(5)
set_dict = set(finder.nbest(bigram_measures.pmi,32100)[100:])
documents = wiki_texts['lemmatized']
bigrams = []
for doc in documents:
entry = ['_'.join([word_first, word_second])
for word_first, word_second in zip(doc[:-1],doc[1:])
if (word_first, word_second) in set_dict]
bigrams.append(entry)
wiki_texts['bigram'] = bigrams
from collections import Counter
def vowpalize_sequence(sequence):
word_2_frequency = Counter(sequence)
del word_2_frequency['']
vw_string = ''
for word in word_2_frequency:
vw_string += word + ":" + str(word_2_frequency[word]) + ' '
return vw_string
vw_text = []
for index, data in wiki_texts.iterrows():
vw_string = ''
doc_id = data.id
lemmatized = '@lemmatized ' + vowpalize_sequence(data.lemmatized)
bigram = '@bigram ' + vowpalize_sequence(data.bigram)
vw_string = ' |'.join([doc_id, lemmatized, bigram])
vw_text.append(vw_string)
wiki_texts['vw_text'] = vw_text
```
Vowpal Wabbit ("vw") is a text format which is a good fit for multimodal topic modeling. Here, we elected to store the dataset in a bag-of-words format (for performance reasons), but VW could store everything as a sequence of words as well.
It looks like this:
```
wiki_texts['vw_text'].head().values[0]
wiki_texts[['id','raw_text', 'vw_text']].to_csv('./wiki_data.csv')
```
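For instance, a single `vw_text` line is the document id followed by the two modalities, each introduced by `|@...` and listing `token:count` pairs. The tokens below are made up purely for illustration:
```
# Hypothetical example of one line produced by the loop above
example_vw_line = "Eusebius |@lemmatized bishop:3 church:2 history:1 |@bigram church_history:1"
```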
|
github_jupyter
|
```
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from bedrock_client.bedrock.analyzer.model_analyzer import ModelAnalyzer
from bedrock_client.bedrock.analyzer import ModelTypes
from bedrock_client.bedrock.api import BedrockApi
from bedrock_client.bedrock.metrics.service import ModelMonitoringService
import logging
def load_dataset(filepath, target):
df = pd.read_csv(filepath)
df['large_rings'] = (df['Rings'] > 10).astype(int)
# Ensure nothing missing
original_len = len(df)
df.dropna(how="any", axis=0, inplace=True)
num_rows_dropped = original_len - len(df)
if num_rows_dropped > 0:
print(f"Warning - dropped {num_rows_dropped} rows with NA data.")
y = df[target].values
df.drop(target, axis=1, inplace=True)
return df, y
def train_log_reg_model(X, y, seed=0, C=1, verbose=False):
verbose and print('\nTraining\nScaling...')
scaling = StandardScaler()
X = scaling.fit_transform(X)
verbose and print('Fitting...')
verbose and print('C:', C)
model = LogisticRegression(random_state=seed, C=C, max_iter=4000)
model.fit(X, y)
verbose and print('Chaining pipeline...')
pipe = Pipeline([('scaling', scaling), ('model', model)])
verbose and print('Training Done.')
return pipe
def compute_log_metrics(pipe,
x_test,
y_test,
y_test_onehot):
test_prob = pipe.predict_proba(x_test)
test_pred = pipe.predict(x_test)
acc = metrics.accuracy_score(y_test, test_pred)
precision = metrics.precision_score(y_test, test_pred, average='macro')
recall = metrics.recall_score(y_test, test_pred, average='macro')
f1_score = metrics.f1_score(y_test, test_pred, average='macro')
roc_auc = metrics.roc_auc_score(y_test_onehot, test_prob, average='macro', multi_class='ovr')
avg_prc = metrics.average_precision_score(y_test_onehot, test_prob, average='macro')
print("\nEvaluation\n"
f"\tAccuracy = {acc:.4f}\n"
f"\tPrecision (macro) = {precision:.4f}\n"
f"\tRecall (macro) = {recall:.4f}\n"
f"\tF1 score (macro) = {f1_score:.4f}\n"
f"\tROC AUC (macro) = {roc_auc:.4f}\n"
f"\tAverage precision (macro) = {avg_prc:.4f}")
# Bedrock Logger: captures model metrics
bedrock = BedrockApi(logging.getLogger(__name__))
# `log_chart_data` assumes binary classification
# For multiclass labels, we can use a "micro-average" by
# quantifying score on all classes jointly
# See https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
# This will allow us to use the same `log_chart_data` method
bedrock.log_chart_data(
y_test_onehot.ravel().astype(int).tolist(), # list of int
test_prob.ravel().astype(float).tolist() # list of float
)
bedrock.log_metric("Accuracy", acc)
bedrock.log_metric("Precision (macro)", precision)
bedrock.log_metric("Recall (macro)", recall)
bedrock.log_metric("F1 Score (macro)", f1_score)
bedrock.log_metric("ROC AUC (macro)", roc_auc)
bedrock.log_metric("Avg precision (macro)", avg_prc)
return test_prob, test_pred
x_train, y_train = load_dataset(
filepath="data/abalone_train.csv",
target="Type"
)
x_test, y_test = load_dataset(
filepath="data/abalone_test.csv",
target="Type"
)
enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
# sklearn `roc_auc_score` and `average_precision_score` expects
# binary label indicators with shape (n_samples, n_classes)
y_train_onehot = enc.fit_transform(y_train.reshape(-1, 1))
y_test_onehot = enc.transform(y_test.reshape(-1, 1))  # reuse the encoder fitted on the training labels
# Convert target variable to numeric values
# ModelMonitoringService.export_text expect both features
# and inference to be numeric values
y_train = np.argmax(y_train_onehot, axis=1)
y_test = np.argmax(y_test_onehot, axis=1)
for value, category in enumerate(enc.categories_[0]):
print(f'{category} : {value}')
pipe = train_log_reg_model(x_train,
y_train,
seed=0,
C=1e-1,
verbose=True)
test_prob, test_pred = compute_log_metrics(pipe,
x_test,
y_test,
y_test_onehot)
# Ignore ERROR, this is for testing purposes
CONFIG_FAI = {
'large_rings': {
'privileged_attribute_values': [1],
# privileged group name corresponding to values=[1]
'privileged_group_name': 'Large',
'unprivileged_attribute_values': [0],
# unprivileged group name corresponding to values=[0]
'unprivileged_group_name': 'Small',
}
}
# Train SHAP explainability model and calculate explainability (XAI) and fairness (FAI) metrics
analyzer = (
ModelAnalyzer(pipe[1],
model_name='logistic',
model_type=ModelTypes.LINEAR)
.train_features(x_train)
.test_features(x_test)
.fairness_config(CONFIG_FAI)
.test_labels(y_test)
.test_inference(test_pred)
)
analyzer.analyze()
ModelMonitoringService.export_text(
features=x_train.iteritems(), # assumes numeric values
inference=test_pred.tolist(), # assumes numeric values
)
for item in x_train.iteritems():
print(item)
```
|
github_jupyter
|
<table style="float:left; border:none">
<tr style="border:none">
<td style="border:none">
<a href="http://bokeh.pydata.org/">
<img
src="http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png"
style="width:70px"
>
</a>
</td>
<td style="border:none">
<h1>Bokeh Tutorial — <tt style="display:inline">bokeh.models</tt> interface</h1>
</td>
</tr>
</table>
## Models
NYTimes interactive chart [Usain Bolt vs. 116 years of Olympic sprinters](http://www.nytimes.com/interactive/2012/08/05/sports/olympics/the-100-meter-dash-one-race-every-medalist-ever.html)
The first thing we need is to get the data. The data for this chart is located in the ``bokeh.sampledata`` module as a Pandas DataFrame. You can see the first ten rows below:
```
from bokeh.sampledata.sprint import sprint
sprint[:10]
```
Next we import some of the Bokeh models that need to be assembled to make a plot. At a minimum, we need to start with ``Plot``, the glyphs (``Circle`` and ``Text``) we want to display, as well as ``ColumnDataSource`` to hold the data and range objects to set the plot bounds.
```
from bokeh.io import output_notebook, show
from bokeh.models.glyphs import Circle, Text
from bokeh.models import ColumnDataSource, Range1d, DataRange1d, Plot
output_notebook()
```
## Setting up Data
```
abbrev_to_country = {
"USA": "United States",
"GBR": "Britain",
"JAM": "Jamaica",
"CAN": "Canada",
"TRI": "Trinidad and Tobago",
"AUS": "Australia",
"GER": "Germany",
"CUB": "Cuba",
"NAM": "Namibia",
"URS": "Soviet Union",
"BAR": "Barbados",
"BUL": "Bulgaria",
"HUN": "Hungary",
"NED": "Netherlands",
"NZL": "New Zealand",
"PAN": "Panama",
"POR": "Portugal",
"RSA": "South Africa",
"EUA": "United Team of Germany",
}
gold_fill = "#efcf6d"
gold_line = "#c8a850"
silver_fill = "#cccccc"
silver_line = "#b0b0b1"
bronze_fill = "#c59e8a"
bronze_line = "#98715d"
fill_color = { "gold": gold_fill, "silver": silver_fill, "bronze": bronze_fill }
line_color = { "gold": gold_line, "silver": silver_line, "bronze": bronze_line }
def selected_name(name, medal, year):
return name if medal == "gold" and year in [1988, 1968, 1936, 1896] else None
t0 = sprint.Time[0]
sprint["Abbrev"] = sprint.Country
sprint["Country"] = sprint.Abbrev.map(lambda abbr: abbrev_to_country[abbr])
sprint["Medal"] = sprint.Medal.map(lambda medal: medal.lower())
sprint["Speed"] = 100.0/sprint.Time
sprint["MetersBack"] = 100.0*(1.0 - t0/sprint.Time)
sprint["MedalFill"] = sprint.Medal.map(lambda medal: fill_color[medal])
sprint["MedalLine"] = sprint.Medal.map(lambda medal: line_color[medal])
sprint["SelectedName"] = sprint[["Name", "Medal", "Year"]].apply(tuple, axis=1).map(lambda args: selected_name(*args))
source = ColumnDataSource(sprint)
```
## Basic Plot with Glyphs
```
plot_options = dict(plot_width=800, plot_height=480, toolbar_location=None,
outline_line_color=None, title = "Usain Bolt vs. 116 years of Olympic sprinters")
radius = dict(value=5, units="screen")
medal_glyph = Circle(x="MetersBack", y="Year", radius=radius, fill_color="MedalFill",
line_color="MedalLine", fill_alpha=0.5)
athlete_glyph = Text(x="MetersBack", y="Year", x_offset=10, text="SelectedName",
text_align="left", text_baseline="middle", text_font_size="9pt")
no_olympics_glyph = Text(x=7.5, y=1942, text=["No Olympics in 1940 or 1944"],
text_align="center", text_baseline="middle",
text_font_size="9pt", text_font_style="italic", text_color="silver")
xdr = Range1d(start=sprint.MetersBack.max()+2, end=0) # +2 is for padding
ydr = DataRange1d(range_padding=0.05)
plot = Plot(x_range=xdr, y_range=ydr, **plot_options)
plot.add_glyph(source, medal_glyph)
plot.add_glyph(source, athlete_glyph)
plot.add_glyph(no_olympics_glyph)
show(plot)
```
## Adding Axes and Grids
```
from bokeh.models import Grid, LinearAxis, SingleIntervalTicker
xdr = Range1d(start=sprint.MetersBack.max()+2, end=0) # +2 is for padding
ydr = DataRange1d(range_padding=0.05)
plot = Plot(x_range=xdr, y_range=ydr, **plot_options)
plot.add_glyph(source, medal_glyph)
plot.add_glyph(source, athlete_glyph)
plot.add_glyph(no_olympics_glyph)
xticker = SingleIntervalTicker(interval=5, num_minor_ticks=0)
xaxis = LinearAxis(ticker=xticker, axis_line_color=None, major_tick_line_color=None,
axis_label="Meters behind 2012 Bolt", axis_label_text_font_size="10pt",
axis_label_text_font_style="bold")
plot.add_layout(xaxis, "below")
xgrid = Grid(dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed")
plot.add_layout(xgrid)
yticker = SingleIntervalTicker(interval=12, num_minor_ticks=0)
yaxis = LinearAxis(ticker=yticker, major_tick_in=-5, major_tick_out=10)
plot.add_layout(yaxis, "right")
show(plot)
```
## Adding a Hover Tool
```
from bokeh.models import HoverTool
tooltips = """
<div>
<span style="font-size: 15px;">@Name</span>
<span style="font-size: 10px; color: #666;">(@Abbrev)</span>
</div>
<div>
<span style="font-size: 17px; font-weight: bold;">@Time{0.00}</span>
<span style="font-size: 10px; color: #666;">@Year</span>
</div>
<div style="font-size: 11px; color: #666;">@{MetersBack}{0.00} meters behind</div>
"""
xdr = Range1d(start=sprint.MetersBack.max()+2, end=0) # +2 is for padding
ydr = DataRange1d(range_padding=0.05)
plot = Plot(x_range=xdr, y_range=ydr, **plot_options)
medal = plot.add_glyph(source, medal_glyph) # we need this renderer to configure the hover tool
plot.add_glyph(source, athlete_glyph)
plot.add_glyph(no_olympics_glyph)
xticker = SingleIntervalTicker(interval=5, num_minor_ticks=0)
xaxis = LinearAxis(ticker=xticker, axis_line_color=None, major_tick_line_color=None,
axis_label="Meters behind 2012 Bolt", axis_label_text_font_size="10pt",
axis_label_text_font_style="bold")
plot.add_layout(xaxis, "below")
xgrid = Grid(dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed")
plot.add_layout(xgrid)
yticker = SingleIntervalTicker(interval=12, num_minor_ticks=0)
yaxis = LinearAxis(ticker=yticker, major_tick_in=-5, major_tick_out=10)
plot.add_layout(yaxis, "right")
hover = HoverTool(tooltips=tooltips, renderers=[medal])
plot.add_tools(hover)
show(plot)
from bubble_plot import get_1964_data
def get_plot():
return Plot(
x_range=Range1d(1, 9), y_range=Range1d(20, 100),
title="", plot_width=800, plot_height=400,
outline_line_color=None, toolbar_location=None,
)
df = get_1964_data()
df.head()
# EXERCISE: Add Circles to the plot from the data in `df`.
# With `fertility` for the x coordinates, `life` for the y coordinates.
plot = get_plot()
# EXERCISE: Color the circles by region_color & change the size of the color by population
# EXERCISE: Add axes and grid lines
# EXERCISE: Manually add a legend using Circle & Text. The color key is as follows
region_name_and_color = [
('America', '#3288bd'),
('East Asia & Pacific', '#99d594'),
('Europe & Central Asia', '#e6f598'),
('Middle East & North Africa', '#fee08b'),
('South Asia', '#fc8d59'),
('Sub-Saharan Africa', '#d53e4f')
]
```
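One possible solution sketch for the exercises above (not the only way to do it). It assumes, as the exercise comments suggest, that `df` has `fertility`, `life`, `population`, and `region_color` columns; the population-to-radius scaling and the legend coordinates are arbitrary choices made for illustration.
```
import numpy as np

plot = get_plot()

bubble_df = df.copy()
# Derived column: map population to a radius in data units (arbitrary scaling)
bubble_df["radius"] = np.sqrt(bubble_df.population) / 5e4
bubble_source = ColumnDataSource(bubble_df)

# Circles at (fertility, life), colored by region and sized by population
bubbles = Circle(x="fertility", y="life", radius="radius",
                 fill_color="region_color", line_color="region_color",
                 fill_alpha=0.6)
plot.add_glyph(bubble_source, bubbles)

# Axes and grid lines, reusing the models imported earlier in this notebook
xaxis = LinearAxis(axis_label="Children per woman (total fertility)")
yaxis = LinearAxis(axis_label="Life expectancy at birth (years)")
plot.add_layout(xaxis, "below")
plot.add_layout(yaxis, "left")
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

# Manual legend built from Circle & Text glyphs and the color key above
for i, (name, color) in enumerate(region_name_and_color):
    y_pos = 95 - i * 4
    plot.add_glyph(Circle(x=8.0, y=y_pos, radius=0.1,
                          fill_color=color, line_color=color))
    plot.add_glyph(Text(x=8.2, y=y_pos, text=[name],
                        text_font_size="9pt", text_baseline="middle"))

show(plot)
```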
|
github_jupyter
|
```
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import theano
from scipy.integrate import odeint
from theano import *
THEANO_FLAGS = "optimizer=fast_compile"
```
# Lotka-Volterra with manual gradients
by [Sanmitra Ghosh](https://www.mrc-bsu.cam.ac.uk/people/in-alphabetical-order/a-to-g/sanmitra-ghosh/)
Mathematical models are used ubiquitously in a variety of science and engineering domains to model the time evolution of physical variables. These mathematical models are often described as ODEs that are characterised by model structure - the functions of the dynamical variables - and model parameters. However, for the vast majority of systems of practical interest it is necessary to infer both the model parameters and an appropriate model structure from experimental observations. This experimental data is often scarce and incomplete. Furthermore, a large variety of models described as dynamical systems show traits of sloppiness (see [Gutenkunst et al., 2007](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.0030189)) and have unidentifiable parameter combinations. The task of inferring model parameters and structure from experimental data is of paramount importance to reliably analyse the behaviour of dynamical systems and draw faithful predictions in light of the difficulties posed by their complexities. Moreover, any future model prediction should encompass and propagate variability and uncertainty in model parameters and/or structure. Thus, it is also important that the inference methods are equipped to quantify and propagate the aforementioned uncertainties from the model descriptions to model predictions. As a natural choice to handle uncertainty, at least in the parameters, Bayesian inference is increasingly used to fit ODE models to experimental data ([Mark Girolami, 2008](https://www.sciencedirect.com/science/article/pii/S030439750800501X)). However, due to some of the difficulties that I pointed out above, fitting an ODE model using Bayesian inference is a challenging task. In this tutorial I am going to take up that challenge and will show how PyMC3 can potentially be used for this purpose.
I must point out that model fitting (inference of the unknown parameters) is just one of many crucial tasks that a modeller has to complete in order to gain a deeper understanding of a physical process. However, success in this task is crucial and this is where PyMC3, and probabilistic programming (ppl) in general, is extremely useful. The modeller can take full advantage of the variety of samplers and distributions provided by PyMC3 to automate inference.
In this tutorial I will focus on the fitting exercise, that is estimating the posterior distribution of the parameters given some noisy experimental time series.
## Bayesian inference of the parameters of an ODE
I begin by first introducing the Bayesian framework for inference in a coupled non-linear ODE defined as
$$
\frac{d X(t)}{dt}=\boldsymbol{f}\big(X(t),\boldsymbol{\theta}\big),
$$
where $X(t)\in\mathbb{R}^K$ is the solution, at each time point, of the system composed of $K$ coupled ODEs - the state vector - and $\boldsymbol{\theta}\in\mathbb{R}^D$ is the parameter vector that we wish to infer. $\boldsymbol{f}(\cdot)$ is a non-linear function that describes the governing dynamics. Also, in case of an initial value problem, let the matrix $\boldsymbol{X}(\boldsymbol{\theta}, \mathbf{x_0})$ denote the solution of the above system of equations at some specified time points for the parameters $\boldsymbol{\theta}$ and initial conditions $\mathbf{x_0}$.
Consider a set of noisy experimental observations $\boldsymbol{Y} \in \mathbb{R}^{T\times K}$ observed at $T$ experimental time points for the $K$ states. We can obtain the likelihood $p(\boldsymbol{Y}|\boldsymbol{X})$, where I use the symbol $\boldsymbol{X}:=\boldsymbol{X}(\boldsymbol{\theta}, \mathbf{x_0})$, and combine that with a prior distribution $p(\boldsymbol{\theta})$ on the parameters, using the Bayes theorem, to obtain the posterior distribution as
$$
p(\boldsymbol{\theta}|\boldsymbol{Y})=\frac{1}{Z}p(\boldsymbol{Y}|\boldsymbol{X})p(\boldsymbol{\theta}),
$$
where $Z=\int p(\boldsymbol{Y}|\boldsymbol{X})p(\boldsymbol{\theta}) d\boldsymbol{\theta} $ is the intractable marginal likelihood. Due to this intractability we resort to approximate inference and apply MCMC.
For this tutorial I have chosen two ODEs:
1. The [__Lotka-Volterra predator prey model__ ](http://www.scholarpedia.org/article/Predator-prey_model)
2. The [__Fitzhugh-Nagumo action potential model__](http://www.scholarpedia.org/article/FitzHugh-Nagumo_model)
I will showcase two distinctive approaches (__NUTS__ and __SMC__ step methods), supported by PyMC3, for the estimation of unknown parameters in these models.
## Lotka-Volterra predator prey model
The Lotka-Volterra model depicts an ecological system that is used to describe the interaction between a predator and a prey species. This ODE, given by
$$
\begin{aligned}
\frac{d x}{dt} &=\alpha x -\beta xy \\
\frac{d y}{dt} &=-\gamma y + \delta xy,
\end{aligned}
$$
shows limit cycle behaviour and has often been used for benchmarking Bayesian inference methods. $\boldsymbol{\theta}=(\alpha,\beta,\gamma,\delta, x(0),y(0))$ is the set of unknown parameters that we wish to infer from experimental observations of the state vector $X(t)=(x(t),y(t))$ comprising the concentrations of the prey and the predator species respectively. $x(0), y(0)$ are the initial values of the states needed to solve the ODE, which are also treated as unknown quantities. The predator-prey model was recently used to demonstrate the applicability of the NUTS sampler, and the Stan ppl in general, for inference in ODE models. I will closely follow [this](https://mc-stan.org/users/documentation/case-studies/lotka-volterra-predator-prey.html) Stan tutorial and thus I will set up this model and the associated inference problem (including the data) exactly as was done for the Stan tutorial. Let me first write down the code to solve this ODE using SciPy's `odeint`. Note that the methods in this tutorial are not limited or tied to `odeint`. Here I have chosen `odeint` simply to stay within PyMC3's dependencies (SciPy in this case).
```
class LotkaVolterraModel:
def __init__(self, y0=None):
self._y0 = y0
def simulate(self, parameters, times):
alpha, beta, gamma, delta, Xt0, Yt0 = [x for x in parameters]
def rhs(y, t, p):
X, Y = y
dX_dt = alpha * X - beta * X * Y
dY_dt = -gamma * Y + delta * X * Y
return dX_dt, dY_dt
values = odeint(rhs, [Xt0, Yt0], times, (parameters,))
return values
ode_model = LotkaVolterraModel()
```
## Handling ODE gradients
NUTS requires the gradient of the log of the target density w.r.t. the unknown parameters, $\nabla_{\boldsymbol{\theta}}p(\boldsymbol{\theta}|\boldsymbol{Y})$, which can be evaluated using the chain rule of differentiation as
$$ \nabla_{\boldsymbol{\theta}}p(\boldsymbol{\theta}|\boldsymbol{Y}) = \frac{\partial p(\boldsymbol{\theta}|\boldsymbol{Y})}{\partial \boldsymbol{X}}^T \frac{\partial \boldsymbol{X}}{\partial \boldsymbol{\theta}}.$$
The gradient of an ODE w.r.t. its parameters, the term $\frac{\partial \boldsymbol{X}}{\partial \boldsymbol{\theta}}$, can be obtained using local sensitivity analysis, although this is not the only method to obtain gradients. However, just like solving an ODE (a non-linear one to be precise), evaluation of the gradients can only be carried out using some sort of numerical method, say for example the famous Runge-Kutta method for non-stiff ODEs. PyMC3 uses Theano as the automatic differentiation engine and thus all models are implemented by stitching together available primitive operations (Ops) supported by Theano. Even to extend PyMC3 we need to compose models that can be expressed as symbolic combinations of Theano's Ops. However, if we take a step back and think about Theano then it is apparent that neither the ODE solution nor its gradient w.r.t. the parameters can be expressed symbolically as combinations of Theano's primitive Ops. Hence, from Theano's perspective an ODE (and for that matter any other form of non-linear differential equation) is a non-differentiable black-box function. However, one might argue that if a numerical method is coded up in Theano (using say the `scan` Op), then it is possible to symbolically express the numerical method that evaluates the ODE states, and then we can easily use Theano's automatic differentiation engine to obtain the gradients as well by differentiating through the numerical solver itself. I'd like to point out that the former, obtaining the solution, is indeed possible this way, but the obtained gradient would be error-prone. Additionally, this entails a complete 're-inventing the wheel', as one would have to implement decades-old sophisticated numerical algorithms again from scratch in Theano.
Thus, in this tutorial I am going to present the alternative approach, which consists of defining new [custom Theano Ops](http://deeplearning.net/software/theano_versions/dev/extending/extending_theano.html), extending Theano, that will wrap both the numerical solution and the vector-matrix product, $ \frac{\partial p(\boldsymbol{\theta}|\boldsymbol{Y})}{\partial \boldsymbol{X}}^T \frac{\partial \boldsymbol{X}}{\partial \boldsymbol{\theta}}$, often known as the _**vector-Jacobian product**_ (VJP) in the automatic differentiation literature. I'd like to point out here that in the context of non-linear ODEs the term Jacobian is used to denote gradients of the ODE dynamics $\boldsymbol{f}$ w.r.t. the ODE states $X(t)$. Thus, to avoid confusion, from now on I will use the term _**vector-sensitivity product**_ (VSP) to denote the same quantity that the term VJP denotes.
I will start by introducing the forward sensitivity analysis.
## ODE sensitivity analysis
For a coupled ODE system $\frac{d X(t)}{dt} = \boldsymbol{f}(X(t),\boldsymbol{\theta})$, the local sensitivity of the solution to a parameter is defined by how much the solution would change with changes in that parameter, i.e. the sensitivity of the $k$-th state is, simply put, the time evolution of its gradient w.r.t. the $d$-th parameter. This quantity, denoted as $Z_{kd}(t)$, is given by
$$Z_{kd}(t)=\frac{d }{d t} \left\{\frac{\partial X_k (t)}{\partial \theta_d}\right\} = \sum_{i=1}^K \frac{\partial f_k}{\partial X_i (t)}\frac{\partial X_i (t)}{\partial \theta_d} + \frac{\partial f_k}{\partial \theta_d}.$$
Using forward sensitivity analysis we can obtain both the state $X(t)$ and its derivative w.r.t. the parameters, at each time point, as the solution to an initial value problem by augmenting the original ODE system with the sensitivity equations $Z_{kd}$. The augmented ODE system $\big(X(t), Z(t)\big)$ can then be solved together using a chosen numerical method. The augmented ODE system needs initial values for the sensitivity equations. All of these should be set to zero except the ones where the sensitivity of a state w.r.t. its own initial value is sought, that is $ \frac{\partial X_k(t)}{\partial X_k (0)}\big|_{t=0} =1 $. Note that in order to solve this augmented system we have to embark on the tedious process of deriving $ \frac{\partial f_k}{\partial X_i (t)}$, also known as the Jacobian of an ODE, and the $\frac{\partial f_k}{\partial \theta_d}$ terms. Thankfully, many ODE solvers calculate these terms and solve the augmented system when asked for by the user. An example would be the [SUNDIALS CVODES solver suite](https://computation.llnl.gov/projects/sundials/cvodes). A Python wrapper for CVODES can be found [here](https://jmodelica.org/assimulo/).
However, for this tutorial I will go ahead and derive the terms mentioned above manually, and solve the Lotka-Volterra ODEs along with the sensitivities in the following code block. The functions `jac` and `dfdp` below calculate $ \frac{\partial f_k}{\partial X_i (t)}$ and $\frac{\partial f_k}{\partial \theta_d}$ respectively for the Lotka-Volterra model. For convenience I have transformed the sensitivity equations into the matrix form shown below. Here I extend the solver code snippet above to include sensitivities when asked for.
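Collecting the sensitivities into a matrix $Z(t)$, whose rows correspond to the $K$ states and whose columns to the parameters (here the four ODE parameters plus the two initial values), the component-wise equation above can be written compactly as
$$
\frac{dZ(t)}{dt} = J\big(X(t)\big)\,Z(t) + \frac{\partial \boldsymbol{f}}{\partial \boldsymbol{\theta}}, \qquad J_{ki} = \frac{\partial f_k}{\partial X_i(t)},
$$
which is exactly what the line `d_dydp_dt = np.matmul(jac(y), dydp) + dfdp(y)` computes in the code below.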
```
n_states = 2
n_odeparams = 4
n_ivs = 2
class LotkaVolterraModel:
def __init__(self, n_states, n_odeparams, n_ivs, y0=None):
self._n_states = n_states
self._n_odeparams = n_odeparams
self._n_ivs = n_ivs
self._y0 = y0
def simulate(self, parameters, times):
return self._simulate(parameters, times, False)
def simulate_with_sensitivities(self, parameters, times):
return self._simulate(parameters, times, True)
def _simulate(self, parameters, times, sensitivities):
alpha, beta, gamma, delta, Xt0, Yt0 = [x for x in parameters]
def r(y, t, p):
X, Y = y
dX_dt = alpha * X - beta * X * Y
dY_dt = -gamma * Y + delta * X * Y
return dX_dt, dY_dt
if sensitivities:
def jac(y):
X, Y = y
ret = np.zeros((self._n_states, self._n_states))
ret[0, 0] = alpha - beta * Y
ret[0, 1] = -beta * X
ret[1, 0] = delta * Y
ret[1, 1] = -gamma + delta * X
return ret
def dfdp(y):
X, Y = y
ret = np.zeros(
(self._n_states, self._n_odeparams + self._n_ivs)
) # except the following entries
ret[
0, 0
] = X # \frac{\partial [\alpha X - \beta XY]}{\partial \alpha}, and so on...
ret[0, 1] = -X * Y
ret[1, 2] = -Y
ret[1, 3] = X * Y
return ret
def rhs(y_and_dydp, t, p):
y = y_and_dydp[0 : self._n_states]
dydp = y_and_dydp[self._n_states :].reshape(
(self._n_states, self._n_odeparams + self._n_ivs)
)
dydt = r(y, t, p)
d_dydp_dt = np.matmul(jac(y), dydp) + dfdp(y)
return np.concatenate((dydt, d_dydp_dt.reshape(-1)))
y0 = np.zeros((2 * (n_odeparams + n_ivs)) + n_states)
y0[6] = 1.0 # \frac{\partial [X]}{\partial Xt0} at t==0, and same below for Y
y0[13] = 1.0
y0[0:n_states] = [Xt0, Yt0]
result = odeint(rhs, y0, times, (parameters,), rtol=1e-6, atol=1e-5)
values = result[:, 0 : self._n_states]
dvalues_dp = result[:, self._n_states :].reshape(
(len(times), self._n_states, self._n_odeparams + self._n_ivs)
)
return values, dvalues_dp
else:
values = odeint(r, [Xt0, Yt0], times, (parameters,), rtol=1e-6, atol=1e-5)
return values
ode_model = LotkaVolterraModel(n_states, n_odeparams, n_ivs)
```
For this model I have set the relative and absolute tolerances to $10^{-6}$ and $10^{-5}$ respectively, as was suggested in the Stan tutorial. This will produce sufficiently accurate solutions. Further reducing the tolerances will increase accuracy but at the cost of increasing the computational time. A thorough discussion on the choice and use of a numerical method for solving the ODE is out of the scope of this tutorial. However, I must point out that the inaccuracies of the ODE solver do affect the likelihood and as a result the inference. This is more so the case for stiff systems. I would point interested readers to this nice blog article where this effect is discussed thoroughly for a [cardiac ODE model](https://mirams.wordpress.com/2018/10/17/ode-errors-and-optimisation/). There is also an emerging area of uncertainty quantification that attacks the problem of noise arising from the imprecision of numerical algorithms: [probabilistic numerics](http://probabilistic-numerics.org/). This is indeed an elegant framework to carry out inference while taking into account the errors coming from numerical ODE solvers.
## Custom ODE Op
In order to define the custom `Op` I have written down two `theano.Op` classes, `ODEGradop` and `ODEop`. `ODEop` essentially wraps the ODE solution and will be called by PyMC3. `ODEGradop` wraps the numerical VSP, and this op is then in turn used inside the `grad` method of `ODEop` to return the VSP. Note that we pass in two functions, `state` and `numpy_vsp`, as arguments to the respective Ops. I will define these functions later. These functions act as shims through which we connect the Python code for the numerical solution of the state and the VSP to Theano, and thus to PyMC3.
```
class ODEGradop(theano.Op):
def __init__(self, numpy_vsp):
self._numpy_vsp = numpy_vsp
def make_node(self, x, g):
x = theano.tensor.as_tensor_variable(x)
g = theano.tensor.as_tensor_variable(g)
node = theano.Apply(self, [x, g], [g.type()])
return node
def perform(self, node, inputs_storage, output_storage):
x = inputs_storage[0]
g = inputs_storage[1]
out = output_storage[0]
out[0] = self._numpy_vsp(x, g) # get the numerical VSP
class ODEop(theano.Op):
def __init__(self, state, numpy_vsp):
self._state = state
self._numpy_vsp = numpy_vsp
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
return theano.Apply(self, [x], [x.type()])
def perform(self, node, inputs_storage, output_storage):
x = inputs_storage[0]
out = output_storage[0]
out[0] = self._state(x) # get the numerical solution of ODE states
def grad(self, inputs, output_grads):
x = inputs[0]
g = output_grads[0]
grad_op = ODEGradop(self._numpy_vsp) # pass the VSP when asked for gradient
grad_op_apply = grad_op(x, g)
return [grad_op_apply]
```
I must point out that, the way I have defined the custom ODE Ops above, there is the possibility that the ODE is solved twice for the same parameter values: once for the states and another time for the VSP. To avoid this behaviour I have written a helper class which stops this double evaluation.
```
class solveCached:
def __init__(self, times, n_params, n_outputs):
self._times = times
self._n_params = n_params
self._n_outputs = n_outputs
self._cachedParam = np.zeros(n_params)
self._cachedSens = np.zeros((len(times), n_outputs, n_params))
self._cachedState = np.zeros((len(times), n_outputs))
def __call__(self, x):
if np.all(x == self._cachedParam):
state, sens = self._cachedState, self._cachedSens
else:
state, sens = ode_model.simulate_with_sensitivities(x, times)
return state, sens
times = np.arange(0, 21) # number of measurement points (see below)
cached_solver = solveCached(times, n_odeparams + n_ivs, n_states)
```
### The ODE state & VSP evaluation
Most ODE systems of practical interest will have multiple states and thus the output of the solver, which I have denoted so far as $\boldsymbol{X}$, for a system with $K$ states solved on $T$ time points, would be a $T \times K$-dimensional matrix. For the Lotka-Volterra model the columns of this matrix represent the time evolution of the individual species concentrations. I flatten this matrix to a $TK$-dimensional vector $vec(\boldsymbol{X})$, and also rearrange the sensitivities accordingly to obtain the desired vector-matrix product. It is beneficial at this point to test the custom Op as described [here](http://deeplearning.net/software/theano_versions/dev/extending/extending_theano.html#how-to-test-it).
```
def state(x):
State, Sens = cached_solver(np.array(x, dtype=np.float64))
cached_solver._cachedState, cached_solver._cachedSens, cached_solver._cachedParam = (
State,
Sens,
x,
)
return State.reshape((2 * len(State),))
def numpy_vsp(x, g):
numpy_sens = cached_solver(np.array(x, dtype=np.float64))[1].reshape(
(n_states * len(times), len(x))
)
return numpy_sens.T.dot(g)
```
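As mentioned above, it is worth testing the custom Op before using it. A minimal sketch of such a check, using Theano's `verify_grad` to compare the Op's gradient against a finite-difference approximation at a hypothetical test point, could look like the following (depending on the solver tolerances you may need to loosen the comparison tolerances):
```
# Hypothetical test point: the 4 ODE parameters followed by the 2 initial values
test_point = np.array([0.5, 0.025, 0.8, 0.025, 30.0, 4.0])
theano.gradient.verify_grad(
    ODEop(state, numpy_vsp),      # the Op wired to the cached solver above
    [test_point],
    rng=np.random.RandomState(42),
)
```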
## The Hudson's Bay Company data
The Lotka-Volterra predator prey model has been used previously to successfully explain the dynamics of natural populations of predators and prey, such as the lynx and snowshoe hare data of the Hudson's Bay Company. This is the same data (that was shared [here](https://github.com/stan-dev/example-models/tree/master/knitr/lotka-volterra)) used in the Stan example and thus I will use this data-set as the experimental observations $\boldsymbol{Y}(t)$ to infer the parameters.
```
Year = np.arange(1900, 1921, 1)
# fmt: off
Lynx = np.array([4.0, 6.1, 9.8, 35.2, 59.4, 41.7, 19.0, 13.0, 8.3, 9.1, 7.4,
8.0, 12.3, 19.5, 45.7, 51.1, 29.7, 15.8, 9.7, 10.1, 8.6])
Hare = np.array([30.0, 47.2, 70.2, 77.4, 36.3, 20.6, 18.1, 21.4, 22.0, 25.4,
27.1, 40.3, 57.0, 76.6, 52.3, 19.5, 11.2, 7.6, 14.6, 16.2, 24.7])
# fmt: on
plt.figure(figsize=(15, 7.5))
plt.plot(Year, Lynx, color="b", lw=4, label="Lynx")
plt.plot(Year, Hare, color="g", lw=4, label="Hare")
plt.legend(fontsize=15)
plt.xlim([1900, 1920])
plt.xlabel("Year", fontsize=15)
plt.ylabel("Concentrations", fontsize=15)
plt.xticks(Year, rotation=45)
plt.title("Lynx (predator) - Hare (prey): oscillatory dynamics", fontsize=25);
```
## The probabilistic model
I have now got all the ingredients needed in order to define the probabilistic model in PyMC3. As I have mentioned previously I will set up the probabilistic model with the exact same likelihood and priors used in the Stan example. The observed data is defined as follows:
$$\log (\boldsymbol{Y(t)}) = \log (\boldsymbol{X(t)}) + \eta(t),$$
where $\eta(t)$ is assumed to be zero mean i.i.d Gaussian noise with an unknown standard deviation $\sigma$, that needs to be estimated. The above multiplicative (on the natural scale) noise model encodes a lognormal distribution as the likelihood:
$$\boldsymbol{Y(t)} \sim \mathcal{L}\mathcal{N}(\log (\boldsymbol{X(t)}), \sigma^2).$$
The following priors are then placed on the parameters:
$$
\begin{aligned}
x(0), y(0) &\sim \mathcal{L}\mathcal{N}(\log(10),1),\\
\alpha, \gamma &\sim \mathcal{N}(1,0.5),\\
\beta, \delta &\sim \mathcal{N}(0.05,0.05),\\
\sigma &\sim \mathcal{L}\mathcal{N}(-1,1).
\end{aligned}
$$
For an intuitive explanation, which I am omitting for brevity, regarding the choice of priors as well as the likelihood model, I would recommend the Stan example mentioned above. The above probabilistic model is defined in PyMC3 below. Note that the flattened state vector is reshaped to match the data dimensionality.
Finally, I use the `pm.sample` method to run NUTS by default and obtain $1500$ post warm-up samples from the posterior.
```
theano.config.exception_verbosity = "high"
theano.config.floatX = "float64"
# Define the data matrix
Y = np.vstack((Hare, Lynx)).T
# Now instantiate the theano custom ODE op
my_ODEop = ODEop(state, numpy_vsp)
# The probabilistic model
with pm.Model() as LV_model:
# Priors for unknown model parameters
alpha = pm.Normal("alpha", mu=1, sd=0.5)
beta = pm.Normal("beta", mu=0.05, sd=0.05)
gamma = pm.Normal("gamma", mu=1, sd=0.5)
delta = pm.Normal("delta", mu=0.05, sd=0.05)
xt0 = pm.Lognormal("xto", mu=np.log(10), sd=1)
yt0 = pm.Lognormal("yto", mu=np.log(10), sd=1)
sigma = pm.Lognormal("sigma", mu=-1, sd=1, shape=2)
# Forward model
all_params = pm.math.stack([alpha, beta, gamma, delta, xt0, yt0], axis=0)
ode_sol = my_ODEop(all_params)
forward = ode_sol.reshape(Y.shape)
# Likelihood
Y_obs = pm.Lognormal("Y_obs", mu=pm.math.log(forward), sd=sigma, observed=Y)
trace = pm.sample(1500, tune=1000, init="adapt_diag")
trace["diverging"].sum()
with LV_model:
pm.traceplot(trace);
import pandas as pd
summary = pm.summary(trace)
STAN_mus = [0.549, 0.028, 0.797, 0.024, 33.960, 5.949, 0.248, 0.252]
STAN_sds = [0.065, 0.004, 0.091, 0.004, 2.909, 0.533, 0.045, 0.044]
summary["STAN_mus"] = pd.Series(np.array(STAN_mus), index=summary.index)
summary["STAN_sds"] = pd.Series(np.array(STAN_sds), index=summary.index)
summary
```
These estimates are almost identical to those obtained in the Stan tutorial (see the last two columns above), which is what we can expect. Posterior predictives can be drawn as below.
```
ppc_samples = pm.sample_posterior_predictive(trace, samples=1000, model=LV_model)["Y_obs"]
mean_ppc = ppc_samples.mean(axis=0)
CriL_ppc = np.percentile(ppc_samples, q=2.5, axis=0)
CriU_ppc = np.percentile(ppc_samples, q=97.5, axis=0)
plt.figure(figsize=(15, 2 * (5)))
plt.subplot(2, 1, 1)
plt.plot(Year, Lynx, "o", color="b", lw=4, ms=10.5)
plt.plot(Year, mean_ppc[:, 1], color="b", lw=4)
plt.plot(Year, CriL_ppc[:, 1], "--", color="b", lw=2)
plt.plot(Year, CriU_ppc[:, 1], "--", color="b", lw=2)
plt.xlim([1900, 1920])
plt.ylabel("Lynx conc", fontsize=15)
plt.xticks(Year, rotation=45)
plt.subplot(2, 1, 2)
plt.plot(Year, Hare, "o", color="g", lw=4, ms=10.5, label="Observed")
plt.plot(Year, mean_ppc[:, 0], color="g", lw=4, label="mean of ppc")
plt.plot(Year, CriL_ppc[:, 0], "--", color="g", lw=2, label="credible intervals")
plt.plot(Year, CriU_ppc[:, 0], "--", color="g", lw=2)
plt.legend(fontsize=15)
plt.xlim([1900, 1920])
plt.xlabel("Year", fontsize=15)
plt.ylabel("Hare conc", fontsize=15)
plt.xticks(Year, rotation=45);
```
# Efficient exploration of the posterior landscape with SMC
It has been pointed out in several papers that the complex non-linear dynamics of an ODE result in a posterior landscape that is extremely difficult for many MCMC samplers to navigate efficiently. Thus, recently the curvature information of the posterior surface has been used to construct powerful geometrically aware samplers ([Mark Girolami and Ben Calderhead, 2011](https://rss.onlinelibrary.wiley.com/doi/epdf/10.1111/j.1467-9868.2010.00765.x)) that perform extremely well in ODE inference problems. Another set of ideas suggests breaking down a complex inference task into a sequence of simpler tasks. In essence the idea is to use sequential importance sampling to sample from an artificial sequence of increasingly complex distributions, where the first in the sequence is a distribution that is easy to sample from, the prior, and the last in the sequence is the actual complex target distribution. The associated importance distribution is constructed by moving the set of particles sampled at the previous step using a Markov kernel, say for example the MH kernel.
A simple way of building the sequence of distributions is to use a temperature $\beta$, that is raised slowly from $0$ to $1$. Using this temperature variable $\beta$ we can write down the annealed intermediate distribution as
$$p_{\beta}(\boldsymbol{\theta}|\boldsymbol{y})\propto p(\boldsymbol{y}|\boldsymbol{\theta})^{\beta} p(\boldsymbol{\theta}).$$
Samplers that carry out sequential importance sampling from this artificial sequence of distributions, to avoid the difficult task of sampling directly from $p(\boldsymbol{\theta}|\boldsymbol{y})$, are known as Sequential Monte Carlo (SMC) samplers ([P Del Moral et al., 2006](https://rss.onlinelibrary.wiley.com/doi/full/10.1111/j.1467-9868.2006.00553.x)). The performance of these samplers is sensitive to the choice of the temperature schedule, that is the set of user-defined increasing values of $\beta$ between $0$ and $1$. Fortunately, PyMC3 provides a version of the SMC sampler ([Jianye Ching and Yi-Chu Chen, 2007](https://ascelibrary.org/doi/10.1061/%28ASCE%290733-9399%282007%29133%3A7%28816%29)) that automatically figures out this temperature schedule. Moreover, PyMC3's SMC sampler does not require the gradient of the log target density. As a result it is extremely easy to use this sampler for inference in ODE models. In the next example I will apply this SMC sampler to estimate the parameters of the Fitzhugh-Nagumo model.
## The Fitzhugh-Nagumo model
The Fitzhugh-Nagumo model given by
$$
\begin{aligned}
\frac{dV}{dt}&=(V - \frac{V^3}{3} + R)c\\
\frac{dR}{dt}&=\frac{-(V-a+bR)}{c},
\end{aligned}
$$
consisting of a membrane voltage variable $V(t)$ and a recovery variable $R(t)$ is a two-dimensional simplification of the [Hodgkin-Huxley](http://www.scholarpedia.org/article/Conductance-based_models) model of spike (action potential) generation in squid giant axons, where $a$, $b$, $c$ are the model parameters. This model produces rich dynamics and, as a result, a complex geometry of the posterior surface that often leads to poor performance of many MCMC samplers. As a result this model was used to test the efficacy of the discussed geometric MCMC scheme and since then has been used to benchmark other novel MCMC methods. Following [Mark Girolami and Ben Calderhead, 2011](https://rss.onlinelibrary.wiley.com/doi/epdf/10.1111/j.1467-9868.2010.00765.x) I will also use artificially generated data from this model to set up the inference task for estimating $\boldsymbol{\theta}=(a,b,c)$.
```
class FitzhughNagumoModel:
def __init__(self, times, y0=None):
self._y0 = np.array([-1, 1], dtype=np.float64)
self._times = times
def _simulate(self, parameters, times):
a, b, c = [float(x) for x in parameters]
def rhs(y, t, p):
V, R = y
dV_dt = (V - V ** 3 / 3 + R) * c
dR_dt = (V - a + b * R) / -c
return dV_dt, dR_dt
values = odeint(rhs, self._y0, times, (parameters,), rtol=1e-6, atol=1e-6)
return values
def simulate(self, x):
return self._simulate(x, self._times)
```
## Simulated Data
For this example I am going to use simulated data, that is, I will generate noisy traces from the forward model defined above with the parameters $\boldsymbol{\theta}$ set to $(0.2,0.2,3)$ and corrupted by i.i.d. Gaussian noise with standard deviation $\sigma=0.5$. The initial values are set to $V(0)=-1$ and $R(0)=1$ respectively. Again following [Mark Girolami and Ben Calderhead, 2011](https://rss.onlinelibrary.wiley.com/doi/epdf/10.1111/j.1467-9868.2010.00765.x) I will assume that the initial values are known. These parameter values push the model into the oscillatory regime.
```
n_states = 2
n_times = 200
true_params = [0.2, 0.2, 3.0]
noise_sigma = 0.5
FN_solver_times = np.linspace(0, 20, n_times)
ode_model = FitzhughNagumoModel(FN_solver_times)
sim_data = ode_model.simulate(true_params)
np.random.seed(42)
Y_sim = sim_data + np.random.randn(n_times, n_states) * noise_sigma
plt.figure(figsize=(15, 7.5))
plt.plot(FN_solver_times, sim_data[:, 0], color="darkblue", lw=4, label=r"$V(t)$")
plt.plot(FN_solver_times, sim_data[:, 1], color="darkgreen", lw=4, label=r"$R(t)$")
plt.plot(FN_solver_times, Y_sim[:, 0], "o", color="darkblue", ms=4.5, label="Noisy traces")
plt.plot(FN_solver_times, Y_sim[:, 1], "o", color="darkgreen", ms=4.5)
plt.legend(fontsize=15)
plt.xlabel("Time", fontsize=15)
plt.ylabel("Values", fontsize=15)
plt.title("Fitzhugh-Nagumo Action Potential Model", fontsize=25);
```
## Define a non-differentiable black-box op using Theano @as_op
Remember that I mentioned the SMC sampler does not require gradients; this is, by the way, also the case for other samplers supported in PyMC3, such as Metropolis-Hastings and the Slice sampler. For all these gradient-free samplers I will show a simple and quick way of wrapping the forward model, i.e. the ODE solution, in Theano. All we have to do is use the decorator `as_op`, which converts a Python function into a basic Theano Op. We also tell Theano, via the `as_op` decorator, that we have three parameters, each being a Theano scalar. The output is then a Theano matrix whose columns are the state vectors.
```
import theano.tensor as tt
from theano.compile.ops import as_op
@as_op(itypes=[tt.dscalar, tt.dscalar, tt.dscalar], otypes=[tt.dmatrix])
def th_forward_model(param1, param2, param3):
param = [param1, param2, param3]
th_states = ode_model.simulate(param)
return th_states
```
## Generative model
Since I have corrupted the original traces with i.i.d. Gaussian noise, the likelihood is given by
$$p(\boldsymbol{Y}|\boldsymbol{\theta},\sigma) = \prod_{i=1}^T \mathcal{N}\big(\boldsymbol{Y}(t_i)\,\big|\,\boldsymbol{X}(t_i),\, \sigma^2\mathbb{I}\big),$$
where $\mathbb{I}\in \mathbb{R}^{K \times K}$ is the identity matrix. We place Gamma, Normal, and Uniform priors on $a$, $b$ and $c$ respectively, and a HalfNormal prior on $\sigma$, as follows:
$$
\begin{aligned}
a & \sim \mathcal{Gamma}(2,1),\\
b & \sim \mathcal{N}(0,1),\\
c & \sim \mathcal{U}(0.1,10),\\
\sigma & \sim \mathcal{H}(1).
\end{aligned}
$$
Notice how I have used the `start` argument for this example. Just like `pm.sample`, `pm.sample_smc` has a number of settings, but I found the defaults good enough for simple models such as this one.
```
draws = 1000
with pm.Model() as FN_model:
a = pm.Gamma("a", alpha=2, beta=1)
b = pm.Normal("b", mu=0, sd=1)
c = pm.Uniform("c", lower=0.1, upper=10)
sigma = pm.HalfNormal("sigma", sd=1)
forward = th_forward_model(a, b, c)
cov = np.eye(2) * sigma ** 2
Y_obs = pm.MvNormal("Y_obs", mu=forward, cov=cov, observed=Y_sim)
startsmc = {v.name: np.random.uniform(1e-3, 2, size=draws) for v in FN_model.free_RVs}
trace_FN = pm.sample_smc(draws, start=startsmc)
pm.plot_posterior(trace_FN, kind="hist", bins=30, color="seagreen");
```
## Inference summary
With `pm.SMC`, do I get similar performance to geometric MCMC samplers (see [Mark Girolami and Ben Calderhead, 2011](https://rss.onlinelibrary.wiley.com/doi/epdf/10.1111/j.1467-9868.2010.00765.x))? I think so!
```
results = [
pm.summary(trace_FN, ["a"]),
pm.summary(trace_FN, ["b"]),
pm.summary(trace_FN, ["c"]),
pm.summary(trace_FN, ["sigma"]),
]
results = pd.concat(results)
true_params.append(noise_sigma)
results["True values"] = pd.Series(np.array(true_params), index=results.index)
true_params.pop()
results
```
## Reconstruction of the phase portrait
It's good to check that we can reconstruct the (famous) phase portrait for this model based on the obtained samples.
```
params = np.array([trace_FN.get_values("a"), trace_FN.get_values("b"), trace_FN.get_values("c")]).T
params.shape
new_values = []
for ind in range(len(params)):
ppc_sol = ode_model.simulate(params[ind])
new_values.append(ppc_sol)
new_values = np.array(new_values)
mean_values = np.mean(new_values, axis=0)
plt.figure(figsize=(15, 7.5))
plt.plot(
mean_values[:, 0],
mean_values[:, 1],
color="black",
lw=4,
label="Inferred (mean of sampled) phase portrait",
)
plt.plot(
sim_data[:, 0], sim_data[:, 1], "--", color="#ff7f0e", lw=4, ms=6, label="True phase portrait"
)
plt.legend(fontsize=15)
plt.xlabel(r"$V(t)$", fontsize=15)
plt.ylabel(r"$R(t)$", fontsize=15);
```
# Perspectives
### Using some other ODE models
I have tried to keep everything as general as possible. So, my custom ODE Op, the state and VSP evaluators as well as the cached solver are not tied to a specific ODE model. Thus, to use any other ODE model one only needs to implement a `simulate_with_sensitivities` method for their own specific ODE model, as sketched below.
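As an illustration, a minimal skeleton of the interface assumed by the Ops above might look like the following; the class name and the placeholder body are hypothetical, only the return shapes matter:
```
class MyODEModel:
    """Skeleton for plugging a different ODE model into the custom Ops above (a sketch)."""

    def __init__(self, n_states, n_params):
        self._n_states = n_states
        # n_params counts the ODE parameters plus any initial values being inferred
        self._n_params = n_params

    def simulate_with_sensitivities(self, parameters, times):
        # Must return a tuple:
        #   values     : array of shape (len(times), n_states)            -- ODE solution
        #   dvalues_dp : array of shape (len(times), n_states, n_params)  -- sensitivities
        # e.g. by solving the augmented system dZ/dt = J(X) Z + df/dtheta with odeint,
        # exactly as done in LotkaVolterraModel above.
        raise NotImplementedError("model-specific augmented ODE goes here")
```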
### Other forms of differential equation (DDE, DAE, PDE)
I hope the two examples have elucidated the applicability of PyMC3 in regards to fitting ODE models. Although ODEs are the most fundamental constituent of a mathematical model, there are indeed other forms of dynamical systems such as delay differential equations (DDE), differential algebraic equations (DAE) and partial differential equations (PDE) whose parameter estimation is equally important. The SMC, and for that matter any other gradient-free sampler supported by PyMC3, can be used to fit all these forms of differential equation, of course using `as_op`. However, just like an ODE, we can solve augmented systems of DDE/DAE along with their sensitivity equations. The sensitivity equations for a DDE and a DAE can be found in this recent paper, [C Rackauckas et al., 2018](https://arxiv.org/abs/1812.01892) (Equations 9 and 10). Thus we can easily apply the NUTS sampler to these models as well.
### Stan already supports ODEs
Well, there are many problems where I believe the SMC sampler would be more suitable than NUTS, and thus it's good to have that option.
### Model selection
Most of the ODE inference literature since [Vladislav Vyshemirsky and Mark Girolami, 2008](https://academic.oup.com/bioinformatics/article/24/6/833/192524) recommends the use of Bayes factors for the purpose of model selection/comparison. This involves the calculation of the marginal likelihood, which is a much more nuanced topic and I would refrain from any discussion about that. Fortunately, the SMC sampler calculates the marginal likelihood as a by-product, so this can be used for obtaining Bayes factors. Follow PyMC3's other tutorials for further information regarding how to obtain the marginal likelihood after running the SMC sampler.
Since we generally frame the ODE inference as a regression problem (along with the i.i.d. measurement noise assumption in most cases) we can straight away use any of the supported information criteria, such as the widely applicable information criterion (WAIC), irrespective of what sampler is used for inference. See PyMC3's API for further information regarding WAIC.
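As a minimal sketch (reusing the `FN_model` and `trace_FN` objects from the SMC example above), WAIC can be computed directly from a trace:
```
# Compute WAIC for the Fitzhugh-Nagumo fit; the same call works for the
# NUTS trace of the Lotka-Volterra model (pass the corresponding model and trace).
with FN_model:
    print(pm.waic(trace_FN))
```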
### Other AD packages
Although this is a slight digression, I would still like to point out my observations on this issue. The approach that I have presented here for embedding an ODE (which also extends to DDE/DAE) as a custom Op can be trivially carried forward to other AD packages such as TensorFlow and PyTorch. I have been able to use TensorFlow's [py_func](https://www.tensorflow.org/api_docs/python/tf/py_func) to build a custom TensorFlow ODE Op and then use that in the [Edward](http://edwardlib.org/) ppl. I would recommend [this](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html) tutorial, for writing PyTorch extensions, to those who are interested in using the [Pyro](http://pyro.ai/) ppl.
```
%load_ext watermark
%watermark -n -u -v -iv -w
```
|
github_jupyter
|
# Quantum Teleportation
This notebook demonstrates quantum teleportation. We first use Qiskit's built-in simulators to test our quantum circuit, and then try it out on a real quantum computer.
## 1. Overview <a id='overview'></a>
Alice wants to send quantum information to Bob. Specifically, suppose she wants to send the qubit state
$\vert\psi\rangle = \alpha\vert0\rangle + \beta\vert1\rangle$.
This entails passing on information about $\alpha$ and $\beta$ to Bob.
There exists a theorem in quantum mechanics which states that you cannot simply make an exact copy of an unknown quantum state. This is known as the no-cloning theorem. As a result of this we can see that Alice can't simply generate a copy of $\vert\psi\rangle$ and give the copy to Bob. We can only copy classical states (not superpositions).
However, by taking advantage of two classical bits and an entangled qubit pair, Alice can transfer her state $\vert\psi\rangle$ to Bob. We call this teleportation because, at the end, Bob will have $\vert\psi\rangle$ and Alice won't anymore.
## 2. The Quantum Teleportation Protocol <a id='how'></a>
To transfer a quantum bit, Alice and Bob must use a third party (Telamon) to send them an entangled qubit pair. Alice then performs some operations on her qubit, sends the results to Bob over a classical communication channel, and Bob then performs some operations on his end to receive Alice's qubit.

We will describe the steps on a quantum circuit below. Here, no qubits are actually 'sent', you'll just have to imagine that part!
First we set up our session:
```
# Do the necessary imports
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import IBMQ, Aer, transpile
from qiskit.visualization import plot_histogram, plot_bloch_multivector, array_to_latex
from qiskit.extensions import Initialize
from qiskit.result import marginal_counts
from qiskit.quantum_info import random_statevector
```
and create our quantum circuit:
```
## SETUP
# Protocol uses 3 qubits and 2 classical bits in 2 different registers
qr = QuantumRegister(3, name="q") # Protocol uses 3 qubits
crz = ClassicalRegister(1, name="crz") # and 2 classical bits
crx = ClassicalRegister(1, name="crx") # in 2 different registers
teleportation_circuit = QuantumCircuit(qr, crz, crx)
```
#### Step 1
A third party, Telamon, creates an entangled pair of qubits and gives one to Bob and one to Alice.
The pair Telamon creates is a special pair called a Bell pair. In quantum circuit language, the way to create a Bell pair between two qubits is to first transfer one of them to the X-basis ($|+\rangle$ and $|-\rangle$) using a Hadamard gate, and then to apply a CNOT gate onto the other qubit controlled by the one in the X-basis.
```
def create_bell_pair(qc, a, b):
"""Creates a bell pair in qc using qubits a & b"""
qc.h(a) # Put qubit a into state |+>
qc.cx(a,b) # CNOT with a as control and b as target
## SETUP
# Protocol uses 3 qubits and 2 classical bits in 2 different registers
qr = QuantumRegister(3, name="q")
crz, crx = ClassicalRegister(1, name="crz"), ClassicalRegister(1, name="crx")
teleportation_circuit = QuantumCircuit(qr, crz, crx)
## STEP 1
# In our case, Telamon entangles qubits q1 and q2
# Let's apply this to our circuit:
create_bell_pair(teleportation_circuit, 1, 2)
# And view the circuit so far:
teleportation_circuit.draw()
```
Let's say Alice owns $q_1$ and Bob owns $q_2$ after they part ways.
#### Step 2
Alice applies a CNOT gate to $q_1$, controlled by $\vert\psi\rangle$ (the qubit she is trying to send Bob). Then Alice applies a Hadamard gate to $|\psi\rangle$. In our quantum circuit, the qubit ($|\psi\rangle$) Alice is trying to send is $q_0$:
```
def alice_gates(qc, psi, a):
qc.cx(psi, a)
qc.h(psi)
## SETUP
# Protocol uses 3 qubits and 2 classical bits in 2 different registers
qr = QuantumRegister(3, name="q")
crz, crx = ClassicalRegister(1, name="crz"), ClassicalRegister(1, name="crx")
teleportation_circuit = QuantumCircuit(qr, crz, crx)
## STEP 1
create_bell_pair(teleportation_circuit, 1, 2)
## STEP 2
teleportation_circuit.barrier() # Use barrier to separate steps
alice_gates(teleportation_circuit, 0, 1)
teleportation_circuit.draw()
```
#### Step 3
Next, Alice applies a measurement to both qubits that she owns, $q_1$ and $\vert\psi\rangle$, and stores this result in two classical bits. She then sends these two bits to Bob.
```
def measure_and_send(qc, a, b):
"""Measures qubits a & b and 'sends' the results to Bob"""
qc.barrier()
qc.measure(a,0)
qc.measure(b,1)
## SETUP
# Protocol uses 3 qubits and 2 classical bits in 2 different registers
qr = QuantumRegister(3, name="q")
crz, crx = ClassicalRegister(1, name="crz"), ClassicalRegister(1, name="crx")
teleportation_circuit = QuantumCircuit(qr, crz, crx)
## STEP 1
create_bell_pair(teleportation_circuit, 1, 2)
## STEP 2
teleportation_circuit.barrier() # Use barrier to separate steps
alice_gates(teleportation_circuit, 0, 1)
## STEP 3
measure_and_send(teleportation_circuit, 0 ,1)
teleportation_circuit.draw()
```
#### Step 4
Bob, who already has the qubit $q_2$, then applies the following gates depending on the state of the classical bits:
* 00 $\rightarrow$ Do nothing
* 01 $\rightarrow$ Apply $X$ gate
* 10 $\rightarrow$ Apply $Z$ gate
* 11 $\rightarrow$ Apply $ZX$ gate
(*Note that this transfer of information is purely classical*.)
```
# This function takes a QuantumCircuit (qc), integer (qubit)
# and ClassicalRegisters (crz & crx) to decide which gates to apply
def bob_gates(qc, qubit, crz, crx):
# Here we use c_if to control our gates with a classical
# bit instead of a qubit
qc.x(qubit).c_if(crx, 1) # Apply gates if the registers
qc.z(qubit).c_if(crz, 1) # are in the state '1'
## SETUP
# Protocol uses 3 qubits and 2 classical bits in 2 different registers
qr = QuantumRegister(3, name="q")
crz, crx = ClassicalRegister(1, name="crz"), ClassicalRegister(1, name="crx")
teleportation_circuit = QuantumCircuit(qr, crz, crx)
## STEP 1
create_bell_pair(teleportation_circuit, 1, 2)
## STEP 2
teleportation_circuit.barrier() # Use barrier to separate steps
alice_gates(teleportation_circuit, 0, 1)
## STEP 3
measure_and_send(teleportation_circuit, 0, 1)
## STEP 4
teleportation_circuit.barrier() # Use barrier to separate steps
bob_gates(teleportation_circuit, 2, crz, crx)
teleportation_circuit.draw()
```
And voila! At the end of this protocol, Alice's qubit has now teleported to Bob.
## 3. Simulating the Teleportation Protocol <a id='simulating'></a>
### 3.1 How Will We Test the Protocol on a Quantum Computer? <a id='testing'></a>
In this notebook, we will initialize Alice's qubit in a random state $\vert\psi\rangle$ (`psi`). This state will be created using an `Initialize` gate on $|q_0\rangle$. In this chapter we use the function `random_statevector` to choose `psi` for us, but feel free to set `psi` to any qubit state you want.
```
# Create random 1-qubit state
psi = random_statevector(2)
# Display it nicely
display(array_to_latex(psi, prefix="|\\psi\\rangle ="))
# Show it on a Bloch sphere
plot_bloch_multivector(psi)
```
Let's create our initialization instruction to create $|\psi\rangle$ from the state $|0\rangle$:
```
init_gate = Initialize(psi)
init_gate.label = "init"
```
(`Initialize` is technically not a gate since it contains a reset operation, and so is not reversible. We call it an 'instruction' instead). If the quantum teleportation circuit works, then at the end of the circuit the qubit $|q_2\rangle$ will be in this state. We will check this using the statevector simulator.
### 3.2 Using the Simulated Statevector <a id='simulating-sv'></a>
We can use the Aer simulator to verify our qubit has been teleported.
```
## SETUP
qr = QuantumRegister(3, name="q") # Protocol uses 3 qubits
crz = ClassicalRegister(1, name="crz") # and 2 classical registers
crx = ClassicalRegister(1, name="crx")
qc = QuantumCircuit(qr, crz, crx)
## STEP 0
# First, let's initialize Alice's q0
qc.append(init_gate, [0])
qc.barrier()
## STEP 1
# Now begins the teleportation protocol
create_bell_pair(qc, 1, 2)
qc.barrier()
## STEP 2
# Send q1 to Alice and q2 to Bob
alice_gates(qc, 0, 1)
## STEP 3
# Alice then sends her classical bits to Bob
measure_and_send(qc, 0, 1)
## STEP 4
# Bob decodes qubits
bob_gates(qc, 2, crz, crx)
# Display the circuit
qc.draw()
```
We can see below, using the statevector obtained from the aer simulator, that the state of $|q_2\rangle$ is the same as the state $|\psi\rangle$ we created above, while the states of $|q_0\rangle$ and $|q_1\rangle$ have been collapsed to either $|0\rangle$ or $|1\rangle$. The state $|\psi\rangle$ has been teleported from qubit 0 to qubit 2.
```
sim = Aer.get_backend('aer_simulator')
qc.save_statevector()
out_vector = sim.run(qc).result().get_statevector()
plot_bloch_multivector(out_vector)
```
You can run this cell a few times to make sure. You may notice that the qubits 0 & 1 change states, but qubit 2 is always in the state $|\psi\rangle$.
### 3.3 Using the Simulated Counts <a id='simulating-fc'></a>
Quantum teleportation is designed to send qubits between two parties. We do not have the hardware to demonstrate this, but we can demonstrate that the gates perform the correct transformations on a single quantum chip. Here we again use the aer simulator to simulate how we might test our protocol.
On a real quantum computer, we would not be able to sample the statevector, so if we wanted to check our teleportation circuit is working, we need to do things slightly differently. The `Initialize` instruction first performs a reset, setting our qubit to the state $|0\rangle$. It then applies gates to turn our $|0\rangle$ qubit into the state $|\psi\rangle$:
$$ |0\rangle \xrightarrow{\text{Initialize gates}} |\psi\rangle $$
Since all quantum gates are reversible, we can find the inverse of these gates using:
```
inverse_init_gate = init_gate.gates_to_uncompute()
```
This operation has the property:
$$ |\psi\rangle \xrightarrow{\text{Inverse Initialize gates}} |0\rangle $$
To prove the qubit $|q_0\rangle$ has been teleported to $|q_2\rangle$, if we do this inverse initialization on $|q_2\rangle$, we expect to measure $|0\rangle$ with certainty. We do this in the circuit below:
```
## SETUP
qr = QuantumRegister(3, name="q") # Protocol uses 3 qubits
crz = ClassicalRegister(1, name="crz") # and 2 classical registers
crx = ClassicalRegister(1, name="crx")
qc = QuantumCircuit(qr, crz, crx)
## STEP 0
# First, let's initialize Alice's q0
qc.append(init_gate, [0])
qc.barrier()
## STEP 1
# Now begins the teleportation protocol
create_bell_pair(qc, 1, 2)
qc.barrier()
## STEP 2
# Send q1 to Alice and q2 to Bob
alice_gates(qc, 0, 1)
## STEP 3
# Alice then sends her classical bits to Bob
measure_and_send(qc, 0, 1)
## STEP 4
# Bob decodes qubits
bob_gates(qc, 2, crz, crx)
## STEP 5
# reverse the initialization process
qc.append(inverse_init_gate, [2])
# Display the circuit
qc.draw()
```
We can see the `inverse_init_gate` appearing, labelled 'disentangler' on the circuit diagram. Finally, we measure the third qubit and store the result in the third classical bit:
```
# Need to add a new ClassicalRegister
# to see the result
cr_result = ClassicalRegister(1)
qc.add_register(cr_result)
qc.measure(2,2)
qc.draw()
```
and we run our experiment:
```
t_qc = transpile(qc, sim)
t_qc.save_statevector()
counts = sim.run(t_qc).result().get_counts()
qubit_counts = [marginal_counts(counts, [qubit]) for qubit in range(3)]
plot_histogram(qubit_counts)
```
We can see we have a 100% chance of measuring $q_2$ (the purple bar in the histogram) in the state $|0\rangle$. This is the expected result, and indicates the teleportation protocol has worked properly.
## 4. Understanding Quantum Teleportation <a id="understanding-qt"></a>
Now that you have worked through the implementation of quantum teleportation, it is time to understand the mathematics behind the protocol.
#### Step 1
Quantum Teleportation begins with the fact that Alice needs to transmit $|\psi\rangle = \alpha|0\rangle + \beta|1\rangle$ (a random qubit) to Bob. She doesn't know the state of the qubit. For this, Alice and Bob take the help of a third party (Telamon). Telamon prepares a pair of entangled qubits for Alice and Bob. The entangled qubits could be written in Dirac Notation as:
$$ |e \rangle = \frac{1}{\sqrt{2}} (|00\rangle + |11\rangle) $$
Alice and Bob each possess one qubit of the entangled pair (denoted as A and B respectively),
$$|e\rangle = \frac{1}{\sqrt{2}} (|0\rangle_A |0\rangle_B + |1\rangle_A |1\rangle_B) $$
This creates a three qubit quantum system where Alice has the first two qubits and Bob the last one.
$$ \begin{aligned}
|\psi\rangle \otimes |e\rangle &= \frac{1}{\sqrt{2}} (\alpha |0\rangle \otimes (|00\rangle + |11\rangle) + \beta |1\rangle \otimes (|00\rangle + |11\rangle))\\
&= \frac{1}{\sqrt{2}} (\alpha|000\rangle + \alpha|011\rangle + \beta|100\rangle + \beta|111\rangle)
\end{aligned}$$
#### Step 2
Now according to the protocol Alice applies CNOT gate on her two qubits followed by Hadamard gate on the first qubit. This results in the state:
$$
\begin{aligned} &(H \otimes I \otimes I) (CNOT \otimes I) (|\psi\rangle \otimes |e\rangle)\\
&=(H \otimes I \otimes I) (CNOT \otimes I) \frac{1}{\sqrt{2}} (\alpha|000\rangle + \alpha|011\rangle + \beta|100\rangle + \beta|111\rangle) \\
&= (H \otimes I \otimes I) \frac{1}{\sqrt{2}} (\alpha|000\rangle + \alpha|011\rangle + \beta|110\rangle + \beta|101\rangle) \\
&= \frac{1}{2} (\alpha(|000\rangle + |011\rangle + |100\rangle + |111\rangle) + \beta(|010\rangle + |001\rangle - |110\rangle - |101\rangle)) \\
\end{aligned}
$$
Which can then be separated and written as:
$$
\begin{aligned}
= \frac{1}{2}(
& \phantom{+} |00\rangle (\alpha|0\rangle + \beta|1\rangle) \hphantom{\quad )} \\
& + |01\rangle (\alpha|1\rangle + \beta|0\rangle) \hphantom{\quad )}\\[4pt]
& + |10\rangle (\alpha|0\rangle - \beta|1\rangle) \hphantom{\quad )}\\[4pt]
& + |11\rangle (\alpha|1\rangle - \beta|0\rangle) \quad )\\
\end{aligned}
$$
#### Step 3
Alice measures the first two qubits (which she owns) and sends the outcomes to Bob as two classical bits. The result she obtains is always one of the four standard basis states $|00\rangle, |01\rangle, |10\rangle,$ and $|11\rangle$, each with equal probability.
On the basis of her measurement, Bob's state will be projected to,
$$
|00\rangle \rightarrow (\alpha|0\rangle + \beta|1\rangle)\\
|01\rangle \rightarrow (\alpha|1\rangle + \beta|0\rangle)\\
|10\rangle \rightarrow (\alpha|0\rangle - \beta|1\rangle)\\
|11\rangle \rightarrow (\alpha|1\rangle - \beta|0\rangle)
$$
#### Step 4
Bob, on receiving the bits from Alice, knows he can obtain the original state $|\psi\rangle$ by applying appropriate transformations on his qubit that was once part of the entangled pair.
The transformations he needs to apply are:
$$
\begin{array}{c c c}
\mbox{Bob's State} & \mbox{Bits Received} & \mbox{Gate Applied} \\
(\alpha|0\rangle + \beta|1\rangle) & 00 & I \\
(\alpha|1\rangle + \beta|0\rangle) & 01 & X \\
(\alpha|0\rangle - \beta|1\rangle) & 10 & Z \\
(\alpha|1\rangle - \beta|0\rangle) & 11 & ZX
\end{array}
$$
After this step Bob will have successfully reconstructed Alice's state.
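As a quick numerical sanity check, the correction table can be verified with plain NumPy. This sketch is independent of the Qiskit circuits above; states are written in the $(\vert 0\rangle, \vert 1\rangle)$ basis with the first amplitude for $\vert 0\rangle$:
```
import numpy as np

# Random normalized qubit state alpha|0> + beta|1>
alpha, beta = np.random.randn(2) + 1j * np.random.randn(2)
norm = np.sqrt(abs(alpha)**2 + abs(beta)**2)
alpha, beta = alpha / norm, beta / norm
psi = np.array([alpha, beta])

I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])

# Bob's state for each measurement outcome, taken from the table above
bob_states = {
    '00': np.array([alpha,  beta]),   # alpha|0> + beta|1>
    '01': np.array([beta,   alpha]),  # alpha|1> + beta|0>
    '10': np.array([alpha, -beta]),   # alpha|0> - beta|1>
    '11': np.array([-beta,  alpha]),  # alpha|1> - beta|0>
}
corrections = {'00': I, '01': X, '10': Z, '11': Z @ X}

for bits, state in bob_states.items():
    assert np.allclose(corrections[bits] @ state, psi), bits
print("All four corrections recover |psi>")
```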
## 5. Teleportation on a Real Quantum Computer <a id='real_qc'></a>
### 5.1 IBM hardware and Deferred Measurement <a id='deferred-measurement'></a>
The IBM quantum computers currently do not support instructions after measurements, meaning we cannot run the quantum teleportation in its current form on real hardware. Fortunately, this does not limit our ability to perform any computations due to the _deferred measurement principle_ discussed in chapter 4.4 of [1]. The principle states that any measurement can be postponed until the end of the circuit, i.e. we can move all the measurements to the end, and we should see the same results.

Any benefits of measuring early are hardware related: If we can measure early, we may be able to reuse qubits, or reduce the amount of time our qubits are in their fragile superposition. In this example, the early measurement in quantum teleportation would have allowed us to transmit a qubit state without a direct quantum communication channel.
While moving the gates allows us to demonstrate the "teleportation" circuit on real hardware, it should be noted that the benefit of the teleportation process (transferring quantum states via classical channels) is lost.
Let us re-write the `bob_gates` function to `new_bob_gates`:
```
def new_bob_gates(qc, a, b, c):
qc.cx(b, c)
qc.cz(a, c)
```
And create our new circuit:
```
qc = QuantumCircuit(3,1)
# First, let's initialize Alice's q0
qc.append(init_gate, [0])
qc.barrier()
# Now begins the teleportation protocol
create_bell_pair(qc, 1, 2)
qc.barrier()
# Send q1 to Alice and q2 to Bob
alice_gates(qc, 0, 1)
qc.barrier()
# Alice sends classical bits to Bob
new_bob_gates(qc, 0, 1, 2)
# We undo the initialization process
qc.append(inverse_init_gate, [2])
# See the results, we only care about the state of qubit 2
qc.measure(2,0)
# View the results:
qc.draw()
```
### 5.2 Executing <a id='executing'></a>
```
# First, see what devices we are allowed to use by loading our saved accounts
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
# get the least-busy backend at IBM and run the quantum circuit there
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
backend = least_busy(provider.backends(filters=lambda b: b.configuration().n_qubits >= 3 and
not b.configuration().simulator and b.status().operational==True))
t_qc = transpile(qc, backend, optimization_level=3)
job = backend.run(t_qc)
job_monitor(job) # displays job status under cell
# Get the results and display them
exp_result = job.result()
exp_counts = exp_result.get_counts(qc)
print(exp_counts)
plot_histogram(exp_counts)
```
As we see here, there are a few results in which we measured $|1\rangle$. These arise due to errors in the gates and the qubits. In contrast, our simulator in the earlier part of the notebook had zero errors in its gates, and allowed error-free teleportation.
```
print(f"The experimental error rate : {exp_counts['1']*100/sum(exp_counts.values()):.3f}%")
```
## 6. References <a id='references'></a>
[1] M. Nielsen and I. Chuang, Quantum Computation and Quantum Information, Cambridge Series on Information and the Natural Sciences (Cambridge University Press, Cambridge, 2000).
[2] Eleanor Rieffel and Wolfgang Polak, Quantum Computing: a Gentle Introduction (The MIT Press Cambridge England, Massachusetts, 2011).
```
import qiskit.tools.jupyter
%qiskit_version_table
```
# Train convolutional network for sentiment analysis.
Based on
"Convolutional Neural Networks for Sentence Classification" by Yoon Kim
http://arxiv.org/pdf/1408.5882v2.pdf
For `CNN-non-static`, accuracy gets to 82.1% after 61 epochs with the following settings:
embedding_dim = 20
filter_sizes = (3, 4)
num_filters = 3
dropout_prob = (0.7, 0.8)
hidden_dims = 100
For `CNN-rand`, accuracy gets to 78-79% after 7-8 epochs with the following settings:
embedding_dim = 20
filter_sizes = (3, 4)
num_filters = 150
dropout_prob = (0.25, 0.5)
hidden_dims = 150
For `CNN-static`, accuracy gets to 75.4% after 7 epochs with the following settings:
embedding_dim = 100
filter_sizes = (3, 4)
num_filters = 150
dropout_prob = (0.25, 0.5)
hidden_dims = 150
* It turns out that a data set as small as "Movie reviews with one sentence per review" (Pang and Lee, 2005) requires a much smaller network than the one introduced in the original article:
- embedding dimension is only 20 (instead of 300; 'CNN-static' still requires ~100)
- 2 filter sizes (instead of 3)
- higher dropout probabilities and
- 3 filters per filter size is enough for 'CNN-non-static' (instead of 100)
- embedding initialization does not require prebuilt Google Word2Vec data; training Word2Vec on the same "Movie reviews" data set is enough to achieve the performance reported in the article (81.6%)
Another distinct difference is the sliding max-pooling window of length 2 used here, instead of max-pooling over the whole feature map as in the article; a brief sketch of the two options follows.
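The following is a minimal illustration of that difference, written with current `tf.keras` layer names (the notebook code below uses the older Keras 1.x API, so treat this only as a sketch of the idea):
```
from tensorflow.keras import layers

# Original article: max-pool over the entire feature map, one value per filter
global_pool = layers.GlobalMaxPooling1D()

# This notebook: sliding max-pooling window of length 2, halving the sequence length
sliding_pool = layers.MaxPooling1D(pool_size=2)
```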
```
import numpy as np
import data_helpers
from w2v import train_word2vec
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Merge, Convolution1D, MaxPooling1D
from sklearn.cross_validation import train_test_split
np.random.seed(2)
model_variation = 'CNN-rand' # CNN-rand | CNN-non-static | CNN-static
print('Model variation is %s' % model_variation)
# Model Hyperparameters
sequence_length = 56
embedding_dim = 20
filter_sizes = (3, 4)
num_filters = 150
dropout_prob = (0.25, 0.5)
hidden_dims = 150
# Training parameters
batch_size = 32
num_epochs = 2
# Word2Vec parameters, see train_word2vec
min_word_count = 1 # Minimum word count
context = 10 # Context window size
print("Loading data...")
x, y, vocabulary, vocabulary_inv = data_helpers.load_data()
if model_variation=='CNN-non-static' or model_variation=='CNN-static':
embedding_weights = train_word2vec(x, vocabulary_inv, embedding_dim, min_word_count, context)
if model_variation=='CNN-static':
x = embedding_weights[0][x]
elif model_variation=='CNN-rand':
embedding_weights = None
else:
raise ValueError('Unknown model variation')
data = np.append(x,y,axis = 1)
train, test = train_test_split(data, test_size = 0.15,random_state = 0)
X_test = test[:,:56]
Y_test = test[:,56:58]
X_train = train[:,:56]
Y_train = train[:,56:58]
train_rows = np.random.randint(0,X_train.shape[0],2500)
X_train = X_train[train_rows]
Y_train = Y_train[train_rows]
print("Vocabulary Size: {:d}".format(len(vocabulary)))
def initialize():
global graph_in
global convs
graph_in = Input(shape=(sequence_length, embedding_dim))
convs = []
#Building the first layer (Convolution Layer) of the network
def build_layer_1(filter_length):
conv = Convolution1D(nb_filter=num_filters,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1)(graph_in)
return conv
#Adding a max pooling layer to the model(network)
def add_max_pooling(conv):
pool = MaxPooling1D(pool_length=2)(conv)
return pool
#Adding a flattening layer to the model(network), before adding a dense layer
def add_flatten(conv_or_pool):
flatten = Flatten()(conv_or_pool)
return flatten
def add_sequential(graph):
#main sequential model
model = Sequential()
if not model_variation=='CNN-static':
model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,
weights=embedding_weights))
model.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
model.add(graph)
model.add(Dense(2))
model.add(Activation('sigmoid'))
return model
#1.Convolution 2.Flatten
def one_layer_convolution():
initialize()
conv = build_layer_1(3)
flatten = add_flatten(conv)
convs.append(flatten)
out = convs[0]
graph = Model(input=graph_in, output=out)
model = add_sequential(graph)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#1.Convolution 2.Max Pooling 3.Flatten
def two_layer_convolution():
initialize()
conv = build_layer_1(3)
pool = add_max_pooling(conv)
flatten = add_flatten(pool)
convs.append(flatten)
out = convs[0]
graph = Model(input=graph_in, output=out)
model = add_sequential(graph)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=num_epochs, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#1.Convolution 2.Max Pooling 3.Flatten 4.Convolution 5.Flatten
def three_layer_convolution():
initialize()
conv = build_layer_1(3)
pool = add_max_pooling(conv)
flatten = add_flatten(pool)
convs.append(flatten)
conv = build_layer_1(4)
flatten = add_flatten(conv)
convs.append(flatten)
if len(filter_sizes)>1:
out = Merge(mode='concat')(convs)
else:
out = convs[0]
graph = Model(input=graph_in, output=out)
model = add_sequential(graph)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#1.Convolution 2.Max Pooling 3.Flatten 4.Convolution 5.Max Pooling 6.Flatten
def four_layer_convolution():
initialize()
conv = build_layer_1(3)
pool = add_max_pooling(conv)
flatten = add_flatten(pool)
convs.append(flatten)
conv = build_layer_1(4)
pool = add_max_pooling(conv)
flatten = add_flatten(pool)
convs.append(flatten)
if len(filter_sizes)>1:
out = Merge(mode='concat')(convs)
else:
out = convs[0]
graph = Model(input=graph_in, output=out)
model = add_sequential(graph)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=num_epochs, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
%%time
#1.Convolution 2.Flatten
one_layer_convolution()
%%time
#1.Convolution 2.Max Pooling 3.Flatten
two_layer_convolution()
%%time
#1.Convolution 2.Max Pooling 3.Flatten 4.Convolution 5.Flatten
three_layer_convolution()
%%time
#1.Convolution 2.Max Pooling 3.Flatten 4.Convolution 5.Max Pooling 6.Flatten
four_layer_convolution()
```
[](https://www.pythonista.io)
# The *OpenAPI* Schema.
https://swagger.io/docs/specification/basic-structure/
## Structure.
* *OpenAPI* version.
* Information (```info```).
* Tags (```tags```).
* Servers (```servers```).
* Components (```components```).
    * Schemas (```schemas```).
    * Request bodies (```requestBodies```).
* Paths (```paths```).
## *OpenAPI* Version.
```yaml
openapi: <version>
```
https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#versions
## Information.
``` yaml
info:
  description: <API description>
  version: <API version>
  title: <Title of the API documentation>
  termsOfService: <Terms-of-service URL>
  contact:
    name: <Contact name>
    email: <Contact email address>
    url: <Reference URL>
  license:
    name: <License name>
    url: <License URL>
externalDocs:
  description: <Description of the external documentation>
  url: <External documentation URL>
```
https://swagger.io/docs/specification/api-general-info/
## Tags:
```yaml
tags:
  - name: <name of tag 1>
    description: <description of tag 1>
  - name: <name of tag 2>
    description: <description of tag 2>
```
https://swagger.io/docs/specification/grouping-operations-with-tags/
## Servers:
``` yaml
servers:
  - url: <URL of server 1>
    description: <description of server 1>
  - url: <URL of server 2>
    description: <description of server 2>
```
## Components.
https://swagger.io/docs/specification/components/
* Schemas (*schemas*)
* Request bodies (*requestBodies*)
``` yaml
components:
  requestBodies:
    - <request body schema 1>
    - <request body schema 2>
  schemas:
    - <schema 1>
    - <schema 2>
  parameters:
    - <parameter 1>
    - <parameter 2>
  responses:
    - <response 1>
    - <response 2>
  headers:
    - <header 1>
    - <header 2>
  examples:
    - <example 1>
    - <example 2>
  callbacks:
    - <URL 1>
    - <URL 2>
```
## Paths.
https://swagger.io/docs/specification/paths-and-operations/
```
"/<segment 1>{<parameter 1>}<segment 2>{<parameter 2>}"
```
**Examples:**
* ```/api/{clave}```
* ```/api/{clave}-{id}/mensajes```
* ```/auth/logout```
``` yaml
paths:
  <path 1>:
    <method 1>
    <method 2>
    parameters:
      <parameter 1>
      <parameter 2>
```
### Parameters.
Parameters are data obtained from the path or from the query string sent with the request.
``` yaml
parameters:
  - name: <Parameter name>
    in: <Source>
    description: <Parameter description>
    required: <boolean>
    example: <Parameter example>
    schema:
      <schema>
```
### Methods.
``` yaml
<method>:
  tags:
    - <tag 1>
    - <tag 2>
  summary: <Summary of the functionality>
  description: <Description of the functionality>
  parameters:
    - <parameter 1>
    - <parameter 2>
  responses:
    <status code 1>
    <status code 2>
  requestBody:
    <request body schema>
```
### Status codes.
``` yaml
<status code number 1>:
  description: <Description of the functionality>
  content:
    <media type>:
      <schema of the response content>
```
### Response contents.
https://swagger.io/docs/specification/describing-responses/
https://swagger.io/docs/specification/data-models/representing-xml/
## Schemas.
https://swagger.io/docs/specification/data-models/
## Data types.
https://swagger.io/docs/specification/data-models/data-types/
### The ```string``` type.
https://swagger.io/docs/specification/data-models/data-types/#string
### The ```number``` and ```integer``` types.
https://swagger.io/docs/specification/data-models/data-types/#numbers
### The ```boolean``` type.
https://swagger.io/docs/specification/data-models/data-types/#boolean
### The ```array``` type.
https://swagger.io/docs/specification/data-models/data-types/#array
### The ```object``` type.
https://swagger.io/docs/specification/data-models/data-types/#object
## Enums.
``` yaml
type: <type>
enum:
  - <element 1>
  - <element 2>
```
https://swagger.io/docs/specification/data-models/enums/
## References.
```
$ref: "path"
```
https://swagger.io/docs/specification/using-ref/
### References within the document.
``` #/<level 1>/<level 2>/... /<level n>/<element> ```
## Examples.
https://swagger.io/docs/specification/adding-examples/
#
<p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2022.</p>
# COVIDvu - US regions visualizer <img src='resources/American-flag.png' align = 'right'>
---
## Runtime prerequisites
```
%%capture --no-stderr requirementsOutput
displayRequirementsOutput = False
%pip install -r requirements.txt
from covidvu.utils import autoReloadCode; autoReloadCode()
if displayRequirementsOutput:
requirementsOutput.show()
```
---
## Pull latest datasets
```
%sx ./refreshdata local patch
```
---
## Confirmed, deaths, recovered datasets
```
import os
import numpy as np
import pandas as pd
from covidvu.cryostation import Cryostation
pd.options.mode.chained_assignment = None
databasePath = './database/virustrack.db'
storage = Cryostation(databasePath=databasePath)
confirmedCases = storage.timeSeriesFor(regionType = 'province',
countryName = 'US',
casesType = 'confirmed', disableProgressBar=False)
confirmedDeaths = storage.timeSeriesFor(regionType = 'province',
countryName = 'US',
casesType = 'deaths', disableProgressBar=False)
```
---
## Cases by US state
```
from ipywidgets import fixed
from ipywidgets import interact
from ipywidgets import widgets
from covidvu import visualize
statesUS = list(confirmedCases.columns)
multiState = widgets.SelectMultiple(
options=statesUS,
value=['New York'],
description='State',
disabled=False
)
log = widgets.Checkbox(value=False, description='Log scale')
```
### Confirmed cases
```
interact(visualize.plotTimeSeriesInteractive,
df=fixed(confirmedCases),
selectedColumns=multiState,
log=log,
yLabel=fixed('Total confirmed cases'),
title=fixed('COVID-19 total confirmed cases in US states')
);
def viewTopStates(n):
return pd.DataFrame(confirmedCases.iloc[-1,:].sort_values(ascending=False).iloc[1:n]).style.background_gradient(cmap="Reds")
interact(viewTopStates, n=widgets.IntSlider(min=1, max=len(statesUS), step=1, value=5));
```
---
## Cases by US region
```
regionsUS = list(confirmedCases.columns)
multiRegion = widgets.SelectMultiple(
options=regionsUS,
value=['New York'],
description='State',
disabled=False
)
interact(visualize.plotTimeSeriesInteractive,
df=fixed(confirmedCases),
selectedColumns=multiRegion,
log=log,
yLabel=fixed('Total confirmed cases'),
title=fixed('COVID-19 total confirmed cases in US regions')
);
```
---
© the COVIDvu Contributors. All rights reserved.
## Libraries
```
### Uncomment the next two lines to,
### install tensorflow_hub and tensorflow datasets
#!pip install tensorflow_hub
#!pip install tensorflow_datasets
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow_hub as hub
import tensorflow_datasets as tfds
from tensorflow.keras import layers
```
### Download and Split data into Train and Validation
```
def get_data():
(train_set, validation_set), info = tfds.load(
'tf_flowers',
with_info=True,
as_supervised=True,
split=['train[:70%]', 'train[70%:]'],
)
return train_set, validation_set, info
train_set, validation_set, info = get_data()
num_examples = info.splits['train'].num_examples
num_classes = info.features['label'].num_classes
print('Total Number of Classes: {}'.format(num_classes))
print('Total Number of Training Images: {}'.format(len(train_set)))
print('Total Number of Validation Images: {} \n'.format(len(validation_set)))
img_shape = 224
batch_size = 32
def format_image(image, label):
image = tf.image.resize(image, (img_shape, img_shape))/255.0
return image, label
train_batches = train_set.shuffle(num_examples//4).map(format_image).batch(batch_size).prefetch(1)
validation_batches = validation_set.map(format_image).batch(batch_size).prefetch(1)
```
### Getting MobileNet model's learned features
```
def get_mobilenet_features():
URL = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
global img_shape
feature_extractor = hub.KerasLayer(URL, input_shape=(img_shape, img_shape,3))
return feature_extractor
### Freezing the layers of transferred model (MobileNet)
feature_extractor = get_mobilenet_features()
feature_extractor.trainable = False
```
## Deep Learning Model - Transfer Learning using MobileNet
```
def create_transfer_learned_model(feature_extractor):
global num_classes
model = tf.keras.Sequential([
feature_extractor,
layers.Dense(num_classes, activation='softmax')
])
model.compile(
optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),  # the Dense layer applies softmax, so outputs are probabilities, not logits
metrics=['accuracy'])
model.summary()
return model
```
### Training the last classification layer of the model
Achieved Validation Accuracy: 90.10% (significant improvement over simple architecture)
```
epochs = 6
model = create_transfer_learned_model(feature_extractor)
history = model.fit(train_batches,
epochs=epochs,
validation_data=validation_batches)
```
### Plotting Accuracy and Loss Curves
```
def create_plots(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
global epochs
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
create_plots(history)
```
### Prediction
```
def predict():
global train_batches, info
image_batch, label_batch = next(iter(train_batches.take(1)))
image_batch = image_batch.numpy()
label_batch = label_batch.numpy()
predicted_batch = model.predict(image_batch)
predicted_batch = tf.squeeze(predicted_batch).numpy()
class_names = np.array(info.features['label'].names)
predicted_ids = np.argmax(predicted_batch, axis=-1)
predicted_class_names = class_names[predicted_ids]
return image_batch, label_batch, predicted_ids, predicted_class_names
image_batch, label_batch, predicted_ids, predicted_class_names = predict()
print("Labels: ", label_batch)
print("Predicted labels: ", predicted_ids)
def plot_figures():
global image_batch, predicted_ids, label_batch
plt.figure(figsize=(10,9))
for n in range(30):
plt.subplot(6,5,n+1)
plt.subplots_adjust(hspace = 0.3)
plt.imshow(image_batch[n])
color = "blue" if predicted_ids[n] == label_batch[n] else "red"
plt.title(predicted_class_names[n].title(), color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (blue: correct, red: incorrect)")
plot_figures()
```
```
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
import os
import time
from szsimulator import Szsimulator
%matplotlib inline
mean_size = 3 # micron
doubling_time = 18 #min
tmax = 180 #min
sample_time = 2 #min
div_steps = 10
ncells = 1000
gr = np.log(2)/doubling_time
kd = div_steps*gr/(mean_size)
ncells = 2000
sampling_time = sample_time
rprom = 10 # RNA mean concentration
pprom = 1000 # prot mean concentration
gammar = 5*gr # RNA Active degradation rate
kr = rprom*(gr+gammar) # RNA transcription rate
kp = pprom*gr/rprom # Protein translation rate
pop = np.zeros([ncells,6])
indexes = np.int(tmax/sampling_time)
rarray = np.zeros([ncells,indexes])
parray = np.zeros([ncells,indexes])
tarray = np.zeros([indexes])
szarray = np.zeros([ncells,indexes])
cellindex = 0
indexref = 0
start = time.time()
for cell in pop:
if ncells > 100:
if cellindex/ncells > indexref:
print(str(np.int(100*cellindex/ncells))+"%")
indexref += 0.1
#Initialize the simulator
sim = Szsimulator(tmax = tmax, sample_time = sample_time, ncells=1, gr = gr, k = kd, steps = div_steps)
#_______________
#Example of a direct SSA simulation
cell[0] = mean_size #Initial size
cell[1] = mean_size*rprom #Initial RNA number
cell[2] = mean_size*pprom #Initial Protein number
cell[3] = (1/gr)*np.log(1-(gr/(kr*cell[0]))*np.log(np.random.rand())) #time to thenext rna creation
cell[4] = -np.log(np.random.rand())/(gammar*cell[1]) #time to the next rna degradation
cell[5] = -np.log(np.random.rand())/(kp*cell[1]) #time to next protein creation
t=0
reactions=[[0,1,0,0,0,0],[0,-1,0,0,0,0],[0,0,1,0,0,0]] #Reactions (RNA creation, RNA active degradation, Protein creation)
nextt = 0
index = 0
ndiv = 0
while t<tmax: #iterating over time
nr = cell[1]
nprot = cell[2]
sz = cell[0]
tnextarr = [cell[3],cell[4],cell[5]]
tau = np.min(tnextarr)
cell += reactions[np.argmin(tnextarr)]
#------------------
sim.simulate(tmax=tau,export = False) #Simulate size dynamics for that given time
#--------------------
cell[0] = sim.get_sz(0) #Taking the cell size after that simulation
if sim.get_ndiv(0) > ndiv: #Check if cell got divided
cell[1] = np.random.binomial(nr,0.5) # RNA segregated binomially
cell[2] = np.random.binomial(nprot,0.5) # Protein segregated binomially
ndiv += 1 # New number of divisions
nr = cell[1] #Refreshing RNA number
nprot = cell[2] #Refreshing Protein number
sz = cell[0] #Refreshing size number
cell[3] = (1/gr)*np.log(1-(gr/(kr*cell[0]))*np.log(np.random.rand())) #time to thenext rna creation
cell[4] = -np.log(np.random.rand())/(gammar*cell[1]) #time to the next rna degradation
cell[5] = -np.log(np.random.rand())/(kp*cell[1]) #time to next protein creation
t+=tau
if t > nextt and index<len(tarray): #storing data
rarray[cellindex,index] = nr/sz # RNA concentration
parray[cellindex,index] = nprot/sz # Protein concentration
szarray[cellindex,index] = sz # Cell size
tarray[index] = t # Time
index += 1
nextt += sampling_time
cellindex += 1
print('It took', np.int(time.time()-start), 'seconds.')
data=pd.DataFrame(np.transpose(np.array(szarray)))
ind=0
newcol=[]
for name in data.columns:
newcol.append("mom"+str(ind))
ind+=1
data.columns=newcol
mnszarray=[]
cvszarray=[]
errcv2sz=[]
errmnsz=[]
for m in range(len(data)):
szs=data.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95)
mnszarray.append(mean_cntr[0])
errmnsz.append(mean_cntr[1][1]-mean_cntr[0])
cvszarray.append(var_cntr[0]/mean_cntr[0]**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2sz.append(errv)
data['time'] = tarray
data['Mean_sz'] = mnszarray
data['Error_mean'] = errmnsz
data['sz_CV2'] = cvszarray
data['Error_CV2'] = errcv2sz
if not os.path.exists('./data/SSA'):
os.makedirs('./data/SSA')
data.to_csv("./data/SSA/szsim.csv")
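# ----------------------------------------------------------------------
# (Added descriptive comment; interpretation inferred from the code below.)
# The next block integrates the division-counting master equation with an
# explicit Euler scheme: u[n][m] approximates the probability of having
# completed n divisions and m division steps at time t. Its moments
# (mean cell size and size CV^2) are accumulated along the way and later
# plotted against the SSA statistics obtained above.
# ----------------------------------------------------------------------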
tmax=9*doubling_time
dt=0.0001*doubling_time
lamb=1
a=gr
nsteps=div_steps
k=kd
v0=mean_size
#psz1=[]
ndivs=10
t=0
bigdeltat=0.1
steps=int(np.floor(tmax/dt))
u=np.zeros([ndivs,nsteps])#(DIVS,STEPS)
u[0]=np.zeros(nsteps)
u[0][0]=1#P_00
allmeandivs4=[]#average divisions along the time
allvardiv4=[] # variace of pn along the time
allmeansz4=[]
allvarsz4=[]
time4=[]#time array
yenvol=[]
xenvol=[]
start=0
count=int(np.floor(tmax/(dt*1000)))-1
count2=0
start = time.time()
for l in range(steps):
utemp=u
for n in range(len(utemp)):#n=divs,
for m in range(len(utemp[n])):#m=steps
if (m==0):#m=steps
if(n==0):#n=divs
dun=-k*v0**lamb*np.exp(lamb*a*t)*(utemp[0][0])
u[n][m]+=dun*dt
else:
arg=lamb*(a*t-n*np.log(2))
dun=k*v0**lamb*np.exp(arg)*((2**lamb)*utemp[n-1][len(utemp[n])-1]-utemp[n][0])
u[n][m]+=dun*dt
elif(m==len(utemp[n])-1):
if(n==len(utemp)-1):
arg=lamb*(a*t-n*np.log(2))
dun=k*v0**lamb*np.exp(arg)*(utemp[n][len(utemp[n])-2])
u[n][m]+=dun*dt
else:
arg=lamb*(a*t-n*np.log(2))
dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
u[n][m]+=dun*dt
else:
arg=lamb*(a*t-n*np.log(2))
dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
u[n][m]+=dun*dt
t+=dt
count=count+1
if count==int(np.floor(tmax/(dt*1000))):
time4.append(t/doubling_time)
mean=0
for n in range(len(utemp)):
pnval=np.sum(u[n])
mean+=n*pnval
allmeandivs4.append(mean/mean_size)
var=0
for n in range(len(utemp)):#divs
pnval=np.sum(u[n])
var+=(n-mean)**2*pnval
allvardiv4.append(np.sqrt(var))
pn=np.zeros(ndivs)
sizen=np.zeros(ndivs)
meansz=0
for ll in range(len(utemp)):
pnltemp=np.sum(u[ll])#prob of n divs
pn[ll]=pnltemp#
sizen[ll]=np.exp(a*t)/2**ll#
meansz+=pnltemp*v0*np.exp(a*t)/2**ll
allmeansz4.append(meansz)
varsz=0
for ll in range(len(utemp)):
pnltemp=np.sum(u[ll])
varsz+=(v0*np.exp(a*t)/2**ll-meansz)**2*pnltemp
allvarsz4.append(varsz)
count=0
count2+=1
if(count2==100):
print(str(int(100*t/tmax))+"%")
count2=0
print('It took', np.int(time.time()-start), 'seconds.')
fig, ax = plt.subplots(1,2, figsize=(12,4))
#ax[0].plot(tarray,mnszarray)
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label='SSA')
#ax[1].plot(tarray,cvszarray)
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvszarray)-np.array(errcv2sz),np.array(cvszarray)+np.array(errcv2sz),
alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[0].plot(np.array(time4),np.array(allmeansz4),lw=2,c='#006599',label="Numerical")
ax[1].plot(np.array(time4),np.array(allvarsz4)/np.array(allmeansz4)**2,lw=2,c='#006599')
ax[0].set_ylabel("$s$ ($\mu$m)",size=20)
ax[1].set_ylabel("$C_V^2(s)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([1,1.2*np.max(mnszarray)])
ax[1].set_ylim([0,1.2*np.max(cvszarray)])
for l in [0,1]:
ax[l].set_xlim([0,tmax/doubling_time])
taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
ax[l].set_xticks(np.array(taqui))
ax[l].grid()
ax[l].tick_params(axis='x', labelsize=15)
ax[l].tick_params(axis='y', labelsize=15)
for axis in ['bottom','left']:
ax[l].spines[axis].set_linewidth(2)
ax[l].tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
ax[l].spines[axis].set_linewidth(0)
ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
taqui=np.arange(0,0.15,step=0.02)
ax[1].set_yticks(np.array(taqui))
ax[0].legend(fontsize=15)
if not os.path.exists('./figures/SSA'):
os.makedirs('./figures/SSA')
plt.savefig('./figures/SSA/size_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/SSA/size_statistics.png',bbox_inches='tight')
data=pd.DataFrame(np.transpose(np.array(rarray)))
ind=0
newcol=[]
for name in data.columns:
newcol.append("mom"+str(ind))
ind+=1
data.columns=newcol
mnrnaarray=[]
cvrnaarray=[]
errcv2rna=[]
errmnrna=[]
for m in range(len(data)):
rnas=data.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(rnas,alpha=0.95)
mnrnaarray.append(mean_cntr[0])
errmnrna.append(mean_cntr[1][1]-mean_cntr[0])
cvrnaarray.append(var_cntr[0]/mean_cntr[0]**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2rna.append(errv)
data['time'] = tarray
data['Mean_RNA'] = mnrnaarray
data['Error_mean'] = errmnrna
data['RNA_CV2'] = cvrnaarray
data['Error_CV2'] = errcv2rna
if not os.path.exists('./data/SSA'):
os.makedirs('./data/SSA')
data.to_csv("./data/SSA/RNAsim.csv")
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].plot(np.array(tarray)/doubling_time,mnrnaarray,c="#BD0025")
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnrnaarray)-np.array(errmnrna),np.array(mnrnaarray)+np.array(errmnrna),
alpha=1, edgecolor='#FF3333', facecolor='#FF3333',linewidth=0)
ax[1].plot(np.array(tarray)/doubling_time,cvrnaarray,c="#BD0025")
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvrnaarray)-np.array(errcv2rna),np.array(cvrnaarray)+np.array(errcv2rna),
alpha=1, edgecolor='#FF3333', facecolor='#FF3333',linewidth=0)
ax[0].set_ylabel("RNA",size=20)
ax[1].set_ylabel("$C_V^2(r)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([0,1.2*np.max(mnrnaarray)])
ax[1].set_ylim([0,1.2*np.max(cvrnaarray)])
for l in [0,1]:
ax[l].set_xlim([0,tmax/doubling_time])
taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
ax[l].set_xticks(np.array(taqui))
ax[l].grid()
ax[l].tick_params(axis='x', labelsize=15)
ax[l].tick_params(axis='y', labelsize=15)
for axis in ['bottom','left']:
ax[l].spines[axis].set_linewidth(2)
ax[l].tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
ax[l].spines[axis].set_linewidth(0)
ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
taqui=np.arange(0,1.2*np.max(cvrnaarray),step=np.round(.2*np.max(cvrnaarray),2))
ax[1].set_yticks(np.array(taqui))
if not os.path.exists('./figures/SSA'):
os.makedirs('./figures/SSA')
plt.savefig('./figures/SSA/rna_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/SSA/rna_statistics.png',bbox_inches='tight')
data=pd.DataFrame(np.transpose(np.array(parray)))
ind=0
newcol=[]
for name in data.columns:
newcol.append("mom"+str(ind))
ind+=1
data.columns=newcol
mnprotarray=[]
cvprotarray=[]
errcv2prot=[]
errmnprot=[]
for m in range(len(data)):
rnas=data.loc[m, :].values.tolist()
mean_cntr, var_cntr, std_cntr = bayesest(rnas,alpha=0.95)
mnprotarray.append(mean_cntr[0])
errmnprot.append(mean_cntr[1][1]-mean_cntr[0])
cvprotarray.append(var_cntr[0]/mean_cntr[0]**2)
errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3
errcv2prot.append(errv)
data['time'] = tarray
data['Mean_prot'] = mnrnaarray
data['Error_mean'] = errmnrna
data['prot_CV2'] = cvrnaarray
data['Error_CV2'] = errcv2rna
if not os.path.exists('./data/SSA'):
os.makedirs('./data/SSA')
data.to_csv("./data/SSA/protsim.csv")
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].plot(np.array(tarray)/doubling_time,mnprotarray,c="#3BB000")
ax[0].fill_between(np.array(tarray)/doubling_time,np.array(mnprotarray)-np.array(errmnprot),np.array(mnprotarray)+np.array(errmnprot),
alpha=1, edgecolor='#4BE000', facecolor='#4BE000',linewidth=0)
ax[1].plot(np.array(tarray)/doubling_time,cvprotarray,c="#3BB000")
ax[1].fill_between(np.array(tarray)/doubling_time,np.array(cvprotarray)-np.array(errcv2prot),np.array(cvprotarray)+np.array(errcv2prot),
alpha=1, edgecolor='#4BE000', facecolor='#4BE000',linewidth=0)
ax[0].set_ylabel("Protein",size=20)
ax[1].set_ylabel("$C_V^2(p)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].set_ylim([0,1.2*np.max(mnprotarray)])
ax[1].set_ylim([0,1.2*np.max(cvprotarray)])
for l in [0,1]:
ax[l].set_xlim([0,tmax/doubling_time])
taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
ax[l].set_xticks(np.array(taqui))
ax[l].grid()
ax[l].tick_params(axis='x', labelsize=15)
ax[l].tick_params(axis='y', labelsize=15)
for axis in ['bottom','left']:
ax[l].spines[axis].set_linewidth(2)
ax[l].tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
ax[l].spines[axis].set_linewidth(0)
ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.5)
taqui=np.arange(0,1.2*np.max(cvprotarray),step=np.round(.2*np.max(cvprotarray),4))
ax[1].set_yticks(np.array(taqui))
if not os.path.exists('./figures'):
os.makedirs('./figures')
if not os.path.exists('./figures/SSA'):
os.makedirs('./figures/SSA')
plt.savefig('./figures/SSA/prot_statistics.svg',bbox_inches='tight')
plt.savefig('./figures/SSA/prot_statistics.png',bbox_inches='tight')
```
# Text Data in scikit-learn
```
import matplotlib.pyplot as plt
import sklearn
sklearn.set_config(display='diagram')
from pathlib import Path
import tarfile
from urllib import request
data_path = Path("data")
extracted_path = Path("data") / "train"
imdb_path = data_path / "aclImdbmini.tar.gz"
def untar_imdb():
if extracted_path.exists():
print("imdb dataset already extracted")
return
with tarfile.open(imdb_path, "r") as tar_f:
tar_f.extractall(data_path)
# This may take some time to run since it will download and extracted
untar_imdb()
```
## CountVectorizer
```
sample_text = ["Can we go to the hill? I finished my homework.",
"The hill is very tall. Please be careful"]
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer()
vect.fit(sample_text)
vect.get_feature_names()
X = vect.transform(sample_text)
X
X.toarray()
```
### Bag of words
```
sample_text
X_inverse = vect.inverse_transform(X)
X_inverse[0]
X_inverse[1]
```
## Loading text data with scikit-learn
```
from sklearn.datasets import load_files
reviews_train = load_files(extracted_path, categories=["neg", "pos"])
raw_text_train, raw_y_train = reviews_train.data, reviews_train.target
raw_text_train = [doc.replace(b"<br />", b" ") for doc in raw_text_train]
import numpy as np
np.unique(raw_y_train)
np.bincount(raw_y_train)
len(raw_text_train)
raw_text_train[5]
```
## Split dataset
```
from sklearn.model_selection import train_test_split
text_train, text_test, y_train, y_test = train_test_split(
raw_text_train, raw_y_train, stratify=raw_y_train, random_state=0)
```
### Transform training data
```
vect = CountVectorizer()
X_train = vect.fit_transform(text_train)
len(text_train)
X_train
```
### Transform testing set
```
len(text_test)
X_test = vect.transform(text_test)
X_test
```
### Extract feature names
```
feature_names = vect.get_feature_names()
feature_names[10000:10020]
feature_names[::3000]
```
### Linear model for classification
```
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver='liblinear', random_state=42).fit(X_train, y_train)
lr.score(X_test, y_test)
def plot_important_features(coef, feature_names, top_n=20, ax=None, rotation=40):
if ax is None:
ax = plt.gca()
feature_names = np.asarray(feature_names)
coef = coef.reshape(-1)
inds = np.argsort(coef)
low = inds[:top_n]
high = inds[-top_n:]
important = np.hstack([low, high])
myrange = range(len(important))
colors = ['red'] * top_n + ['blue'] * top_n
ax.bar(myrange, coef[important], color=colors)
ax.set_xticks(myrange)
ax.set_xticklabels(feature_names[important], rotation=rotation, ha="right")
ax.set_xlim(-.7, 2 * top_n)
ax.set_frame_on(False)
feature_names = vect.get_feature_names()
fig, ax = plt.subplots(figsize=(15, 6))
plot_important_features(lr.coef_, feature_names, top_n=20, ax=ax)
```
## Exercise 1
1. Train a `sklearn.ensemble.RandomForestClassifier` on the training set, `X_train` and `y_train`.
2. Evaluate the accuracy on the test set.
3. What are the top 20 most important features according to the random forest's `feature_importances_`?
```
# %load solutions/01-ex01-solutions.py
```
## CountVectorizer Options
```
sample_text = ["Can we go to the hill? I finished my homework.",
"The hill is very tall. Please be careful"]
vect = CountVectorizer()
vect.fit(sample_text)
vect.get_feature_names()
```
### Stop words
```
vect = CountVectorizer(stop_words='english')
vect.fit(sample_text)
vect.get_feature_names()
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
print(list(ENGLISH_STOP_WORDS))
```
### Max features
```
vect = CountVectorizer(max_features=4, stop_words='english')
vect.fit(sample_text)
vect.get_feature_names()
```
### Min frequency on the imdb dataset
With `min_df=1` (default)
```
X_train.shape
```
With `min_df=4`
```
vect = CountVectorizer(min_df=4)
X_train_min_df_4 = vect.fit_transform(text_train)
X_train_min_df_4.shape
lr_df_4 = LogisticRegression(solver='liblinear', random_state=42).fit(X_train_min_df_4, y_train)
X_test_min_df_4 = vect.transform(text_test)
```
#### Scores with different min frequencies
```
lr_df_4.score(X_test_min_df_4, y_test)
lr.score(X_test, y_test)
```
## Pipelines and Vectorizers
```
from sklearn.pipeline import Pipeline
log_reg = Pipeline([
('vectorizer', CountVectorizer()),
('classifier', LogisticRegression(random_state=42, solver='liblinear'))
])
log_reg
text_train[:2]
log_reg.fit(text_train, y_train)
log_reg.score(text_train, y_train)
log_reg.score(text_test, y_test)
```
## Exercise 2
1. Create a pipeline with a `CountVectorizer` with `min_df=5` and `stop_words='english'` and a `RandomForestClassifier`.
2. What is the score of the random forest on the test dataset?
```
# %load solutions/01-ex02-solutions.py
```
## Bigrams
`CountVectorizer` takes a `ngram_range` parameter
```
sample_text
cv = CountVectorizer(ngram_range=(1, 1)).fit(sample_text)
print("Vocabulary size:", len(cv.vocabulary_))
print("Vocabulary:", cv.get_feature_names())
cv = CountVectorizer(ngram_range=(2, 2)).fit(sample_text)
print("Vocabulary size:", len(cv.vocabulary_))
print("Vocabulary:")
print(cv.get_feature_names())
cv = CountVectorizer(ngram_range=(1, 2)).fit(sample_text)
print("Vocabulary size:", len(cv.vocabulary_))
print("Vocabulary:")
print(cv.get_feature_names())
```
## n-grams with stop words
```
cv_n_gram = CountVectorizer(ngram_range=(1, 2), min_df=4, stop_words="english")
cv_n_gram.fit(text_train)
len(cv_n_gram.vocabulary_)
print(cv_n_gram.get_feature_names()[::2000])
pipe_cv_n_gram = Pipeline([
('vectorizer', cv_n_gram),
('classifier', LogisticRegression(random_state=42, solver='liblinear'))
])
pipe_cv_n_gram.fit(text_train, y_train)
pipe_cv_n_gram.score(text_test, y_test)
feature_names = pipe_cv_n_gram['vectorizer'].get_feature_names()
fig, ax = plt.subplots(figsize=(15, 6))
plot_important_features(pipe_cv_n_gram['classifier'].coef_.ravel(), feature_names, top_n=20, ax=ax)
```
## Tf-idf rescaling
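`TfidfVectorizer` replaces raw counts with term-frequency times inverse-document-frequency weights. With scikit-learn's default settings (`smooth_idf=True`, `norm='l2'`) the weight of term $t$ in document $d$ is

$$ \text{tf-idf}(t, d) = \text{tf}(t, d)\,\left(\ln\frac{1 + n}{1 + \text{df}(t)} + 1\right), $$

where $n$ is the number of documents and $\text{df}(t)$ is the number of documents containing $t$; each document vector is then rescaled to unit Euclidean length.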
```
sample_text
from sklearn.feature_extraction.text import TfidfVectorizer
tfidvect = TfidfVectorizer().fit(sample_text)
tfid_trans = tfidvect.transform(sample_text)
tfid_trans.toarray()
```
## Train on the imdb dataset
```
log_reg_tfid = Pipeline([
('vectorizer', TfidfVectorizer(ngram_range=(1, 2), min_df=4,
stop_words="english")),
('classifier', LogisticRegression(random_state=42, solver='liblinear'))
])
log_reg_tfid.fit(text_train, y_train)
log_reg_tfid.score(text_test, y_test)
```
## Exercise 3
0. Load data from `fetch_20newsgroups`:
```python
from sklearn.datasets import fetch_20newsgroups
categories = [
'alt.atheism',
'sci.space',
]
remove = ('headers', 'footers', 'quotes')
data_train = fetch_20newsgroups(subset='train', categories=categories,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
remove=remove)
X_train, y_train = data_train.data, data_train.target
X_test, y_test = data_test.data, data_test.target
```
1. How many samples are there in the training dataset and test dataset?
1. Construct a pipeline with a `TfidfVectorizer` and `LogisticRegression`.
1. Evaluate the pipeline on the test set.
1. Plot the feature importances using `plot_important_features`.
```
# %load solutions/01-ex03-solutions.py
```
# Create a general MODFLOW model from the NHDPlus dataset
Project-specific variables are imported in the model_specs.py and gen_mod_dict.py files, which must be included in the notebook directory. The first includes pathnames to data sources that will be different for each user. The second includes a dictionary of model-specific information such as cell size, default hydraulic parameter values, and scenario definition (e.g. include bedrock, number of layers, etc.). There are examples in the repository. Run the following cells up to the "Run to here" cell to get a pull-down menu of models in the model_dict. Then, without re-running that cell, run all the remaining cells. Re-running the following cell would reset the model to the first one in the list, which you probably don't want. If you use the notebook option to run all cells below, it runs the cell you're in, so if you use that option, move to the next cell (below the pull-down menu of models) first.
```
__author__ = 'Jeff Starn'
%matplotlib notebook
from model_specs import *
from gen_mod_dict import *
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy as fp
import pandas as pd
import gdal
gdal.UseExceptions()
import shutil
# from model_specs import *
# from gen_mod_dict import *
from ipywidgets import interact, Dropdown
from IPython.display import display
for key, value in model_dict.items():
md = key
ms = model_dict[md]
print('trying {}'.format(md))
try:
pass
except:
pass
models = list(model_dict.keys())
models.sort()
model_area = Dropdown(
options=models,
description='Model:',
background_color='cyan',
border_color='black',
border_width=2)
display(model_area)
```
### Run to here to initiate notebook
The first time you use this notebook in a session (before restarting the notebook), run the cells up to this point. Then select your model from the dropdown list above. Move your cursor to this cell and use the toolbar menu Cell --> Run All Below. After the first time, if you want to run another model, select your model and start running from this cell; you don't need to re-run the cells from the beginning.
## Preliminary stuff
```
md = model_area.value
ms = model_dict[md]
print('The model being processed is {}\n'.format(md))
```
Set pathnames and create workspace directories for geographic data (from Notebook 1) and this model.
```
geo_ws = os.path.join(proj_dir, ms['ws'])
model_ws = os.path.join(geo_ws, scenario_dir)
array_pth = os.path.join(model_ws, 'arrays')
try:
shutil.rmtree(array_pth)
except:
pass
try:
shutil.rmtree(model_ws)
except:
pass
os.makedirs(model_ws)
head_file_name = '{}.hds'.format(md)
head_file_pth = os.path.join(model_ws, head_file_name)
print (model_ws)
```
Replace entries from the default K_dict with the model specific K values from model_dict if they exist.
```
for key, value in K_dict.items():
if key in ms.keys():
K_dict[key] = ms[key]
```
Replace entries from the default rock_riv_dict with the model specific values from model_dict if they exist. rock_riv_dict has various attributes of bedrock and stream geometry.
```
for key, value in rock_riv_dict.items():
if key in ms.keys():
rock_riv_dict[key] = ms[key]
```
Assign values to variables used in this notebook using rock_riv_dict
```
min_thk = rock_riv_dict['min_thk']
stream_width = rock_riv_dict['stream_width']
stream_bed_thk = rock_riv_dict['stream_bed_thk']
river_depth = rock_riv_dict['river_depth']
bedrock_thk = rock_riv_dict['bedrock_thk']
stream_bed_kadjust = rock_riv_dict['stream_bed_kadjust']
```
## Read the information for a model domain processed using Notebook 1
Read the model_grid data frame from a csv file. Extract grid dimensions and ibound array.
```
model_file = os.path.join(geo_ws, 'model_grid.csv')
model_grid = pd.read_csv(model_file, index_col='node_num', na_values=['nan', hnoflo])
NROW = model_grid.row.max() + 1
NCOL = model_grid.col.max() + 1
num_cells = NROW * NCOL
ibound = model_grid.ibound.reshape(NROW, NCOL)
inactive = (ibound == 0)
```
## Translate geologic information into hydrologic properties
```
# # old geology used in general models prior to 4/5/2016
# coarse_deposits = (model_grid.coarse_flag == 2)
# coarse_is_1 = coarse_deposits.reshape(NROW, NCOL)
```
This version replaces Soller's Surfmat with the Quaternary Atlas. Look-up table for coarse deposits (zone = 1) from Dick Yager's new_unit. All other categories are lumped with fine deposits (zone = 0).
* alluvium = 1
* ice contact = 9
* lacustrine coarse = 11
* outwash = 17
Create a dictionary that maps zone numbers to entries in the K_dict from gen_mod_dict (key = zone number, value = entry in K_dict). Make sure these correspond to the correct units; if you're using the defaults, they do.
```
zone_dict = {0 : 'K_fine', 1 : 'K_coarse', 2 : 'K_lakes', 3 : 'K_bedrock'}
```
Perform the mapping from zone number to K to create the Kh1d array.
```
zones1d = np.zeros(( NROW, NCOL ), dtype=np.int32)
qa = model_grid.qu_atlas.reshape( NROW, NCOL )
zones1d[qa == 1] = 1
zones1d[qa == 9] = 1
zones1d[qa == 11] = 1
zones1d[qa == 17] = 1
la = model_grid.lake.reshape( NROW, NCOL )
zones1d[la == 1] = 2
Kh1d = np.zeros(( NROW, NCOL ), dtype=np.float32)
for key, val in zone_dict.items():
Kh1d[zones1d == key] = K_dict[val]
model_grid['K0'] = Kh1d.ravel()
```
## Process boundary condition information
Create a dictionary of stream information for the drain or river package.
River package input also needs the elevation of the river bed. Don't use both packages. The choice is made by commenting/uncommenting sections of the modflow function. Replace segment_len (segment length) with the conductance. The river package has not been tested.
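For reference, the drain/river conductance assigned in the next cell follows the standard streambed form

$$C = \frac{K_0 \, k_{adjust} \, L_{seg} \, w}{b},$$

where $K_0$ is the cell hydraulic conductivity, $k_{adjust}$ is stream_bed_kadjust, $L_{seg}$ is segment_len, $w$ is stream_width, and $b$ is stream_bed_thk.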
```
drn_flag = model_grid.stage.notna() & (model_grid.ibound == 1)
drn_data = model_grid.loc[drn_flag, ['lay', 'row', 'col', 'stage', 'segment_len', 'K0']]
drn_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'K0']
dcond = drn_data.K0 *stream_bed_kadjust* drn_data.segment_len * stream_width / stream_bed_thk
drn_data['segment_len'] = dcond
drn_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
drn_data.drop('K0', axis=1, inplace=True)
drn_data.dropna(axis='index', inplace=True)
drn_data.insert(drn_data.shape[1], 'iface', 6)
drn_recarray = drn_data.to_records(index=False)
drn_dict = {0 : drn_recarray}
riv_flag = model_grid.stage.notna() & (model_grid.ibound == 1)
riv_data = model_grid.loc[riv_flag, ['lay', 'row', 'col', 'stage', 'segment_len',
'reach_intermit', 'K0']]
riv_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'rbot', 'K0']
riv_data[['rbot']] = riv_data.stage - river_depth
rcond = riv_data.K0 * stream_bed_kadjust* riv_data.segment_len * stream_width / stream_bed_thk
riv_data['segment_len'] = rcond
riv_data.rename(columns={'segment_len' : 'rcond'}, inplace=True)
riv_data.drop('K0', axis=1, inplace=True)
riv_data.dropna(axis='index', inplace=True)
riv_data.insert(riv_data.shape[1], 'iface', 6)
riv_recarray = riv_data.to_records(index=False)
riv_dict = {0 : riv_recarray}
```
Create a dictionary of information for the general-head boundary package.
Similar to the above cell. Not tested.
```
if model_grid.ghb.sum() > 0:
ghb_flag = model_grid.ghb == 1
ghb_data = model_grid.loc[ghb_flag, ['lay', 'row', 'col', 'top', 'segment_len', 'K0']]
ghb_data.columns = ['k', 'i', 'j', 'stage', 'segment_len', 'K0']
gcond = ghb_data.K0 * L * L / stream_bed_thk
ghb_data['segment_len'] = gcond
ghb_data.rename(columns={'segment_len' : 'cond'}, inplace=True)
ghb_data.drop('K0', axis=1, inplace=True)
ghb_data.dropna(axis='index', inplace=True)
ghb_data.insert(ghb_data.shape[1], 'iface', 6)
ghb_recarray = ghb_data.to_records(index=False)
ghb_dict = {0 : ghb_recarray}
```
### Create 1-layer model to get initial top-of-aquifer on which to drape subsequent layering
Get starting heads from top elevations. The top is defined as the model-cell-mean NED elevation except in streams, where it is interpolated between MaxElevSmo and MinElevSmo in the NHD (called 'stage' in model_grid). Make them a little higher than land so that drains don't accidentally go dry too soon.
```
top = model_grid.top.reshape(NROW, NCOL)
strt = top * 1.05
```
Modify the bedrock surface, ensuring that it is always at least min_thk below the top elevation. This calculation will be revisited for the multi-layer case.
```
bedrock = model_grid.bedrock_el.reshape(NROW, NCOL)
thk = top - bedrock
thk[thk < min_thk] = min_thk
bot = top - thk
```
## Create recharge array
This version replaces the Wolock/Yager recharge grid with the GWRP SWB grid.
```
## used in general models prior to 4/5/2016
# rech = model_grid.recharge.reshape(NROW, NCOL)
```
Replace the rech array as follows:
* calculate total recharge for the model domain
* calculate areas of fine and coarse deposits
* apportion recharge according to the ratio specified in gen_mod_dict.py
* write the values to an array
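The apportionment implemented in the next cell comes from solving

$$R_{total} = R_f A_f + R_c A_c, \qquad R_c = \text{rech\_fact} \cdot R_f \quad\Rightarrow\quad R_f = \frac{R_{total}}{\text{rech\_fact} \cdot A_c + A_f},$$

where $A_f$ and $A_c$ are the numbers of active fine and coarse cells and $R_{total}$ is the summed SWB recharge over active cells.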
```
r_swb = model_grid.swb.reshape(NROW, NCOL) / 365.25
rech_ma = np.ma.MaskedArray(r_swb, mask=inactive)
coarse_ma = np.ma.MaskedArray(zones1d != 0, mask=inactive)
fine_ma = np.ma.MaskedArray(zones1d == 0, mask=inactive)
total_rech = rech_ma.sum()
Af = fine_ma.sum()
Ac = coarse_ma.sum()
Rf = total_rech / (rech_fact * Ac + Af)
Rc = rech_fact * Rf
rech = np.zeros_like(r_swb)
rech[zones1d != 0] = Rc
rech[zones1d == 0] = Rf
```
## Define a function to create and run MODFLOW
```
def modflow(md, mfpth, model_ws, nlay=1, top=top, strt=strt, nrow=NROW, ncol=NCOL, botm=bedrock,
ibound=ibound, hk=Kh1d, rech=rech, stream_dict=drn_dict, delr=L, delc=L,
hnoflo=hnoflo, hdry=hdry, iphdry=1):
strt_dir = os.getcwd()
os.chdir(model_ws)
ml = fp.modflow.Modflow(modelname=md, exe_name=mfpth, version='mfnwt',
external_path='arrays')
# add packages (DIS has to come before either BAS or the flow package)
dis = fp.modflow.ModflowDis(ml, nlay=nlay, nrow=NROW, ncol=NCOL, nper=1, delr=L, delc=L,
laycbd=0, top=top, botm=botm, perlen=1.E+05, nstp=1, tsmult=1,
steady=True, itmuni=4, lenuni=2, extension='dis',
unitnumber=11)
bas = fp.modflow.ModflowBas(ml, ibound=ibound, strt=strt, ifrefm=True,
ixsec=False, ichflg=False, stoper=None, hnoflo=hnoflo, extension='bas',
unitnumber=13)
upw = fp.modflow.ModflowUpw(ml, laytyp=1, layavg=0, chani=1.0, layvka=1, laywet=0, ipakcb=53,
hdry=hdry, iphdry=iphdry, hk=hk, hani=1.0, vka=1.0, ss=1e-05,
sy=0.15, vkcb=0.0, noparcheck=False, extension='upw',
unitnumber=31)
rch = fp.modflow.ModflowRch(ml, nrchop=3, ipakcb=53, rech=rech, irch=1,
extension='rch', unitnumber=19)
drn = fp.modflow.ModflowDrn(ml, ipakcb=53, stress_period_data=drn_dict,
dtype=drn_dict[0].dtype,
extension='drn', unitnumber=21, options=['NOPRINT', 'AUX IFACE'])
riv = fp.modflow.ModflowRiv(ml, ipakcb=53, stress_period_data=riv_dict,
dtype=riv_dict[0].dtype,
extension='riv', unitnumber=18, options=['NOPRINT', 'AUX IFACE'])
if GHB:
ghb = fp.modflow.ModflowGhb(ml, ipakcb=53, stress_period_data=ghb_dict,
dtype=ghb_dict[0].dtype,
extension='ghb', unitnumber=23, options=['NOPRINT', 'AUX IFACE'])
oc = fp.modflow.ModflowOc(ml, ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None, cboufm=None,
compact=True, stress_period_data={(0, 0): ['save head', 'save budget']},
extension=['oc', 'hds', 'ddn', 'cbc'], unitnumber=[14, 51, 52, 53])
# nwt = fp.modflow.ModflowNwt(ml, headtol=0.0001, fluxtol=500, maxiterout=1000,
# thickfact=1e-05, linmeth=2, iprnwt=1, ibotav=0, options='COMPLEX')
nwt = fp.modflow.ModflowNwt(ml, headtol=0.0001, fluxtol=500, maxiterout=100, thickfact=1e-05,
linmeth=2, iprnwt=1, ibotav=1, options='SPECIFIED', dbdtheta =0.80,
dbdkappa = 0.00001, dbdgamma = 0.0, momfact = 0.10, backflag = 1,
maxbackiter=30, backtol=1.05, backreduce=0.4, iacl=2, norder=1,
level=3, north=7, iredsys=1, rrctols=0.0,idroptol=1, epsrn=1.0E-3,
hclosexmd= 1.0e-4, mxiterxmd=200)
ml.write_input()
ml.remove_package('RIV')
ml.write_input()
success, output = ml.run_model(silent=True)
os.chdir(strt_dir)
if success:
print(" Your {:0d} layer model ran successfully".format(nlay))
else:
print(" Your {:0d} layer model didn't work".format(nlay))
```
## Run 1-layer MODFLOW
Use the function to run MODFLOW for 1 layer to get an approximate top-of-aquifer elevation
```
modflow(md, mfpth, model_ws, nlay=1, top=top, strt=strt, nrow=NROW, ncol=NCOL, botm=bot, ibound=ibound,
hk=Kh1d, rech=rech, stream_dict=drn_dict, delr=L, delc=L, hnoflo=hnoflo, hdry=hdry, iphdry=0)
```
Read the head file and calculate new layer top (wt) and bottom (bot) elevations based on the estimated
water table (wt) being the top of the top layer. Divide the surficial layer into NLAY equally thick layers between wt and the bedrock surface elevation (as computed using minimum surficial thickness).
```
hdobj = fp.utils.HeadFile(head_file_pth)
heads1 = hdobj.get_data(kstpkper=(0, 0))
heads1[heads1 == hnoflo] = np.nan
heads1[heads1 <= hdry] = np.nan
heads1 = heads1[0, :, :]
hdobj = None
```
## Create layering using the scenario in gen_mod_dict
Make a new model with (possibly) multiple layers. If there are dry cells in the 1-layer model, they are converted to NaN (not a number). The minimum function in the first line returns NaN if the corresponding element of either input array is NaN; in that case, replace NaN in modeltop with the top elevation. The process is similar to the 1-layer case. Thickness is estimated from modeltop and bedrock and is constrained to be at least min_thk (set in gen_mod_dict.py). This thickness is divided into num_surf_layers equally thick layers. The cumulative thickness of these layers gives the distance from the top of the model to the bottom of each layer; this 3D array of distances is subtracted from modeltop.
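As a quick illustration of the layering arithmetic in the next cell (hypothetical numbers for a single cell; min_thk plays no role here because the thickness is already large enough):
```
# Illustration only: layer bottoms for one cell with made-up elevations.
import numpy as np

modeltop, bedrock_el, NLAY = 100.0, 70.0, 3
thk = modeltop - bedrock_el                 # 30 m of surficial material
lay_thk = np.full(NLAY, thk / NLAY)         # three equally thick layers of 10 m
bot = modeltop - np.cumsum(lay_thk)         # -> [90., 80., 70.]
print(bot)
```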
```
modeltop = np.minimum(heads1, top)
nan = np.isnan(heads1)
modeltop[nan] = top[nan]
thk = modeltop - bedrock
thk[thk < min_thk] = min_thk
NLAY = num_surf_layers
lay_extrude = np.ones((NLAY, NROW, NCOL))
lay_thk = lay_extrude * thk / NLAY
bot = modeltop - np.cumsum(lay_thk, axis=0)
```
Using the estimated water table as the new top-of-aquifer elevation sometimes leads to a situation, usually in a very small number of cells, in which the drain elevation is below the bottom of the cell. The following procedure resets the bottom elevation to one meter below the drain elevation where that is the case.
```
stg = model_grid.stage.fillna(1.E+30, inplace=False)
tmpdrn = (lay_extrude * stg.reshape(NROW, NCOL)).ravel()
tmpbot = bot.ravel()
index = np.less(tmpdrn, tmpbot)
tmpbot[index] = tmpdrn[index] - 1.0
bot = tmpbot.reshape(NLAY, NROW, NCOL)
```
* If add_bedrock = True in gen_mod_dict.py, add a layer to the bottom and increment NLAY by 1.
* Assign the new bottom-most layer an elevation equal to the elevation of the bottom of the lowest surficial layer minus bedrock_thk, which is specified in rock_riv_dict (in gen_mod_dict.py).
* Concatenate the new bottom-of-bedrock-layer to the bottom of the surficial bottom array.
* Compute the vertical midpoint of each cell. Make an array (bedrock_index) that is True if the bedrock surface is higher than the midpoint and False if it is not.
* lay_extrude replaces the old lay_extrude to account for the new bedrock layer. It is not used in this cell, but is used later to extrude other arrays.
```
sol_thk = model_grid.soller_thk.reshape(NROW, NCOL)
tmp = top - sol_thk
bedrock_4_K = bedrock.copy()
bedrock_4_K[bedrock > top] = tmp[bedrock > top]
if add_bedrock:
NLAY = num_surf_layers + 1
lay_extrude = np.ones((NLAY, NROW, NCOL))
bed_bot = bot[-1:,:,:] - bedrock_thk
bot = np.concatenate((bot, bed_bot), axis=0)
mids = bot + thk / NLAY / 2
bedrock_index = mids < bedrock_4_K
bedrock_index[-1:,:,:] = True
elif not add_bedrock:
print(' no bedrock')
pass
else:
    print(' add_bedrock variable needs to be True or False')
```
Extrude all arrays to NLAY number of layers. Create a top-of-aquifer elevation (fake_top) that is higher (20% in this case) than the simulated 1-layer water table because in doing this approximation, some stream elevations end up higher than top_of_aquifer and thus do not operate as drains. The fake_top shouldn't affect model computations if it is set high enough because the model uses convertible (confined or unconfined) layers.
```
fake_top = (modeltop * 1.2).astype(np.float32)
strt = (lay_extrude * modeltop * 1.05).astype(np.float32)
ibound = (lay_extrude * ibound).astype(np.int16)
```
Perform the mapping from zone number to K to create the Kh3d array.
```
zones3d = np.zeros(( NLAY, NROW, NCOL ), dtype=np.int32)
qa = model_grid.qu_atlas.reshape(NROW, NCOL)
qa3d = (lay_extrude * qa).astype(np.int32)
zones3d[qa3d == 1] = 1
zones3d[qa3d == 9] = 1
zones3d[qa3d == 11] = 1
zones3d[qa3d == 17] = 1
if add_bedrock:
zones3d[bedrock_index] = 3
la = model_grid.lake.reshape(NROW, NCOL)
zones3d[0, la == 1] = 2
Kh3d = np.zeros(( NLAY, NROW, NCOL ), dtype=np.float32)
for key, val in zone_dict.items():
Kh3d[zones3d == key] = K_dict[val]
```
Run MODFLOW again using the new layer definitions. The difference from the first run is that the top-of-aquifer elevation is the 1-layer water table rather than land surface, and of course, the number of surficial layers and/or the presence of a bedrock layer is different.
```
modflow(md, mfpth, model_ws, nlay=NLAY, top=fake_top, strt=strt, nrow=NROW, ncol=NCOL,
botm=bot, ibound=ibound, hk=Kh3d, rech=rech, stream_dict=drn_dict, delr=L,
delc=L, hnoflo=hnoflo, hdry=hdry, iphdry=1)
```
Read the new head array
```
hdobj = fp.utils.HeadFile(head_file_pth)
heads = hdobj.get_data()
hdobj = None
```
Make a 2D array of the heads in the highest active cells and call it the water_table
```
heads[heads == hnoflo] = np.nan
heads[heads <= hdry] = np.nan
hin = np.argmax(np.isfinite(heads), axis=0)
row, col = np.indices((hin.shape))
water_table = heads[hin, row, col]
water_table_ma = np.ma.MaskedArray(water_table, inactive)
```
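A quick toy check (illustrative only) of the "first finite layer" trick used above:
```
# Illustration only: pick the head from the highest active (finite) layer of a 3-layer stack.
import numpy as np

toy = np.array([[[np.nan]], [[5.0]], [[4.0]]])   # layer 0 dry, layers 1-2 wet, one cell
hin = np.argmax(np.isfinite(toy), axis=0)        # index of the first finite layer -> [[1]]
r, c = np.indices(hin.shape)
print(toy[hin, r, c])                            # -> [[5.]]
```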
Save the head array to a geotiff file.
```
data = water_table_ma
src_pth = os.path.join(geo_ws, 'ibound.tif')
src = gdal.Open(src_pth)
dst_pth = os.path.join(model_ws, 'pre-heads.tif')
driver = gdal.GetDriverByName('GTiff')
dst = driver.CreateCopy(dst_pth, src, 0)
band = dst.GetRasterBand(1)
band.WriteArray(data)
band.SetNoDataValue(np.nan)
dst = None
src = None
```
Save the heads and K from the upper-most layer to model_grid.csv
```
model_grid['pre_cal_heads'] = water_table_ma.ravel()
model_grid['pre_cal_K'] = Kh3d[0,:,:].ravel()
if add_bedrock:
model_grid['thk'] = model_grid.top - bot[-1,:,:].ravel() + bedrock_thk
else:
model_grid['thk'] = model_grid.top - bot[-1,:,:].ravel()
model_grid['thkR'] = model_grid.thk / model_grid.recharge
model_grid.to_csv(os.path.join(model_ws, 'model_grid.csv'))
```
Save zone array for use in calibration.
```
zone_file = os.path.join(model_ws, 'zone_array.npz')
np.savez(zone_file, zone=zones3d)
```
Plot a cross-section to see what the layers look like. Change row_to_plot to see other rows. Columns could be easily added.
```
def calc_error(top, head, obs_type):
# an offset of 1 is used to eliminate counting heads that
# are within 1 m of their target as errors.
# count topo and hydro errors
t = top < (head - err_tol)
h = top > (head + err_tol)
tmp_df = pd.DataFrame({'head':head, 'ot':obs_type, 't':t, 'h':h})
tmp = tmp_df.groupby('ot').sum()
h_e_ = tmp.loc['hydro', 'h']
t_e_ = tmp.loc['topo', 't']
result = np.array([h_e_, t_e_])
return result
hydro, topo = calc_error(model_grid.top, water_table.ravel(), model_grid.obs_type)
num_hydro = model_grid.obs_type.value_counts()['hydro']
num_topo = model_grid.obs_type.value_counts()['topo']
num_cells = num_hydro + num_topo
hydro = hydro / num_hydro
topo = topo / num_topo
def ma2(data2D):
return np.ma.MaskedArray(data2D, mask=inactive)
def ma3(data3D):
return np.ma.MaskedArray(data3D, mask=(ibound == 0))
row_to_plot = NROW // 2  # integer division so it can be used as an array index
xplot = np.linspace( L / 2, NCOL * L - L / 2, NCOL)
mKh = ma3(Kh3d)
mtop = ma2(top)
mbed = ma2(bedrock)
mbot = ma3(bot)
colors = ['green', 'red', 'gray']
fig = plt.figure(figsize=(8,8))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax1.plot(xplot, mtop[row_to_plot, ], label='land surface', color='black', lw=0.5)
ax1.plot(xplot, water_table_ma[row_to_plot, ], label='water table', color='blue', lw=1.)
ax1.fill_between(xplot, mtop[row_to_plot, ], mbot[0, row_to_plot, :], alpha=0.25,
color='blue', label='layer 1', lw=0.75)
for lay in range(NLAY-1):
label = 'layer {}'.format(lay+2)
ax1.fill_between(xplot, mbot[lay, row_to_plot, :], mbot[lay+1, row_to_plot, :], label=label,
color=colors[lay], alpha=0.250, lw=0.75)
ax1.plot(xplot, mbed[row_to_plot, :], label='bedrock (Soller)', color='red', linestyle='dotted', lw=1.5)
ax1.plot(xplot, mbot[-1, row_to_plot, :], color='black', linestyle='solid', lw=0.5)
ax1.legend(loc=0, frameon=False, fontsize=10, ncol=3)#, bbox_to_anchor=(1.0, 0.5))
ax1.set_ylabel('Altitude, in meters')
ax1.set_xticklabels('')
ax1.set_title('Default section along row {}, {} model, weight {:0.1f}\nK fine = {:0.1f} K coarse = {:0.1f}\
K bedrock = {:0.1f}\nFraction dry drains {:0.2f} Fraction flooded cells {:0.2f}'.format(row_to_plot, \
md, 1, K_dict['K_fine'], K_dict['K_coarse'], K_dict['K_bedrock'], hydro, topo))
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax2.fill_between(xplot, 0, mKh[0, row_to_plot, :], alpha=0.25, color='blue',
label='layer 1', lw=0.75, step='mid')
ax2.set_xlabel('Distance in meters')
ax2.set_yscale('log')
ax2.set_ylabel('Hydraulic conductivity\n in layer 1, in meters / day')
line = '{}_{}_xs.png'.format(md, scenario_dir)
fig_name = os.path.join(model_ws, line)
plt.savefig(fig_name)
t = top < (water_table - err_tol)
h = top > (water_table + err_tol)
mt = np.ma.MaskedArray(t.reshape(NROW, NCOL), model_grid.obs_type != 'topo')
mh = np.ma.MaskedArray(h.reshape(NROW, NCOL), model_grid.obs_type != 'hydro')
from matplotlib import colors
cmap = colors.ListedColormap(['0.50', 'red'])
cmap2 = colors.ListedColormap(['blue'])
back = np.ma.MaskedArray(ibound[0,:,:], ibound[0,:,:] == 0)
fig, ax = plt.subplots(1,2)
ax[0].imshow(back, cmap=cmap2, alpha=0.2)
im0 = ax[0].imshow(mh, cmap=cmap, interpolation='None')
ax[0].axhline(row_to_plot)
# fig.colorbar(im0, ax=ax[0])
ax[1].imshow(back, cmap=cmap2, alpha=0.2)
im1 = ax[1].imshow(mt, cmap=cmap, interpolation='None')
ax[1].axhline(row_to_plot)
# fig.colorbar(im1, ax=ax[1])
fig.suptitle('Default model errors (in red) along row {}, {} model, weight {:0.1f}\nK fine = {:0.1f} K coarse = {:0.1f}\
K bedrock = {:0.1f}\nFraction dry drains {:0.2f} Fraction flooded cells {:0.2f}'.format(row_to_plot, \
md, 1.0, K_dict['K_fine'], K_dict['K_coarse'], K_dict['K_bedrock'], hydro, topo))
# fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
# wspace=None, hspace=None)
fig.set_size_inches(6, 6)
# line = '{}_{}_error_map_cal.png'.format(md, scenario_dir)
line = '{}_{}_error_map.png'.format(md, scenario_dir) #csc
fig_name = os.path.join(model_ws, line)
plt.savefig(fig_name)
```
```
%matplotlib inline
```
Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
======================================================
Dynamic versus Static Deep Learning Toolkits
--------------------------------------------
Pytorch is a *dynamic* neural network kit. Another example of a dynamic
kit is `Dynet <https://github.com/clab/dynet>`__ (I mention this because
working with Pytorch and Dynet is similar. If you see an example in
Dynet, it will probably help you implement it in Pytorch). The opposite
is the *static* tool kit, which includes Theano, Keras, TensorFlow, etc.
The core difference is the following:
* In a static toolkit, you define
a computation graph once, compile it, and then stream instances to it.
* In a dynamic toolkit, you define a computation graph *for each
instance*. It is never compiled and is executed on-the-fly
Without a lot of experience, it is difficult to appreciate the
difference. One example is to suppose we want to build a deep
constituent parser. Suppose our model involves roughly the following
steps:
* We build the tree bottom up
* Tag the root nodes (the words of the sentence)
* From there, use a neural network and the embeddings
of the words to find combinations that form constituents. Whenever you
form a new constituent, use some sort of technique to get an embedding
of the constituent. In this case, our network architecture will depend
completely on the input sentence. In the sentence "The green cat
scratched the wall", at some point in the model, we will want to combine
the span $(i,j,r) = (1, 3, \text{NP})$ (that is, an NP constituent
spans word 1 to word 3, in this case "The green cat").
However, another sentence might be "Somewhere, the big fat cat scratched
the wall". In this sentence, we will want to form the constituent
$(2, 4, NP)$ at some point. The constituents we will want to form
will depend on the instance. If we just compile the computation graph
once, as in a static toolkit, it will be exceptionally difficult or
impossible to program this logic. In a dynamic toolkit though, there
isn't just 1 pre-defined computation graph. There can be a new
computation graph for each instance, so this problem goes away.
Dynamic toolkits also have the advantage of being easier to debug and
the code more closely resembling the host language (by that I mean that
Pytorch and Dynet look more like actual Python code than Keras or
Theano).
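As a tiny illustration of that point (not part of this tutorial's model), the following sketch builds a different graph on every call because the number of layers applied depends on the input:
```
# Illustration only: a per-instance computation graph in Pytorch.
import torch
import torch.nn as nn

lin = nn.Linear(4, 4)

def run(x, depth):
    # the loop length depends on the data, so each call defines its own graph
    for _ in range(depth):
        x = torch.relu(lin(x))
    return x.sum()

run(torch.randn(1, 4), depth=2).backward()
run(torch.randn(1, 4), depth=5).backward()  # a different graph, same Python code
```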
Bi-LSTM Conditional Random Field Discussion
-------------------------------------------
For this section, we will see a full, complicated example of a Bi-LSTM
Conditional Random Field for named-entity recognition. The LSTM tagger
above is typically sufficient for part-of-speech tagging, but a sequence
model like the CRF is really essential for strong performance on NER.
Familiarity with CRFs is assumed. Although this name sounds scary, all
the model is is a CRF but where an LSTM provides the features. This is
an advanced model though, far more complicated than any earlier model in
this tutorial. If you want to skip it, that is fine. To see if you're
ready, see if you can:
- Write the recurrence for the viterbi variable at step i for tag k.
- Modify the above recurrence to compute the forward variables instead.
- Modify again the above recurrence to compute the forward variables in
log-space (hint: log-sum-exp)
If you can do those three things, you should be able to understand the
code below. Recall that the CRF computes a conditional probability. Let
$y$ be a tag sequence and $x$ an input sequence of words.
Then we compute
\begin{align}P(y|x) = \frac{\exp(\text{Score}(x, y))}{\sum_{y'} \exp(\text{Score}(x, y'))}\end{align}
Where the score is determined by defining some log potentials
$\log \psi_i(x,y)$ such that
\begin{align}\text{Score}(x,y) = \sum_i \log \psi_i(x,y)\end{align}
To make the partition function tractable, the potentials must look only
at local features.
In the Bi-LSTM CRF, we define two kinds of potentials: emission and
transition. The emission potential for the word at index $i$ comes
from the hidden state of the Bi-LSTM at timestep $i$. The
transition scores are stored in a $|T| \times |T|$ matrix
$\textbf{P}$, where $T$ is the tag set. In my
implementation, $\textbf{P}_{j,k}$ is the score of transitioning
to tag $j$ from tag $k$. So:
\begin{align}\text{Score}(x,y) = \sum_i \log \psi_\text{EMIT}(y_i \rightarrow x_i) + \log \psi_\text{TRANS}(y_{i-1} \rightarrow y_i)\end{align}
\begin{align}= \sum_i h_i[y_i] + \textbf{P}_{y_i, y_{i-1}}\end{align}
where in this second expression, we think of the tags as being assigned
unique non-negative indices.
If the above discussion was too brief, you can check out
`this <http://www.cs.columbia.edu/%7Emcollins/crf.pdf>`__ write up from
Michael Collins on CRFs.
Implementation Notes
--------------------
The example below implements the forward algorithm in log space to
compute the partition function, and the viterbi algorithm to decode.
Backpropagation will compute the gradients automatically for us. We
don't have to do anything by hand.
The implementation is not optimized. If you understand what is going on,
you'll probably quickly see that iterating over the next tag in the
forward algorithm could probably be done in one big operation. I wanted
the code to be more readable. If you want to make the relevant change,
you could probably use this tagger for real tasks.
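For reference, a minimal sketch (not part of the tutorial code) of the "one big operation" mentioned above, assuming a PyTorch version that provides `torch.logsumexp` and the same shape/transition conventions as the model below:
```
import torch

def forward_step_vectorized(forward_var, transitions, feat):
    # forward_var: [1, T] log-alphas over the previous tags
    # transitions: [T, T], entry (j, k) = score of moving to tag j from tag k
    # feat: [T] emission scores for the current word
    scores = forward_var + transitions + feat.view(-1, 1)  # row j ranges over previous tag k
    return torch.logsumexp(scores, dim=1).view(1, -1)      # new log-alphas, shape [1, T]
```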
```
# Author: Robert Guthrie
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
```
Helper functions to make the code more readable.
```
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def prepare_sequence(seq, to_ix):
idxs = [to_ix[w] for w in seq]
tensor = torch.LongTensor(idxs)
return autograd.Variable(tensor)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
```
Create model
```
class BiLSTM_CRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
num_layers=1, bidirectional=True)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return (autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)),
autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)))
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
forward_var = autograd.Variable(init_alphas)
# Iterate through the sentence
for feat in feats:
alphas_t = [] # The forward variables at this timestep
for next_tag in range(self.tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = feat[next_tag].view(
1, -1).expand(1, self.tagset_size)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = forward_var + trans_score + emit_score
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentence):
self.hidden = self.init_hidden()
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = autograd.Variable(torch.Tensor([0]))
tags = torch.cat([torch.LongTensor([self.tag_to_ix[START_TAG]]), tags])
for i, feat in enumerate(feats):
score = score + \
self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = autograd.Variable(init_vvars)
for feat in feats:
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = forward_var + self.transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id])
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
# Pop off the start tag (we dont want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags):
feats = self._get_lstm_features(sentence)
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score
def forward(self, sentence): # dont confuse this with _forward_alg above.
# Get the emission scores from the BiLSTM
lstm_feats = self._get_lstm_features(sentence)
# Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
return score, tag_seq
```
Run training
```
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4
# Make up some training data
training_data = [(
"the wall street journal reported today that apple corporation made money".split(),
"B I I I O O O B I O O".split()
), (
"georgia tech is a university in georgia".split(),
"B I O O O O B".split()
)]
word_to_ix = {}
for sentence, tags in training_data:
for word in sentence:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
# Check predictions before training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
precheck_tags = torch.LongTensor([tag_to_ix[t] for t in training_data[0][1]])
print(model(precheck_sent))
# Make sure prepare_sequence from earlier in the LSTM section is loaded
for epoch in range(
300): # again, normally you would NOT do 300 epochs, it is toy data
for sentence, tags in training_data:
# Step 1. Remember that Pytorch accumulates gradients.
# We need to clear them out before each instance
model.zero_grad()
# Step 2. Get our inputs ready for the network, that is,
# turn them into Variables of word indices.
sentence_in = prepare_sequence(sentence, word_to_ix)
targets = torch.LongTensor([tag_to_ix[t] for t in tags])
# Step 3. Run our forward pass.
neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)
# Step 4. Compute the loss, gradients, and update the parameters by
# calling optimizer.step()
neg_log_likelihood.backward()
optimizer.step()
# Check predictions after training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print(model(precheck_sent))
# We got it!
```
Exercise: A new loss function for discriminative tagging
--------------------------------------------------------
It wasn't really necessary for us to create a computation graph when
doing decoding, since we do not backpropagate from the viterbi path
score. Since we have it anyway, try training the tagger where the loss
function is the difference between the Viterbi path score and the score
of the gold-standard path. It should be clear that this function is
non-negative and 0 when the predicted tag sequence is the correct tag
sequence. This is essentially *structured perceptron*.
This modification should be short, since Viterbi and score\_sentence are
already implemented. This is an example of the shape of the computation
graph *depending on the training instance*. Although I haven't tried
implementing this in a static toolkit, I imagine that it is possible but
much less straightforward.
Pick up some real data and do a comparison!
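If you get stuck, here is one possible shape of such a loss; this is a sketch, not the official solution, and it simply reuses the helper methods already defined in `BiLSTM_CRF` above:
```
def structured_perceptron_loss(model, sentence_in, targets):
    # score of the best (Viterbi) path minus the score of the gold path; >= 0,
    # and exactly 0 when the predicted tag sequence equals the gold sequence
    feats = model._get_lstm_features(sentence_in)
    viterbi_score, _ = model._viterbi_decode(feats)
    gold_score = model._score_sentence(feats, targets)
    return viterbi_score - gold_score
```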
## Sparse logistic regression
$\newcommand{\n}[1]{\left\|#1 \right\|}$
$\newcommand{\R}{\mathbb R} $
$\newcommand{\N}{\mathbb N} $
$\newcommand{\Z}{\mathbb Z} $
$\newcommand{\lr}[1]{\left\langle #1\right\rangle}$
We want to minimize
$$\min_x J(x) := \sum_{i=1}^m \log\bigl(1+\exp (-b_i\lr{a_i, x})\bigr) + \gamma \n{x}_1$$
where $(a_i, b_i)\in \R^n\times \{-1,1\}$ is the training set and $\gamma >0$. We can rewrite the objective as
$J(x) = \tilde f(Kx)+g(x)$,
where $$\tilde f(y)=\sum_{i=1}^{m} \log (1+\exp(y_i)), \quad K = -b*A \in \R^{m\times n}, \quad g(x) = \gamma \n{x}_1$$
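For reference, the proximal operator and gradients used in the code below are

$$\operatorname{prox}_{\rho g}(x)_i = \operatorname{sign}(x_i)\,\max(|x_i|-\rho\gamma,\,0), \qquad \nabla \tilde f(y)_i = \frac{e^{y_i}}{1+e^{y_i}}, \qquad \nabla (\tilde f\circ K)(x) = K^\top \nabla \tilde f(Kx).$$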
```
import numpy as np
import scipy.linalg as LA
import scipy.sparse as spr
import scipy.sparse.linalg as spr_LA
from time import perf_counter
from sklearn import datasets
filename = "data/a9a"
#filename = "data/real-sim.bz2"
#filename = "data/rcv1_train.binary.bz2"
#filename = "data/kdda.t.bz2"
A, b = datasets.load_svmlight_file(filename)
m, n = A.shape
print("The dataset {}. The dimensions: m={}, n={}".format(filename[5:], m, n))
# define all ingredients for sparse logistic regression
gamma = 0.005 * LA.norm(A.T.dot(b), np.inf)
K = (A.T.multiply(-b)).T.tocsr()
# find the norm of K^T K
L = spr_LA.svds(K, k=1, return_singular_vectors=False)**2
# starting point
x0 = np.zeros(n)
# stepsize
ss = 4/L
g = lambda x: gamma*LA.norm(x,1)
prox_g = lambda x, rho: x + np.clip(-x, -rho*gamma, rho*gamma)
f = lambda x: np.log(1. + np.exp(x)).sum()
def df(x):
exp_x = np.exp(x)
return exp_x/(1.+exp_x)
dh = lambda x, Kx: K.T.dot(df(Kx))
# residual
res = lambda x: LA.norm(x-prox_g(x-dh(x,K.dot(x)), 1))
# energy
J = lambda x, Kx: f(Kx)+g(x)
### Algorithms
def prox_grad(x1, s=1, numb_iter=100):
"""
Implementation of the proximal gradient method.
x1: array, a starting point
s: positive number, a stepsize
numb_iter: positive integer, number of iterations
Returns an array of energy values, computed in each iteration, and the
argument x_k after numb_iter iterations
"""
begin = perf_counter()
x = x1.copy()
Kx = K.dot(x)
values = [J(x, Kx)]
dhx = dh(x,Kx)
for i in range(numb_iter):
#x = prox_g(x - s * dh(x, Kx), s)
x = prox_g(x - s * dhx, s)
Kx = K.dot(x)
dhx = dh(x,Kx)
values.append(J(x, Kx))
end = perf_counter()
print("Time execution of prox-grad:", end - begin)
return np.array(values), x
def fista(x1, s=1, numb_iter=100):
"""
Implementation of the FISTA.
x1: array, a starting point
s: positive number, a stepsize
numb_iter: positive integer, number of iterations
Returns an array of energy values, computed in each iteration, and the
argument x_k after numb_iter iterations
"""
begin = perf_counter()
x, y = x1.copy(), x1.copy()
t = 1.
Ky = K.dot(y)
values = [J(y,Ky)]
for i in range(numb_iter):
x1 = prox_g(y - s * dh(y, Ky), s)
t1 = 0.5 * (1 + np.sqrt(1 + 4 * t**2))
y = x1 + (t - 1) / t1 * (x1 - x)
x, t = x1, t1
Ky = K.dot(y)
values.append(J(y, Ky))
end = perf_counter()
print("Time execution of FISTA:", end - begin)
return np.array(values), x
def adaptive_graal(x1, numb_iter=100):
"""
Implementation of the adaptive GRAAL.
x1: array, a starting point
numb_iter: positive integer, number of iterations
Returns an array of energy values, computed in each iteration, and the
argument x_k after numb_iter iterations
"""
begin = perf_counter()
phi = 1.5
x, x_ = x1.copy(), x1.copy()
x0 = x + np.random.randn(x.shape[0]) * 1e-9
Kx = K.dot(x)
dhx = dh(x, Kx)
la = phi / 2 * LA.norm(x - x0) / LA.norm(dhx - dh(x0, K.dot(x0)))
rho = 1. / phi + 1. / phi**2
values = [J(x, Kx)]
th = 1
for i in range(numb_iter):
x1 = prox_g(x_ - la * dhx, la)
Kx1 = K.dot(x1)
dhx1 = dh(x1, Kx1)
n1 = LA.norm(x1 - x)**2
n2 = LA.norm(dhx1 - dhx)**2
n1_div_n2 = n1/n2 if n2 != 0 else la*10
la1 = min(rho * la, 0.25 * phi * th / la * (n1_div_n2))
x_ = ((phi - 1) * x1 + x_) / phi
th = phi * la1 / la
x, la, dhx = x1, la1, dhx1
values.append(J(x1, Kx1))
end = perf_counter()
print("Time execution of aGRAAL:", end - begin)
return values, x, x_
```
Run the algorithms. This might take some time if the dataset and/or the number of iterations is large.
```
N = 10000
ans1 = prox_grad(x0, ss, numb_iter=N)
ans2 = fista(x0, ss, numb_iter=N)
ans3 = adaptive_graal(x0, numb_iter=N)
x1, x2, x3 = ans1[1], ans2[1], ans3[1]
print("Residuals:", [res(x) for x in [x1, x2, x3]])
```
Plot the results
```
import matplotlib.pyplot as plt  # plotting is used below but not yet imported in this notebook
values = [ans1[0], ans2[0], ans3[0]]
labels = ["PGM", "FISTA", "aGRAAL"]
linestyles = [':', "--", "-"]
colors = ['b', 'g', '#FFD700']
v_min = min([min(v) for v in values])
plt.figure(figsize=(6,4))
for i,v in enumerate(values):
plt.plot(v - v_min, color=colors[i], label=labels[i], linestyle=linestyles[i])
plt.yscale('log')
plt.xlabel(u'iterations, k')
plt.ylabel('$J(x^k)-J_{_*}$')
plt.legend()
#plt.savefig('figures/a9a.pdf', bbox_inches='tight')
plt.show()
plt.clf()
np.max(spr_LA.eigsh(K.T.dot(K))[0])
L
```
# Ibis Integration (Experimental)
The [Ibis project](https://ibis-project.org/docs/) tries to bridge the gap between local Python and [various backends](https://ibis-project.org/docs/backends/index.html) including distributed systems such as Spark and Dask. The main idea is to create a pythonic interface to express SQL semantic, so the expression is agnostic to the backends.
The design idea is very aligned with Fugue. But please notice there are a few key differences:
* **Fugue supports both pythonic APIs and SQL**, and the choice should be determined by the particular case or the user's preference. Ibis, on the other hand, focuses on the pythonic expression of SQL and perfects it.
* **Fugue supports SQL and non-SQL semantics for data transformation.** Besides SQL, another important option is [Fugue Transform](introduction.html#fugue-transform). Fugue transformers can wrap complicated Python/Pandas logic and apply it to dataframes in a distributed way. A typical example is distributed model inference: the inference part has to be done in Python and can easily be achieved by a transformer, while the data preparation may be done nicely in SQL or Ibis.
* **Fugue and Ibis are on different abstraction layers.** Ibis is nice for constructing single SQL statements to accomplish single tasks. Even if it involves multiple tables and multiple steps, its final step is either outputting one table or inserting one table into a database. A Fugue workflow, on the other hand, orchestrates these tasks. For example, it can read a table, do the first transformation and save to a file, then do the second transformation and print. Each transformation may be done using Ibis, but the loading, saving, printing, and the orchestration itself can be done by Fugue.
This is also why Ibis can be a very nice option for Fugue users to build their pipelines. People who prefer pythonic APIs can keep all the logic in Python with the help of Ibis. Although Fugue has its own functional API similar to Ibis, the programming interface of Ibis is really elegant. It usually helps users write less but more expressive code to achieve the same thing.
## Hello World
In this example, we try to achieve this SQL semantic:
```sql
SELECT a, a+1 AS b FROM
(SELECT a FROM tb1 UNION SELECT a FROM tb2)
```
```
from ibis import BaseBackend, literal
import ibis.expr.types as ir
def ibis_func(backend:BaseBackend) -> ir.TableExpr:
tb1 = backend.table("tb1")
tb2 = backend.table("tb2")
tb3 = tb1.union(tb2)
return tb3.mutate(b=tb3.a+literal(1))
```
Now let's test with the pandas backend
```
import ibis
import pandas as pd
con = ibis.pandas.connect({
"tb1": pd.DataFrame([[0]], columns=["a"]),
"tb2": pd.DataFrame([[1]], columns=["a"])
})
ibis_func(con).execute()
```
Now let's make this a part of Fugue
```
from fugue import FugueWorkflow
from fugue_ibis import run_ibis
dag = FugueWorkflow()
df1 = dag.df([[0]], "a:long")
df2 = dag.df([[1]], "a:long")
df3 = run_ibis(ibis_func, tb1=df1, tb2=df2)
df3.show()
```
Now let's run on Pandas
```
dag.run()
```
Now let's run on Dask
```
import fugue_dask
dag.run("dask")
```
Now let's run on DuckDB
```
import fugue_duckdb
dag.run("duck")
```
For each execution engine, Ibis will also run on the corresponding backend.
## A deeper integration
The above approach needs a function taking in an Ibis backend and returning a `TableExpr`. The following is another approach that is simpler and more elegant.
```
from fugue_ibis import as_ibis, as_fugue
dag = FugueWorkflow()
tb1 = as_ibis(dag.df([[0]], "a:long"))
tb2 = as_ibis(dag.df([[1]], "a:long"))
tb3 = tb1.union(tb2)
df3 = as_fugue(tb3.mutate(b=tb3.a+literal(1)))
df3.show()
dag.run()
```
Alternatively, you can treat `as_ibis` and `as_fugue` as class methods. This is more convenient to use, but it's a bit magical. It is achieved by adding these two methods to the corresponding classes using `setattr`. This patching-like design pattern is widely used by Ibis.
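As a generic illustration of this patching pattern (this is not the actual `fugue_ibis` source, just a toy class with made-up names):
```
# Illustration only: attaching a method to an existing class with setattr.
class Frame:
    def __init__(self, rows):
        self.rows = rows

def as_upper(self):
    return [r.upper() for r in self.rows]

setattr(Frame, "as_upper", as_upper)      # every Frame instance now has .as_upper()
print(Frame(["a", "b"]).as_upper())       # ['A', 'B']
```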
```
import fugue_ibis # must import
dag = FugueWorkflow()
tb1 = dag.df([[0]], "a:long").as_ibis()
tb2 = dag.df([[1]], "a:long").as_ibis()
tb3 = tb1.union(tb2)
df3 = tb3.mutate(b=tb3.a+literal(1)).as_fugue()
df3.show()
dag.run()
```
By importing `fugue_ibis`, the two methods are automatically added.
It's up to the users which way to go. The first approach (`run_ibis`) is the best for separating the Ibis logic and, as you can see, it is also great for unit testing. The second approach is elegant, but you will have to unit test the code together with the logic before and after the conversions. The third approach is the most intuitive, but it's a bit magical.
## Z-Score
Now, let's consider a practical example. We want to use Fugue to compute z-score of a dataframe, partitioning should be an option. The reason to implement it on Fugue level is that the compute becomes scale agnostic and framework agnostic.
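For reference, the quantity computed below, per partition when a partition spec is given, is

$$z = \frac{c - \operatorname{mean}(c)}{\operatorname{std}(c)}.$$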
```
from fugue import WorkflowDataFrame
from fugue_ibis import as_ibis, as_fugue
def z_score(df:WorkflowDataFrame, input_col:str, output_col:str) -> WorkflowDataFrame:
by = df.partition_spec.partition_by
idf = as_ibis(df)
col = idf[input_col]
if len(by)==0:
return as_fugue(idf.mutate(**{output_col:(col - col.mean())/col.std()}))
agg = idf.group_by(by).aggregate(mean_=col.mean(), std_=col.std())
j = idf.inner_join(agg, by)[idf, ((idf[input_col]-agg.mean_)/agg.std_).name(output_col)]
return as_fugue(j)
```
Now, generate testing data
```
import numpy as np
np.random.seed(0)
pdf = pd.DataFrame(dict(
a=np.random.choice(["a","b"], 100),
b=np.random.choice(["c","d"], 100),
c=np.random.rand(100),
))
pdf["expected1"] = (pdf.c - pdf.c.mean())/pdf.c.std()
pdf = pdf.groupby(["a", "b"]).apply(lambda tdf: tdf.assign(expected2=(tdf.c - tdf.c.mean())/tdf.c.std())).reset_index(drop=True)
```
And here is the final code.
```
dag = FugueWorkflow()
df = z_score(dag.df(pdf), "c", "z1")
df = z_score(df.partition_by("a", "b"), "c", "z2")
df.show()
dag.run()
```
## Consistency issues
Ibis as of 2.0.0 can behave differently on different backends. Here are some examples of the common discrepancies between pandas and SQL.
```
# pandas drops null keys on group (by default), SQL doesn't
dag = FugueWorkflow()
df = dag.df([["a",1],[None,2]], "a:str,b:int").as_ibis()
df.groupby(["a"]).aggregate(s=df.b.sum()).as_fugue().show()
dag.run()
dag.run("duckdb")
# pandas joins on NULLs, SQL doesn't
dag = FugueWorkflow()
df1 = dag.df([["a",1],[None,2]], "a:str,b:int").as_ibis()
df2 = dag.df([["a",1],[None,2]], "a:str,c:int").as_ibis()
df1.inner_join(df2, ["a"])[df1, df2.c].as_fugue().show()
dag.run()
dag.run("duckdb")
```
Since the Ibis integration is experimental, we rely on Ibis to achieve consistent behaviors. If you have any Ibis-specific questions, please also consider asking in [Ibis issues](https://github.com/ibis-project/ibis/issues).
### Homework: going neural (6 pts)
We've checked out statistical approaches to language models in the last notebook. Now let's go find out what deep learning has to offer.
<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/expanding_mind_lm_kn_3.png' width=300px>
We're gonna use the same dataset as before, except this time we build a language model that's character-level, not word level.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
```
Working on character level means that we don't need to deal with large vocabulary or missing words. Heck, we can even keep uppercase words in text! The downside, however, is that all our sequences just got a lot longer.
However, we still need special tokens:
* Begin Of Sequence (__BOS__) - this token is at the start of each sequence. We use it so that we always have non-empty input to our neural network. $P(x_1) = P(x_1 \mid BOS)$
* End Of Sequence (__EOS__) - you guessed it... this token is at the end of each sequence. The catch is that it should __not__ occur anywhere else except at the very end. If our model produces this token, the sequence is over.
```
BOS, EOS = ' ', '\n'
data = pd.read_json("./arxivData.json")
lines = data.apply(lambda row: (row['title'] + ' ; ' + row['summary'])[:512], axis=1) \
.apply(lambda line: BOS + line.replace(EOS, ' ') + EOS) \
.tolist()
# if you missed the seminar, download data here - https://yadi.sk/d/_nGyU2IajjR9-w
```
Our next step is __building char-level vocabulary__. Put simply, you need to assemble a list of all unique tokens in the dataset.
```
# get all unique characters from lines (including capital letters and symbols)
tokens = set(''.join(lines))
tokens = sorted(tokens)
n_tokens = len(tokens)
print ('n_tokens = ',n_tokens)
assert 100 < n_tokens < 150
assert BOS in tokens and EOS in tokens
```
We can now assign each character its index in the tokens list. This way we can encode a string into a TF-friendly integer vector.
```
# dictionary of character -> its identifier (index in tokens list)
token_to_id = {token: id for id, token in enumerate(tokens)}
assert len(tokens) == len(token_to_id), "dictionaries must have same size"
for i in range(n_tokens):
    assert token_to_id[tokens[i]] == i, "token identifier must be its position in tokens list"
print("Seems alright!")
```
Our final step is to assemble several strings into an integer matrix `[batch_size, text_length]`.
The only problem is that each sequence has a different length. We can work around that by padding short sequences with extra _EOS_ or cropping long sequences. Here's how it works:
```
def to_matrix(lines, max_len=None, pad=token_to_id[EOS], dtype='int32'):
"""Casts a list of lines into tf-digestable matrix"""
max_len = max_len or max(map(len, lines))
lines_ix = np.zeros([len(lines), max_len], dtype) + pad
for i in range(len(lines)):
line_ix = list(map(token_to_id.get, lines[i][:max_len]))
lines_ix[i, :len(line_ix)] = line_ix
return lines_ix
#Example: cast 4 random names to matrices, pad with zeros
dummy_lines = [
' abc\n',
' abacaba\n',
' abc1234567890\n',
]
print(to_matrix(dummy_lines))
```
### Neural Language Model
Just like for N-gram LMs, we want to estimate probability of text as a joint probability of tokens (symbols this time).
$$P(X) = \prod_t P(x_t \mid x_0, \dots, x_{t-1}).$$
Instead of counting all possible statistics, we want to train a neural network with parameters $\theta$ that estimates the conditional probabilities:
$$ P(x_t \mid x_0, \dots, x_{t-1}) \approx p(x_t \mid x_0, \dots, x_{t-1}, \theta) $$
But before we optimize, we need to define our neural network. Let's start with a fixed-window (aka convolutional) architecture:
<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/fixed_window_lm.jpg' width=400px>
```
import tensorflow as tf
import keras, keras.layers as L
sess = tf.InteractiveSession()
class FixedWindowLanguageModel:
def __init__(self, n_tokens=n_tokens, emb_size=16, hid_size=64):
"""
A fixed window model that looks on at least 5 previous symbols.
Note: fixed window LM is effectively performing a convolution over a sequence of words.
This convolution only looks on current and previous words.
Such convolution can be represented as a sequence of 2 operations:
- pad input vectors by {strides * (filter_size - 1)} zero vectors on the "left", do not pad right
- perform regular convolution with {filter_size} and {strides}
You can stack several convolutions at once
"""
#YOUR CODE - create layers/variables and any metadata you want, e.g. self.emb = L.Embedding(...)
self.emb = L.Embedding(input_dim=n_tokens, output_dim=emb_size)
self.conv1 = L.Convolution1D(filters=hid_size, kernel_size=5,
padding='causal', name='conv1')
self.conv2 = L.Convolution1D(filters=n_tokens, kernel_size=5,
padding='causal', name='conv2')
self.activation = L.Activation('relu')
#END OF YOUR CODE
self.prefix_ix = tf.placeholder('int32', [None, None])
self.next_token_probs = tf.nn.softmax(self(self.prefix_ix)[:, -1])
def __call__(self, input_ix):
"""
compute language model logits given input tokens
:param input_ix: batch of sequences with token indices, tf tensor: int32[batch_size, sequence_length]
:returns: pre-softmax linear outputs of language model [batch_size, sequence_length, n_tokens]
these outputs will be used as logits to compute P(x_t | x_0, ..., x_{t - 1})
"""
embedding = self.emb(input_ix)
conv1 = self.conv1(embedding)
conv1 = self.activation(conv1)
conv2 = self.conv2(conv1)
return conv2
def get_possible_next_tokens(self, prefix=BOS, temperature=1.0, max_len=100, sess=sess):
""" :returns: probabilities of next token, dict {token : prob} for all tokens """
probs = sess.run(self.next_token_probs, {self.prefix_ix: to_matrix([prefix])})[0]
return dict(zip(tokens, probs))
window_lm = FixedWindowLanguageModel()
dummy_input_ix = tf.constant(to_matrix(dummy_lines))
dummy_lm_out = window_lm(dummy_input_ix)
# note: tensorflow and keras layers only create variables after they're first applied (called)
sess.run(tf.global_variables_initializer())
dummy_logits = sess.run(dummy_lm_out)
assert dummy_logits.shape == (len(dummy_lines), max(map(len, dummy_lines)), n_tokens), "please check output shape"
assert np.all(np.isfinite(dummy_logits)), "inf/nan encountered"
assert not np.allclose(dummy_logits.sum(-1), 1), "please predict linear outputs, don't use softmax (maybe you've just got unlucky)"
# test for lookahead
dummy_input_ix_2 = tf.constant(to_matrix([line[:3] + 'e' * (len(line) - 3) for line in dummy_lines]))
dummy_lm_out_2 = window_lm(dummy_input_ix_2)
dummy_logits_2 = sess.run(dummy_lm_out_2)
assert np.allclose(dummy_logits[:, :3] - dummy_logits_2[:, :3], 0), "your model's predictions depend on FUTURE tokens. " \
" Make sure you don't allow any layers to look ahead of current token." \
" You can also get this error if your model is not deterministic (e.g. dropout). Disable it for this test."
```
We can now tune our network's parameters to minimize categorical crossentropy over training dataset $D$:
$$ L = {\frac1{|D|}} \sum_{X \in D} \sum_{x_i \in X} - \log p(x_t \mid x_1, \dots, x_{t-1}, \theta) $$
As usual with neural nets, this optimization is performed via stochastic gradient descent with backprop. One can also note that minimizing crossentropy is equivalent to minimizing model __perplexity__, KL-divergence or maximizing log-likelihood.
```
def compute_lengths(input_ix, eos_ix=token_to_id[EOS]):
""" compute length of each line in input ix (incl. first EOS), int32 vector of shape [batch_size] """
count_eos = tf.cumsum(tf.to_int32(tf.equal(input_ix, eos_ix)), axis=1, exclusive=True)
lengths = tf.reduce_sum(tf.to_int32(tf.equal(count_eos, 0)), axis=1)
return lengths
print('matrix:\n', dummy_input_ix.eval())
print('lengths:', compute_lengths(dummy_input_ix).eval())
input_ix = tf.placeholder('int32', [None, None])
logits = window_lm(input_ix[:, :-1])
reference_answers = input_ix[:, 1:]
# Your task: implement loss function as per formula above
# your loss should only be computed on actual tokens, excluding padding
# predicting actual tokens and first EOS do count. Subsequent EOS-es don't
# you will likely need to use compute_lengths and/or tf.sequence_mask to get it right.
lengths = compute_lengths(input_ix)
mask = tf.to_float(tf.sequence_mask(lengths, tf.shape(input_ix)[1])[:, 1:])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=reference_answers, logits=logits)
loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
# operation to update network weights
train_step = tf.train.AdamOptimizer().minimize(loss)
loss_1 = sess.run(loss, {input_ix: to_matrix(dummy_lines, max_len=50)})
loss_2 = sess.run(loss, {input_ix: to_matrix(dummy_lines, max_len=100)})
assert (np.ndim(loss_1) == 0) and (0 < loss_1 < 100), "loss must be a positive scalar"
assert np.allclose(loss_1, loss_2), 'do not include AFTER first EOS into loss. '\
'Hint: use tf.sequence_mask. Beware +/-1 errors. And be careful when averaging!'
```
### Training loop
Now let's train our model on minibatches of data
```
from sklearn.model_selection import train_test_split
train_lines, dev_lines = train_test_split(lines, test_size=0.25, random_state=42)
sess.run(tf.global_variables_initializer())
batch_size = 256
score_dev_every = 250
train_history, dev_history = [], []
def score_lines(dev_lines, batch_size):
""" computes average loss over the entire dataset """
dev_loss_num, dev_loss_len = 0., 0.
for i in range(0, len(dev_lines), batch_size):
batch_ix = to_matrix(dev_lines[i: i + batch_size])
dev_loss_num += sess.run(loss, {input_ix: batch_ix}) * len(batch_ix)
dev_loss_len += len(batch_ix)
return dev_loss_num / dev_loss_len
def generate(lm, prefix=BOS, temperature=1.0, max_len=100):
"""
Samples output sequence from probability distribution obtained by lm
:param temperature: samples proportionally to lm probabilities ^ temperature
if temperature == 0, always takes most likely token. Break ties arbitrarily.
"""
while True:
token_probs = lm.get_possible_next_tokens(prefix)
tokens, probs = zip(*token_probs.items())
if temperature == 0:
next_token = tokens[np.argmax(probs)]
else:
probs = np.array([p ** (1. / temperature) for p in probs])
probs /= sum(probs)
next_token = np.random.choice(tokens, p=probs)
prefix += next_token
if next_token == EOS or len(prefix) > max_len: break
return prefix
if len(dev_history) == 0:
dev_history.append((0, score_lines(dev_lines, batch_size)))
print("Before training:", generate(window_lm, 'Bridging'))
from IPython.display import clear_output
from random import sample
from tqdm import trange
for i in trange(len(train_history), 5000):
batch = to_matrix(sample(train_lines, batch_size))
loss_i, _ = sess.run([loss, train_step], {input_ix: batch})
train_history.append((i, loss_i))
if (i + 1) % 50 == 0:
clear_output(True)
plt.scatter(*zip(*train_history), alpha=0.1, label='train_loss')
if len(dev_history):
plt.plot(*zip(*dev_history), color='red', label='dev_loss')
plt.legend(); plt.grid(); plt.show()
print("Generated examples (tau=0.5):")
for j in range(3):
print(generate(window_lm, temperature=0.5))
if (i + 1) % score_dev_every == 0:
print("Scoring dev...")
dev_history.append((i, score_lines(dev_lines, batch_size)))
print('#%i Dev loss: %.3f' % dev_history[-1])
assert np.mean(train_history[:10], axis=0)[1] > np.mean(train_history[-10:], axis=0)[1], "The model didn't converge."
print("Final dev loss:", dev_history[-1][-1])
for i in range(10):
print(generate(window_lm, temperature=0.5))
```
### RNN Language Models
Fixed-size architectures are reasonably good when capturing short-term dependencies, but their design prevents them from capturing any signal outside their window. We can mitigate this problem by using a __recurrent neural network__:
$$ h_0 = \vec 0 ; \quad h_{t+1} = RNN(x_t, h_t) $$
$$ p(x_t \mid x_0, \dots, x_{t-1}, \theta) = dense_{softmax}(h_t) $$
Such model processes one token at a time, left to right, and maintains a hidden state vector between them. Theoretically, it can learn arbitrarily long temporal dependencies given large enough hidden size.
<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/rnn_lm.jpg' width=480px>
```
class RNNLanguageModel:
def __init__(self, n_tokens=n_tokens, emb_size=16, hid_size=256):
"""
Build a recurrent language model.
You are free to choose anything you want, but the recommended architecture is
- token embeddings
- one or more LSTM/GRU layers with hid size
- linear layer to predict logits
"""
# YOUR CODE - create layers/variables/etc
self.emb = L.Embedding(n_tokens, emb_size)
self.lstm = L.LSTM(hid_size, return_sequences=True)
self.linear = L.Dense(n_tokens)
#END OF YOUR CODE
self.prefix_ix = tf.placeholder('int32', [None, None])
self.next_token_probs = tf.nn.softmax(self(self.prefix_ix)[:, -1])
def __call__(self, input_ix):
"""
compute language model logits given input tokens
:param input_ix: batch of sequences with token indices, tf tensor: int32[batch_size, sequence_length]
:returns: pre-softmax linear outputs of language model [batch_size, sequence_length, n_tokens]
these outputs will be used as logits to compute P(x_t | x_0, ..., x_{t - 1})
"""
embedding = self.emb(input_ix)
lstm = self.lstm(embedding)
linear = self.linear(lstm)
return linear
def get_possible_next_tokens(self, prefix=BOS, temperature=1.0, max_len=100, sess=sess):
""" :returns: probabilities of next token, dict {token : prob} for all tokens """
probs = sess.run(self.next_token_probs, {self.prefix_ix: to_matrix([prefix])})[0]
return dict(zip(tokens, probs))
rnn_lm = RNNLanguageModel()
dummy_input_ix = tf.constant(to_matrix(dummy_lines))
dummy_lm_out = rnn_lm(dummy_input_ix)
# note: tensorflow and keras layers only create variables after they're first applied (called)
sess.run(tf.global_variables_initializer())
dummy_logits = sess.run(dummy_lm_out)
assert dummy_logits.shape == (len(dummy_lines), max(map(len, dummy_lines)), n_tokens), "please check output shape"
assert np.all(np.isfinite(dummy_logits)), "inf/nan encountered"
assert not np.allclose(dummy_logits.sum(-1), 1), "please predict linear outputs, don't use softmax (maybe you've just got unlucky)"
# test for lookahead
dummy_input_ix_2 = tf.constant(to_matrix([line[:3] + 'e' * (len(line) - 3) for line in dummy_lines]))
dummy_lm_out_2 = rnn_lm(dummy_input_ix_2)
dummy_logits_2 = sess.run(dummy_lm_out_2)
assert np.allclose(dummy_logits[:, :3] - dummy_logits_2[:, :3], 0), "your model's predictions depend on FUTURE tokens. " \
" Make sure you don't allow any layers to look ahead of current token." \
" You can also get this error if your model is not deterministic (e.g. dropout). Disable it for this test."
```
### RNN training
Our RNN language model should optimize the same loss function as the fixed-window model. But there's a catch. Since an RNN recurrently multiplies gradients through many time steps, gradient values may explode, [breaking](https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/nan.jpg) your model.
The common solution to that problem is to clip gradients either [individually](https://www.tensorflow.org/versions/r1.1/api_docs/python/tf/clip_by_value) or [globally](https://www.tensorflow.org/versions/r1.1/api_docs/python/tf/clip_by_global_norm).
Your task here is to prepare a tensorflow graph that minimizes the same loss function. If you encounter large loss fluctuations during training, add gradient clipping using the links above.
_Note: gradient clipping is not exclusive to RNNs. Convolutional networks with enough depth often suffer from the same issue._
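For illustration, a minimal sketch of global-norm clipping with the TF1-style API used in this notebook. It assumes `loss` is the masked cross-entropy defined in the next cell, the clip norm of 5.0 is an arbitrary choice, and the sketch replaces the plain `minimize` call:
```python
opt = tf.train.AdamOptimizer()
grads_and_vars = opt.compute_gradients(loss)
grads, variables = zip(*grads_and_vars)
# Rescale all gradients jointly so that their global norm is at most 5.0.
clipped_grads, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_step = opt.apply_gradients(zip(clipped_grads, variables))
```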
```
input_ix = tf.placeholder('int32', [None, None])
logits = rnn_lm(input_ix[:, :-1])
reference_answers = input_ix[:, 1:]
# Copy the loss function and train step from the fixed-window model training
lengths = compute_lengths(input_ix)
mask = tf.to_float(tf.sequence_mask(lengths, tf.shape(input_ix)[1])[:, 1:])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=reference_answers, logits=logits)
loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
# and the train step
train_step = tf.train.AdamOptimizer().minimize(loss)
loss_1 = sess.run(loss, {input_ix: to_matrix(dummy_lines, max_len=50)})
loss_2 = sess.run(loss, {input_ix: to_matrix(dummy_lines, max_len=100)})
assert (np.ndim(loss_1) == 0) and (0 < loss_1 < 100), "loss must be a positive scalar"
assert np.allclose(loss_1, loss_2), 'do not include AFTER first EOS into loss. Hint: use tf.sequence_mask. Be careful when averaging!'
```
### RNN: Training loop
```
sess.run(tf.global_variables_initializer())
batch_size = 128
score_dev_every = 250
train_history, dev_history = [], []
dev_history.append((0, score_lines(dev_lines, batch_size)))
for i in trange(len(train_history), 5000):
batch = to_matrix(sample(train_lines, batch_size))
loss_i, _ = sess.run([loss, train_step], {input_ix: batch})
train_history.append((i, loss_i))
if (i + 1) % 50 == 0:
clear_output(True)
plt.scatter(*zip(*train_history), alpha=0.1, label='train_loss')
if len(dev_history):
plt.plot(*zip(*dev_history), color='red', label='dev_loss')
plt.legend(); plt.grid(); plt.show()
print("Generated examples (tau=0.5):")
for j in range(3):
print(generate(rnn_lm, temperature=0.5))
if (i + 1) % score_dev_every == 0:
print("Scoring dev...")
dev_history.append((i, score_lines(dev_lines, batch_size)))
print('#%i Dev loss: %.3f' % dev_history[-1])
assert np.mean(train_history[:10], axis=0)[1] > np.mean(train_history[-10:], axis=0)[1], "The model didn't converge."
print("Final dev loss:", dev_history[-1][-1])
for i in range(10):
print(generate(rnn_lm, temperature=0.5))
```
### Bonus quest: Ultimate Language Model
So you've learned the building blocks of neural language models, you can now build the ultimate monster:
* Make it char-level, word level or maybe use sub-word units like [bpe](https://github.com/rsennrich/subword-nmt);
* Combine convolutions, recurrent cells, pre-trained embeddings and all the black magic deep learning has to offer;
* Use strides to get larger window size quickly. Here's a [scheme](https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif) from google wavenet.
* Train on large data. Like... really large. Try [1 Billion Words](http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz) benchmark;
* Use training schedules to speed up training. Start with small length and increase over time; Take a look at [one cycle](https://medium.com/@nachiket.tanksale/finding-good-learning-rate-and-the-one-cycle-policy-7159fe1db5d6) for learning rate;
_You are NOT required to submit this assignment. Please make sure you don't miss your deadline because of it :)_
|
github_jupyter
|
# One Shot Learning with Siamese Networks
This is the jupyter notebook that accompanies
## Imports
All the imports are defined here
```
%matplotlib inline
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
```
## Helper functions
Set of helper functions
```
def imshow(img,text=None,should_save=False):
npimg = img.numpy()
plt.axis("off")
if text:
plt.text(75, 8, text, style='italic',fontweight='bold',
bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
def show_plot(iteration,loss):
plt.plot(iteration,loss)
plt.show()
```
## Configuration Class
A simple class to manage configuration
```
class Config():
training_dir = "./data/faces/training/"
testing_dir = "./data/faces/testing/"
train_batch_size = 64
train_number_epochs = 100
```
## Custom Dataset Class
This dataset generates a pair of images: 0 for a genuine pair and 1 for an impostor pair
```
class SiameseNetworkDataset(Dataset):
def __init__(self,imageFolderDataset,transform=None,should_invert=True):
self.imageFolderDataset = imageFolderDataset
self.transform = transform
self.should_invert = should_invert
def __getitem__(self,index):
img0_tuple = random.choice(self.imageFolderDataset.imgs)
#we need to make sure approx 50% of images are in the same class
should_get_same_class = random.randint(0,1)
if should_get_same_class:
while True:
#keep looping till the same class image is found
img1_tuple = random.choice(self.imageFolderDataset.imgs)
if img0_tuple[1]==img1_tuple[1]:
break
else:
while True:
#keep looping till a different class image is found
img1_tuple = random.choice(self.imageFolderDataset.imgs)
if img0_tuple[1] !=img1_tuple[1]:
break
img0 = Image.open(img0_tuple[0])
img1 = Image.open(img1_tuple[0])
img0 = img0.convert("L")
img1 = img1.convert("L")
if self.should_invert:
img0 = PIL.ImageOps.invert(img0)
img1 = PIL.ImageOps.invert(img1)
if self.transform is not None:
img0 = self.transform(img0)
img1 = self.transform(img1)
return img0, img1 , torch.from_numpy(np.array([int(img1_tuple[1]!=img0_tuple[1])],dtype=np.float32))
def __len__(self):
return len(self.imageFolderDataset.imgs)
```
## Using Image Folder Dataset
```
folder_dataset = dset.ImageFolder(root=Config.training_dir)
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
transform=transforms.Compose([transforms.Resize((100,100)),
transforms.ToTensor()
])
,should_invert=False)
```
## Visualising some of the data
The top row and the bottom row of each column form one pair. The printed 0s and 1s correspond to the columns of the image:
1 indicates a dissimilar pair, and 0 indicates a similar pair.
```
vis_dataloader = DataLoader(siamese_dataset,
shuffle=True,
num_workers=8,
batch_size=8)
dataiter = iter(vis_dataloader)
example_batch = next(dataiter)
concatenated = torch.cat((example_batch[0],example_batch[1]),0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())
```
## Neural Net Definition
We will use a standard convolutional neural network
```
class SiameseNetwork(nn.Module):
def __init__(self):
super(SiameseNetwork, self).__init__()
self.cnn1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(1, 4, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(4),
nn.ReflectionPad2d(1),
nn.Conv2d(4, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
nn.ReflectionPad2d(1),
nn.Conv2d(8, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
)
self.fc1 = nn.Sequential(
nn.Linear(8*100*100, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 5))
def forward_once(self, x):
output = self.cnn1(x)
output = output.view(output.size()[0], -1)
output = self.fc1(output)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
```
## Contrastive Loss
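The loss implemented below follows Hadsell, Chopra & LeCun (2006): with $D$ the Euclidean distance between the two embeddings and $y$ the label (0 = similar pair, 1 = dissimilar pair),
$$ L(D, y) = (1 - y)\, D^2 + y \cdot \big(\max(0,\, m - D)\big)^2 $$
where $m$ is the margin (2.0 by default in the class below), and the loss is averaged over the batch.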
```
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim = True)
loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
(label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
```
## Training Time!
```
train_dataloader = DataLoader(siamese_dataset,
shuffle=True,
num_workers=8,
batch_size=Config.train_batch_size)
net = SiameseNetwork().cuda()
criterion = ContrastiveLoss()
optimizer = optim.Adam(net.parameters(),lr = 0.0005 )
counter = []
loss_history = []
iteration_number= 0
for epoch in range(0,Config.train_number_epochs):
for i, data in enumerate(train_dataloader,0):
img0, img1 , label = data
img0, img1 , label = img0.cuda(), img1.cuda() , label.cuda()
optimizer.zero_grad()
output1,output2 = net(img0,img1)
loss_contrastive = criterion(output1,output2,label)
loss_contrastive.backward()
optimizer.step()
if i %10 == 0 :
print("Epoch number {}\n Current loss {}\n".format(epoch,loss_contrastive.item()))
iteration_number +=10
counter.append(iteration_number)
loss_history.append(loss_contrastive.item())
show_plot(counter,loss_history)
```
## Some simple testing
The last 3 subjects were held out from the training and will be used for testing. The distance between each image pair denotes the degree of similarity the model found between the two images. Lower values mean the model found the pair more similar, while higher values indicate it found them dissimilar.
```
folder_dataset_test = dset.ImageFolder(root=Config.testing_dir)
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset_test,
transform=transforms.Compose([transforms.Resize((100,100)),
transforms.ToTensor()
])
,should_invert=False)
test_dataloader = DataLoader(siamese_dataset,num_workers=6,batch_size=1,shuffle=True)
dataiter = iter(test_dataloader)
x0,_,_ = next(dataiter)
for i in range(10):
_,x1,label2 = next(dataiter)
concatenated = torch.cat((x0,x1),0)
output1,output2 = net(Variable(x0).cuda(),Variable(x1).cuda())
euclidean_distance = F.pairwise_distance(output1, output2)
imshow(torchvision.utils.make_grid(concatenated),'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))
```
|
github_jupyter
|
# Naive Bayes
$$ \begin{split} \mathop{argmax}_{c_k}p(y=c_k|x) &= \mathop{argmax}_{c_k}p(y=c_k)p(x|y=c_k) \\
& \left( \text{due to: } p(y=c_k|x) = \frac{p(y=c_k)p(x|y=c_k)}{p(x)} \right) \\
&= \mathop{argmax}_{c_k}p(y=c_k)\prod_jp(x^{(j)}|y=c_k) \end{split} $$
Use the Maximum Likelihood Estimate (MLE) to estimate $ p(y=c_k)$ and $ p(x^{(j)}|y=c_k) $ from the dataset.
$$ \hat{p}(y=c_k) = \frac{\sum_i I(y_i=c_k)}{N} \\
\hat{p}(x^{(j)}=a_j|y=c_k) = \frac{\sum_i I(x_i^{(j)}=a_j,\,y_i=c_k)}{\sum_i I(y_i=c_k)}
$$
Bayesian estimation adds a smoothing constant $ \lambda $ to the numerator and denominator of the MLE estimates.
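For example, with $K$ classes, $S_j$ distinct values of feature $x^{(j)}$, and smoothing parameter $\lambda > 0$, the smoothed estimates are
$$ \hat{p}_\lambda(y=c_k) = \frac{\sum_i I(y_i=c_k) + \lambda}{N + K\lambda}, \qquad \hat{p}_\lambda(x^{(j)}=a_j|y=c_k) = \frac{\sum_i I(x_i^{(j)}=a_j,\,y_i=c_k) + \lambda}{\sum_i I(y_i=c_k) + S_j\lambda} $$
Setting $\lambda = 1$ gives Laplace smoothing; this is what the `alpha` parameter of scikit-learn's naive Bayes classifiers controls.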
# Naive Bayes in Scikit-learn
Classifiers: GaussianNB, MultinomialNB, BernoulliNB
## Documents Classification
Use the TF-IDF (Term Frequency and Inverse Document Frequency) of each term in the documents as features
$$ \text{TF-IDF}(t) = TF(t) \times IDF(t) \\
TF(t) = \frac{\text{Number of times term t appears in a document}}{\text{Total number of terms in the document}} \\
IDF(t) = \log_e\frac{\text{Total number of documents}}{\text{Number of documents with term t in it} + 1} $$
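As a quick worked example (numbers chosen purely for illustration): if a term appears 3 times in a 100-term document and occurs in 2 of the 6 documents of the corpus, then $TF = 3/100 = 0.03$, $IDF = \log_e\frac{6}{2+1} \approx 0.69$, and $TF \times IDF \approx 0.02$.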
Bag of Words
### TfidfVectorizer
sklearn.feature_extraction.text.TfidfVectorizer(stop_words, token_pattern, max_df)
```
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer()
documents=[
'my dog has flea problems help please',
'maybe not take him to dog park stupid',
'my dalmation is so cute I love him',
'stop posting stupid worthless garbage',
'mr licks ate my steak how to stop him',
'quit buying worthlsess dog food stupid',
]
targets=[0,1,0,1,0,1] # 0 normal, 1 insult
tf_matrix = vect.fit_transform(documents)
# all unique words
words = vect.get_feature_names()
print(len(words), words)
# words id
print(len(vect.vocabulary_), vect.vocabulary_)
tfidf = tf_matrix.toarray()
print(tfidf.shape, tfidf[0])
```
### CountVectorizer
```
from sklearn.feature_extraction.text import CountVectorizer
c_vect = CountVectorizer()
c_matrix = c_vect.fit_transform(documents)
print(c_vect.get_feature_names())
c_matrix.toarray()
# default ngram_range is (1, 1), token_pattern=r'(?u)\b\w\w+\b'
c_vect_ngram = CountVectorizer(ngram_range=(1, 2))
c_matrix_ngram = c_vect_ngram.fit_transform(documents)
print(c_vect_ngram.get_feature_names())
```
### MultinomialNB
```
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=0.001).fit(tf_matrix, targets)
test_vect = TfidfVectorizer(vocabulary=vect.vocabulary_)
test_features = test_vect.fit_transform([documents[3]])
predicted_labels = clf.predict(test_features)
from sklearn import metrics
print(metrics.accuracy_score([targets[3]], predicted_labels))
```
|
github_jupyter
|
# Working with Streaming Data
Learning Objectives
1. Learn how to process real-time data for ML models using Cloud Dataflow
2. Learn how to serve online predictions using real-time data
## Introduction
It can be useful to leverage real time data in a machine learning model when making a prediction. However, doing so requires setting up a streaming data pipeline which can be non-trivial.
Typically you will have the following:
- A series of IoT devices generating and sending data from the field in real-time (in our case these are the taxis)
- A messaging bus that receives and temporarily stores the IoT data (in our case this is Cloud Pub/Sub)
- A streaming processing service that subscribes to the messaging bus, windows the messages and performs data transformations on each window (in our case this is Cloud Dataflow)
- A persistent store to keep the processed data (in our case this is BigQuery)
These steps happen continuously and in real-time, and are illustrated by the blue arrows in the diagram below.
Once this streaming data pipeline is established, we need to modify our model serving to leverage it. This simply means adding a call to the persistent store (BigQuery) to fetch the latest real-time data when a prediction request comes in. This flow is illustrated by the red arrows in the diagram below.
<img src='../assets/taxi_streaming_data.png' width='80%'>
In this lab we will address how to process real-time data for machine learning models. We will use the same data as our previous 'taxifare' labs, but with the addition of `trips_last_5min` data as an additional feature. This is our proxy for real-time traffic.
```
!pip install --upgrade apache-beam[gcp]
```
Restart the kernel before proceeding further (On the Notebook menu - Kernel - Restart Kernel).
```
import os
import shutil
import numpy as np
import tensorflow as tf
from google import api_core
from google.cloud import aiplatform, bigquery
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
from matplotlib import pyplot as plt
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Dense, DenseFeatures
from tensorflow.keras.models import Sequential
print(tf.__version__)
# Change below if necessary
PROJECT = !gcloud config get-value project # noqa: E999
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
%env PROJECT=$PROJECT
%env BUCKET=$BUCKET
%env REGION=$REGION
%%bash
gcloud config set project $PROJECT
gcloud config set ai/region $REGION
```
## Re-train our model with `trips_last_5min` feature
In this lab, we want to show how to process real-time data for training and prediction. So, we need to retrain our previous model with this additional feature. Go through the notebook `4a_streaming_data_training.ipynb`. Open and run the notebook to train and save a model. This notebook is very similar to what we did in the Introduction to Tensorflow module but note the added feature for `trips_last_5min` in the model and the dataset.
## Simulate Real Time Taxi Data
Since we don't actually have real-time taxi data, we will synthesize it using a simple Python script. The script publishes events to Google Cloud Pub/Sub.
Inspect the `iot_devices.py` script in the `taxicab_traffic` folder. It is configured to send about 2,000 trip messages every five minutes with some randomness in the frequency to mimic traffic fluctuations. These numbers come from looking at the historical average of taxi ride frequency in BigQuery.
In production this script would be replaced with actual taxis with IoT devices sending trip data to Cloud Pub/Sub.
To execute the `iot_devices.py` script, launch a terminal and navigate to the `asl-ml-immersion/notebooks/building_production_ml_systems/solutions` directory. Then run the following two commands.
```bash
PROJECT_ID=$(gcloud config get-value project)
python3 ./taxicab_traffic/iot_devices.py --project=$PROJECT_ID
```
You will see new messages being published every 5 seconds. **Keep this terminal open** so it continues to publish events to the Pub/Sub topic. If you open [Pub/Sub in your Google Cloud Console](https://console.cloud.google.com/cloudpubsub/topic/list), you should be able to see a topic called `taxi_rides`.
## Create a BigQuery table to collect the processed data
In the next section, we will create a dataflow pipeline to write processed taxifare data to a BigQuery Table, however that table does not yet exist. Execute the following commands to create a BigQuery dataset called `taxifare` and a table within that dataset called `traffic_realtime`.
```
bq = bigquery.Client()
dataset = bigquery.Dataset(bq.dataset("taxifare"))
try:
bq.create_dataset(dataset) # will fail if dataset already exists
print("Dataset created.")
except api_core.exceptions.Conflict:
print("Dataset already exists.")
```
Next, we create a table called `traffic_realtime` and set up the schema.
```
dataset = bigquery.Dataset(bq.dataset("taxifare"))
table_ref = dataset.table("traffic_realtime")
SCHEMA = [
bigquery.SchemaField("trips_last_5min", "INTEGER", mode="REQUIRED"),
bigquery.SchemaField("time", "TIMESTAMP", mode="REQUIRED"),
]
table = bigquery.Table(table_ref, schema=SCHEMA)
try:
bq.create_table(table)
print("Table created.")
except api_core.exceptions.Conflict:
print("Table already exists.")
```
## Launch Streaming Dataflow Pipeline
Now that we have our taxi data being pushed to Pub/Sub, and our BigQuery table set up, let's consume the Pub/Sub data using a streaming Dataflow pipeline.
The pipeline is defined in `./taxicab_traffic/streaming_count.py`. Open that file and inspect it.
There are 5 transformations being applied:
- Read from PubSub
- Window the messages
- Count number of messages in the window
- Format the count for BigQuery
- Write results to BigQuery
**TODO:** Open the file ./taxicab_traffic/streaming_count.py and find the TODO there. Specify a sliding window that is 5 minutes long, and gets recalculated every 15 seconds. Hint: Reference the [beam programming guide](https://beam.apache.org/documentation/programming-guide/#windowing) for guidance. To check your answer reference the solution.
For the second transform, we specify a sliding window that is 5 minutes long, and recalculate values every 15 seconds.
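For reference, the windowing step might look roughly like the sketch below. This is only a sketch: `messages` stands for the PCollection read from Pub/Sub inside `streaming_count.py`, and the solution file remains the authoritative answer.
```python
import apache_beam as beam
from apache_beam.transforms import window

# Sliding windows: each window covers 5 minutes (300 s) and a new window
# starts every 15 seconds, so the count is recomputed every 15 seconds.
windowed_messages = (
    messages
    | "window" >> beam.WindowInto(window.SlidingWindows(size=300, period=15))
)
```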
In a new terminal, launch the dataflow pipeline using the command below. You can change the `BUCKET` variable, if necessary. Here it is assumed to be your `PROJECT_ID`.
```bash
PROJECT_ID=$(gcloud config get-value project)
REGION=$(gcloud config get-value ai/region)
BUCKET=$PROJECT_ID # change as necessary
python3 ./taxicab_traffic/streaming_count.py \
--input_topic taxi_rides \
--runner=DataflowRunner \
--project=$PROJECT_ID \
--region=$REGION \
--temp_location=gs://$BUCKET/dataflow_streaming
```
Once you've submitted the command above you can examine the progress of that job in the [Dataflow section of Cloud console](https://console.cloud.google.com/dataflow).
## Explore the data in the table
After a few moments, you should also see new data written to your BigQuery table as well.
Re-run the query periodically to observe new data streaming in! You should see a new row every 15 seconds.
```
%%bigquery
SELECT
*
FROM
`taxifare.traffic_realtime`
ORDER BY
time DESC
LIMIT 10
```
## Make predictions from the new data
In the rest of the lab, we'll reference the model we trained and deployed in the previous labs, so make sure you have run the code in the `4a_streaming_data_training.ipynb` notebook.
The `add_traffic_last_5min` function below will query the `traffic_realtime` table to find the most recent traffic information and add that feature to our instance for prediction.
**Exercise.** Complete the code in the function below. Write a SQL query that will return the most recent entry in `traffic_realtime` and add it to the instance.
```
# TODO 2a. Write a function to take most recent entry in `traffic_realtime`
# table and add it to instance.
def add_traffic_last_5min(instance):
bq = bigquery.Client()
query_string = """
TODO: Your code goes here
"""
trips = bq.query(query_string).to_dataframe()["trips_last_5min"][0]
instance['traffic_last_5min'] = # TODO: Your code goes here.
return instance
```
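For reference, one possible completion is sketched below. It assumes the `taxifare.traffic_realtime` schema created earlier; the official solution notebook may phrase the query differently.
```python
def add_traffic_last_5min(instance):
    bq = bigquery.Client()
    # Fetch the single most recent row from the realtime traffic table.
    query_string = """
    SELECT
      *
    FROM
      `taxifare.traffic_realtime`
    ORDER BY
      time DESC
    LIMIT 1
    """
    trips = bq.query(query_string).to_dataframe()["trips_last_5min"][0]
    # Attach the latest traffic count as an additional feature.
    instance["traffic_last_5min"] = int(trips)
    return instance
```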
The `traffic_realtime` table is updated in real time using Cloud Pub/Sub and Dataflow, so if you run the cell below periodically, you should see the `traffic_last_5min` feature added to the instance and changing over time.
```
add_traffic_last_5min(
instance={
"dayofweek": 4,
"hourofday": 13,
"pickup_longitude": -73.99,
"pickup_latitude": 40.758,
"dropoff_latitude": 41.742,
"dropoff_longitude": -73.07,
}
)
```
Finally, we'll use the Python API to call predictions on an instance, using the realtime traffic information in our prediction. Just as above, you should notice that our resulting predictions change over time as the realtime traffic information changes as well.
**Exercise.** Complete the code below to call prediction on an instance incorporating realtime traffic info. You should
- use the function `add_traffic_last_5min` to add the most recent realtime traffic data to the prediction instance
- call prediction on your model for this realtime instance and save the result as a variable called `response`
- parse the json of `response` to print the predicted taxifare cost
Copy the `ENDPOINT_RESOURCENAME` from the deployment in the previous lab to the beginning of the block below.
```
# TODO 2b. Write code to call prediction on instance using realtime traffic
# info. Hint: Look at this sample
# https://github.com/googleapis/python-aiplatform/blob/master/samples/snippets/predict_custom_trained_model_sample.py
# TODO: Copy the `ENDPOINT_RESOURCENAME` from the deployment in the previous
# lab.
ENDPOINT_RESOURCENAME = ""
api_endpoint = f"{REGION}-aiplatform.googleapis.com"
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple
# requests.
client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
instance = {
"dayofweek": 4,
"hourofday": 13,
"pickup_longitude": -73.99,
"pickup_latitude": 40.758,
"dropoff_latitude": 41.742,
"dropoff_longitude": -73.07,
}
# The format of each instance should conform to the deployed model's
# prediction input schema.
instance_dict = # TODO: Your code goes here.
instance = json_format.ParseDict(instance, Value())
instances = [instance]
response = # TODO: Your code goes here.
# The predictions are a google.protobuf.Value representation of the model's
# predictions.
print(" prediction:",
# TODO: Your code goes here.
)
```
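For reference, one possible completion is sketched below, following the `predict_custom_trained_model_sample.py` example linked in the cell. The exact parsing of the response depends on your deployed model's output schema, so treat this as a sketch rather than the definitive solution.
```python
# Add the latest realtime traffic feature, then wrap the instance as protobuf.
instance_dict = add_traffic_last_5min(instance)
instance = json_format.ParseDict(instance_dict, Value())
instances = [instance]
parameters = json_format.ParseDict({}, Value())

response = client.predict(
    endpoint=ENDPOINT_RESOURCENAME, instances=instances, parameters=parameters
)
print(" prediction:", response.predictions[0])
```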
## Cleanup
In order to avoid ongoing charges, when you are finished with this lab, you can delete your Dataflow job from the [Dataflow section of Cloud console](https://console.cloud.google.com/dataflow).
An endpoint with a model deployed to it incurs ongoing charges, as there must be at least one replica defined (the `min-replica-count` parameter is at least 1). In order to stop incurring charges, you can click on the endpoint on the [Endpoints page of the Cloud Console](https://console.cloud.google.com/vertex-ai/endpoints) and un-deploy your model.
Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
github_jupyter
|
# Build GAN (Generative Adversarial Networks) with PyTorch and SageMaker
### About GAN
Generative Adversarial Networks (GANs) are generative machine learning models that are widely used in advertising, games, entertainment, media, pharmaceuticals and other industries. They can be used to create fictional characters and scenes, simulate facial aging, change image styles, produce chemical formulas, and so on.
GAN was proposed by Ian Goodfellow in 2014. It is a deep neural network architecture consisting of a generator network and a discriminator network. The generator network produces "fake" data and tries to deceive the discriminator network; the discriminator network inspects the generated data and tries to correctly identify all "fake" data. Over the training iterations, the two networks keep evolving and competing until they reach an equilibrium (see: Nash equilibrium), at which point the discriminator can no longer recognize "fake" data and training ends.
This example walks you through building a GAN model with the PyTorch framework, introducing GANs from the perspective of engineering practice, and opening up a new and interesting AI/ML experience with generative models.
### Environment setup
Upgrade packages
```
!pip install --upgrade pip sagemaker awscli boto3 numpy ipywidgets
!pip install Pillow==7.1.2
```
Create folders
```
!mkdir -p data src tmp
```
### Download data
There are many public datasets on the Internet, which are very helpful for machine learning engineering and scientific research, such as algorithm study and evaluation. We will use the MNIST dataset, a handwritten-digits dataset, to train a GAN model and eventually generate some fake "handwritten" digits.
```
!aws s3 cp --recursive s3://sagemaker-sample-files/datasets/image/MNIST/pytorch/ ./data
```
### Data preparation
The torchvision.datasets package, which ships alongside the PyTorch framework, provides access to a number of datasets. You may use the following commands to read the pre-downloaded MNIST dataset from local storage for later use.
```
from torchvision import datasets
dataroot = './data'
trainset = datasets.MNIST(root=dataroot, train=True, download=False)
testset = datasets.MNIST(root=dataroot, train=False, download=False)
print(trainset)
print(testset)
```
The SageMaker SDK will create a default Amazon S3 bucket for you to access various files and data that you may need in the machine learning engineering lifecycle. We can get the name of this bucket through the default_bucket method of the sagemaker.session.Session class in the SageMaker SDK.
```
from sagemaker.session import Session
sess = Session()
# S3 bucket for saving code and model artifacts.
# Feel free to specify a different bucket here if you wish.
bucket = sess.default_bucket()
prefix = 'byos-pytorch-gan'
# Location to save your custom code in tar.gz format.
s3_custom_code_upload_location = f's3://{bucket}/{prefix}/customcode'
# Location where results of model training are saved.
s3_model_artifacts_location = f's3://{bucket}/{prefix}/artifacts/'
```
The SageMaker SDK provides tools for operating AWS services. For example, the S3Downloader class is used to download objects from S3, and the S3Uploader class is used to upload local files to S3. You will upload the dataset files to Amazon S3 for model training. During model training we do not download data from the Internet, which avoids the network latency of fetching data remotely and possible security risks due to direct Internet access.
```
import os
from sagemaker.s3 import S3Uploader as s3up
s3_data_location = s3up.upload(os.path.join(dataroot, "MNIST"), f"s3://{bucket}/{prefix}/data/mnist")
```
### Training
DCGAN (Deep Convolutional Generative Adversarial Networks) is a variant of the GAN families. This architecture essentially leverages Deep Convolutional Neural Networks to generate images belonging to a given distribution from noisy data using the Generator-Discriminator framework.
```
%%writefile src/train.py
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
cudnn.benchmark = True
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Generator(nn.Module):
def __init__(self, *, nz, nc, ngf, ngpu=1):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
def save(self, path, *, filename=None, device='cpu'):
# recommended way from http://pytorch.org/docs/master/notes/serialization.html
self.to(device)
if not filename is None:
path = os.path.join(path, filename)
torch.save(self.state_dict(), path)
def load(self, path, *, filename=None):
if not filename is None:
path = os.path.join(path, filename)
with open(path, 'rb') as f:
self.load_state_dict(torch.load(f))
class Discriminator(nn.Module):
def __init__(self, *, nc, ndf, ngpu=1):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
def save(self, path, *, filename=None, device='cpu'):
# recommended way from http://pytorch.org/docs/master/notes/serialization.html
self.to(device)
if not filename is None:
path = os.path.join(path, filename)
torch.save(self.state_dict(), path)
def load(self, path, *, filename=None):
if not filename is None:
path = os.path.join(path, filename)
with open(path, 'rb') as f:
self.load_state_dict(torch.load(f))
class DCGAN(object):
"""
A wrapper class for Generator and Discriminator,
'train_step' method is for single batch training.
"""
fixed_noise = None
criterion = None
device = None
netG = None
netD = None
optimizerG = None
optimizerD = None
nz = None
nc = None
ngf = None
ndf = None
real_cpu = None
def __init__(self, *, batch_size, nz, nc, ngf, ndf, device, weights_init,
learning_rate, betas, real_label, fake_label):
super(DCGAN, self).__init__()
import torch
self.nz = nz
self.nc = nc
self.ngf = ngf
self.ndf = ndf
self.real_label = real_label
self.fake_label = fake_label
self.fixed_noise = torch.randn(batch_size, nz, 1, 1, device=device)
self.criterion = nn.BCELoss()
self.device = device
self.netG = Generator(nz=nz, nc=nc, ngf=ngf).to(device)
# print(netG)
self.netD = Discriminator(nc=nc, ndf=ndf).to(device)
# print(netD)
self.netG.apply(weights_init)
self.netD.apply(weights_init)
# setup optimizer
self.optimizerG = optim.Adam(self.netG.parameters(), lr=learning_rate, betas=betas)
self.optimizerD = optim.Adam(self.netD.parameters(), lr=learning_rate, betas=betas)
def train_step(self, data, *, epoch, epochs):
import torch
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
self.netD.zero_grad()
self.real_cpu = data[0]
real = data[0].to(self.device)
batch_size = real.size(0)
label = torch.full((batch_size,), self.real_label, device=self.device)
output = self.netD(real).view(-1)
errD_real = self.criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
# train with fake
noise = torch.randn(batch_size, self.nz, 1, 1, device=self.device)
fake = self.netG(noise)
label.fill_(self.fake_label)
output = self.netD(fake.detach()).view(-1)
errD_fake = self.criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
self.optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
self.netG.zero_grad()
label.fill_(self.real_label) # fake labels are real for generator cost
output = self.netD(fake).view(-1)
errG = self.criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
self.optimizerG.step()
return errG.item(), errD.item(), D_x, D_G_z1, D_G_z2
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight, 1.0, 0.02)
torch.nn.init.zeros_(m.bias)
def log_batch(epoch, epochs, batch, batches, errD, errG, D_x, D_G_z1, D_G_z2, *, log_interval=10, output_dir):
if batch % log_interval == 0:
logger.info(f"Epoch[{epoch}/{epochs}], Batch[{batch}/{batches}], " +
f"Loss_D: {errD:.4}, Loss_G: {errG:.4}, D(x): {D_x:.4}, D(G(z)): {D_G_z1:.4}/{D_G_z2:.4}")
def get_device(use_cuda):
import torch
device = "cpu"
num_gpus = 0
if torch.cuda.is_available():
if use_cuda:
device = "cuda"
torch.cuda.set_device(0)
num_gpus = torch.cuda.device_count()
else:
logger.debug("WARNING: You have a CUDA device, so you should probably run with --cuda 1")
logger.debug(f"Number of gpus available: {num_gpus}")
return device, num_gpus
def train(dataloader, hps, test_batch_size,
device, model_dir, output_dir, seed, log_interval):
epochs = hps['epochs']
batch_size = hps['batch-size']
nz = hps['nz']
ngf = hps['ngf']
ndf = hps['ndf']
learning_rate = hps['learning-rate']
beta1 = hps['beta1']
dcgan = DCGAN(batch_size=batch_size, nz=nz, nc=1, ngf=ngf, ndf=ndf,
device=device, weights_init=weights_init, learning_rate=learning_rate,
betas=(beta1, 0.999), real_label=1, fake_label=0)
for epoch in range(epochs):
batches = len(dataloader)
for batch, data in enumerate(dataloader, 0):
errG, errD, D_x, D_G_z1, D_G_z2 = dcgan.train_step(data,
epoch=epoch, epochs=epochs)
log_batch(epoch, epochs, batch, batches, errD, errG,
D_x, D_G_z1, D_G_z2, log_interval=log_interval, output_dir=output_dir)
save_model(model_dir, dcgan.netG)
return
def save_model(model_dir, model):
logger.info("Saving the model.")
model.save(model_dir, filename="model.pth")
def load_model(model_dir, device=None):
logger.info("Loading the model.")
if device is None:
device = get_training_device_name(1)
netG.load(model_dir, filename="model.pth", device=device)
return netG
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Example')
parser.add_argument('--batch-size', type=int, default=1000, metavar='N',
help='input batch size (default: 1000)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--seed', type=int, default=None, metavar='S',
help='random seed')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR', None))
parser.add_argument('--cuda', type=int, default=1)
parser.add_argument('--num-gpus', type=int, default=os.environ.get('SM_NUM_GPUS', None))
parser.add_argument('--pin-memory', type=bool, default=os.environ.get('SM_PIN_MEMORY', False))
parser.add_argument('--data-dir', required=False, default=None, help='path to data dir')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--output-dir', default=os.environ.get('SM_OUTPUT_DATA_DIR', None), help='folder to output images and model checkpoints')
parser.add_argument('--hps', default=os.environ.get('SM_HPS', None), help='Hyperparameters')
return parser.parse_known_args()
def get_datasets(*, dataroot='/opt/ml/input/data', classes=None):
dataset = dset.MNIST(root=dataroot,
transform=transforms.Compose([
transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
]))
return dataset
if __name__ == '__main__':
args, unknown = parse_args()
# get training options
hps = json.loads(args.hps)
try:
os.makedirs(args.output_dir)
except OSError:
pass
if args.seed is None:
random_seed = random.randint(1, 10000)
logger.debug(f"Generated Random Seed: {random_seed}")
cudnn.benchmark = True
else:
logger.debug(f"Provided Random Seed: {args.seed}")
random_seed = args.seed
cudnn.deterministic = True
cudnn.benchmark = False
random.seed(random_seed)
torch.manual_seed(random_seed)
pin_memory=args.pin_memory
num_workers = int(args.workers)
device, num_gpus = get_device(args.cuda)
if device == 'cuda':
num_workers = 1
pin_memory = True
if args.data_dir is None:
input_dir = os.environ.get('SM_INPUT_DIR', None)
        if input_dir is None:
            raise ValueError("`--data-dir` parameter is required when SM_INPUT_DIR is not set")
dataroot = input_dir + "/data"
else:
dataroot = args.data_dir
dataset = get_datasets(dataroot=dataroot)
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
train(dataloader, hps, args.test_batch_size, device, args.model_dir, args.output_dir, args.seed, args.log_interval)
```
Through the sagemaker.get_execution_role() method, the notebook can get the role pre-assigned to the notebook instance. This role will be used to obtain training resources, such as downloading training framework images, allocating Amazon EC2 instances, and so on.
```
from sagemaker import get_execution_role
# IAM execution role that gives SageMaker access to resources in your AWS account.
# We can use the SageMaker Python SDK to get the role from our notebook environment.
role = get_execution_role()
```
The hyperparameters used by the model training task can be defined in the notebook so that they are kept separate from the algorithm and training code. The hyperparameters are passed in when the training task is created and are dynamically combined with it.
```
import json
hps = {
'seed': 0,
'learning-rate': 0.0002,
'epochs': 18,
'pin-memory': 1,
'beta1': 0.5,
'nz': 100,
'ngf': 28,
'ndf': 28,
'batch-size': 128,
'log-interval': 20,
}
str_hps = json.dumps(hps, indent = 4)
print(str_hps)
```
The ```PyTorch``` class from the sagemaker.pytorch package is an estimator for the PyTorch framework; it can be used to create and execute training tasks, as well as to deploy trained models. In the parameter list, ``instance_type`` is used to specify the instance type, such as CPU or GPU instances. The directory containing the training script and the model code is specified by ``source_dir``, and the training script file name must be clearly defined by ``entry_point``. These parameters will be passed to the training task along with other parameters, and they determine the environment settings of the training task.
```
from sagemaker.pytorch import PyTorch
estimator = PyTorch(role=role,
entry_point='train.py',
source_dir='./src',
output_path=s3_model_artifacts_location,
code_location=s3_custom_code_upload_location,
instance_count=1,
instance_type='ml.g4dn.2xlarge',
framework_version='1.5.0',
py_version='py3',
hyperparameters=hps,
)
```
Please pay special attention to the ``train_use_spot_instances`` parameter (named ``use_spot_instances`` in newer SageMaker SDK versions). Setting it to ``True`` means that you want to use Spot instances first. Since machine learning training usually requires a large amount of computing resources running for a long time, leveraging Spot instances can help you control cost. Spot instances may save up to 90% compared with on-demand instances; the actual savings vary with instance type, region, and time.
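Note that the estimator created above does not enable Spot training. A sketch of how it could be enabled is shown below; the parameter names follow SageMaker Python SDK v2 (older versions use ``train_use_spot_instances`` / ``train_max_run`` / ``train_max_wait``), and the time limits are arbitrary placeholders.
```python
spot_estimator = PyTorch(role=role,
                         entry_point='train.py',
                         source_dir='./src',
                         output_path=s3_model_artifacts_location,
                         code_location=s3_custom_code_upload_location,
                         instance_count=1,
                         instance_type='ml.g4dn.2xlarge',
                         framework_version='1.5.0',
                         py_version='py3',
                         hyperparameters=hps,
                         use_spot_instances=True,   # prefer Spot capacity
                         max_run=2 * 3600,          # max training time in seconds
                         max_wait=4 * 3600)         # >= max_run; includes waiting for Spot capacity
```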
You have created a PyTorch estimator object, and you can use it to fit the data previously uploaded to Amazon S3. The following command will initiate the training task, and the training data will be imported into the training environment in the form of an input channel named **MNIST**. Before the training task starts, the training data is downloaded from S3 to the local file system of the training instance, and the training script ```train.py``` will then load the data from the local disk.
```
# Start training
estimator.fit({"MNIST": s3_data_location}, wait=False)
```
Depending on the training instance you choose, the training process may last from tens of minutes to several hours. It is recommended to set the ``wait`` parameter to ``False``; this option detaches the notebook from the training task. In scenarios with long training times and many training logs, it prevents the notebook context from being lost due to network interruption or session timeout. After the notebook is detached from the training task, the output will be temporarily invisible. You can execute the following code, and the notebook will obtain and resume the previous training session.
```
%%time
from sagemaker.estimator import Estimator
# Attaching previous training session
training_job_name = estimator.latest_training_job.name
attached_estimator = Estimator.attach(training_job_name)
```
Since the model was designed to leverage GPU power to accelerate training, it will be much faster than training on CPU instances. For example, a p3.2xlarge instance will take about 15 minutes, while a c5.xlarge instance may take more than 6 hours. The current model does not support distributed and parallel training, so multi-instance and multi-CPU/GPU setups will not bring extra training-speed benefits.
When the training completes, the trained model will be uploaded to S3. The upload location is specified by the `output_path` parameter provided when creating the `PyTorch` object.
### Model verification
You will download the trained model from Amazon S3 to the local file system of the instance where the notebook is located. The following code will load the model, generate a picture from random noise as input, and then display the picture.
```
from sagemaker.s3 import S3Downloader as s3down
!mkdir -p ./tmp
model_url = attached_estimator.model_data
s3down.download(model_url, './tmp')
!tar -zxf tmp/model.tar.gz -C ./tmp
```
Execute the following instructions to load the trained model and generate a set of "handwritten" digits.
```
def generate_fake_handwriting(model, *, num_images, nz, device=None):
import torch
import torchvision.utils as vutils
from io import BytesIO
from PIL import Image
z = torch.randn(num_images, nz, 1, 1, device=device)
fake = model(z)
imgio = BytesIO()
vutils.save_image(fake.detach(), imgio, normalize=True, format="PNG")
img = Image.open(imgio)
return img
def load_model(path, *, model_cls=None, params=None, filename=None, device=None, strict=True):
import os
import torch
model_pt_path = path
if not filename is None:
model_pt_path = os.path.join(path, filename)
if device is None:
device = 'cpu'
if not model_cls is None:
model = model_cls(**params)
model.load_state_dict(torch.load(model_pt_path, map_location=torch.device(device)), strict=strict)
else:
model = torch.jit.load(model_pt_path, map_location=torch.device(device))
model.to(device)
return model
import matplotlib.pyplot as plt
import numpy as np
import torch
from src.train import Generator
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
params = {'nz': hps['nz'], 'nc': 1, 'ngf': hps['ngf']}
model = load_model("./tmp/model.pth", model_cls=Generator, params=params, device=device, strict=False)
img = generate_fake_handwriting(model, num_images=64, nz=hps['nz'], device=device)
plt.imshow(np.asarray(img))
```
### Clean up
Run the following command lines in a terminal to remove the files generated by this notebook from S3 and local storage
```
import os
print(f"aws s3 rm --recursive s3://{bucket}/{prefix}")
print(f"rm -rf {os.path.abspath(dataroot)}")
```
### Conclusion
The PyTorch framework, as one of the most popular deep learning frameworks, is widely recognised and applied, and has become one of the de facto mainstream frameworks.
Amazon SageMaker is tightly integrated with a variety of AWS services, such as Amazon EC2 instances of various types and sizes, Amazon S3, Amazon ECR, and so on, providing an end-to-end, consistent machine learning experience for framework practitioners. Amazon SageMaker continues to support mainstream machine learning frameworks, including PyTorch. Machine learning algorithms and models developed with PyTorch can be easily ported to the Amazon SageMaker environment; by using Amazon SageMaker's fully managed Jupyter Notebooks, Spot training instances, Amazon Elastic Container Registry, the SageMaker SDK, and so on, the complexity of machine learning engineering and infrastructure operation is reduced, productivity and efficiency are improved, and operation and maintenance costs are lowered.
DCGAN is a landmark in the field of generative adversarial networks, and it is the cornerstone of many of today's more complex generative adversarial networks. We will explore some of the most recent and interesting GAN variants in later examples.
I believe the introduction and engineering practice in this example will help you understand the principles of GANs and the engineering methods for working with them.
|
github_jupyter
|
# Quantization of Signals
*This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).*
## Characteristic of a Linear Uniform Quantizer
The characteristics of a quantizer depend on the mapping functions $f(\cdot)$, $g(\cdot)$ and the rounding operation $\lfloor \cdot \rfloor$ introduced in the [previous section](introduction.ipynb). A linear quantizer is based on linear mapping functions $f(\cdot)$ and $g(\cdot)$. A uniform quantizer splits the mapped input signal into quantization steps of equal size. Quantizers can be described by their nonlinear in-/output characteristic $x_Q[k] = \mathcal{Q} \{ x[k] \}$, where $\mathcal{Q} \{ \cdot \}$ denotes the quantization process. For linear uniform quantization it is common to differentiate between two characteristic curves, the so-called mid-tread and mid-rise. Both are introduced in the following.
### Mid-Tread Characteristic Curve
The in-/output relation of the mid-tread quantizer is given as
\begin{equation}
x_Q[k] = Q \cdot \underbrace{\left\lfloor \frac{x[k]}{Q} + \frac{1}{2} \right\rfloor}_{index}
\end{equation}
where $Q$ denotes the constant quantization step size and $\lfloor \cdot \rfloor$ the [floor function](https://en.wikipedia.org/wiki/Floor_and_ceiling_functions) which maps a real number to the largest integer not greater than its argument. Without restricting $x[k]$ in amplitude, the resulting quantization indexes are [countable infinite](https://en.wikipedia.org/wiki/Countable_set). For a finite number of quantization indexes, the input signal has to be restricted to a minimal/maximal amplitude $x_\text{min} < x[k] < x_\text{max}$ before quantization. The resulting quantization characteristic of a linear uniform mid-tread quantizer is shown below

The term mid-tread is due to the fact that small values $|x[k]| < \frac{Q}{2}$ are mapped to zero.
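For instance, with $Q = 0.1$ an input of $x[k] = 0.04$ is quantized to $Q \cdot \lfloor 0.4 + 0.5 \rfloor = 0$, whereas $x[k] = 0.06$ is quantized to $Q \cdot \lfloor 0.6 + 0.5 \rfloor = 0.1$.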
#### Example - Mid-tread quantization of a sine signal
The quantization of one period of a sine signal $x[k] = A \cdot \sin[\Omega_0\,k]$ by a mid-tread quantizer is simulated. $A$ denotes the amplitude of the signal, $x_\text{min} = -1$ and $x_\text{max} = 1$ are the smallest and largest output values of the quantizer, respectively.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
A = 1.2 # amplitude of signal
Q = 1/10 # quantization stepsize
N = 2000 # number of samples
def uniform_midtread_quantizer(x, Q):
# limiter
x = np.copy(x)
idx = np.where(np.abs(x) >= 1)
x[idx] = np.sign(x[idx])
# linear uniform quantization
xQ = Q * np.floor(x/Q + 1/2)
return xQ
def plot_signals(x, xQ):
e = xQ - x
plt.figure(figsize=(10,6))
plt.plot(x, label=r'signal $x[k]$')
plt.plot(xQ, label=r'quantized signal $x_Q[k]$')
plt.plot(e, label=r'quantization error $e[k]$')
plt.xlabel(r'$k$')
plt.axis([0, N, -1.1*A, 1.1*A])
plt.legend()
plt.grid()
# generate signal
x = A * np.sin(2*np.pi/N * np.arange(N))
# quantize signal
xQ = uniform_midtread_quantizer(x, Q)
# plot signals
plot_signals(x, xQ)
```
**Exercise**
* Change the quantization stepsize `Q` and the amplitude `A` of the signal. Which effect does this have on the quantization error?
Solution: The smaller the quantization step size, the smaller the quantization error is for $|x[k]| < 1$. Note, the quantization error is not bounded for $|x[k]| > 1$ due to the clipping of the signal $x[k]$.
### Mid-Rise Characteristic Curve
The in-/output relation of the mid-rise quantizer is given as
\begin{equation}
x_Q[k] = Q \cdot \Big( \underbrace{\left\lfloor\frac{ x[k] }{Q}\right\rfloor}_{index} + \frac{1}{2} \Big)
\end{equation}
where $\lfloor \cdot \rfloor$ denotes the floor function. The quantization characteristic of a linear uniform mid-rise quantizer is illustrated below

The term mid-rise reflects the fact that $x[k] = 0$ is not mapped to zero. Small positive/negative values around zero are mapped to $\pm \frac{Q}{2}$.
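For instance, with $Q = 0.1$ an input of $x[k] = 0.03$ is quantized to $Q \cdot (\lfloor 0.3 \rfloor + \tfrac{1}{2}) = 0.05$, whereas $x[k] = -0.02$ is quantized to $Q \cdot (\lfloor -0.2 \rfloor + \tfrac{1}{2}) = -0.05$.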
#### Example - Mid-rise quantization of a sine signal
The previous example is now reevaluated using the mid-rise characteristic
```
A = 1.2 # amplitude of signal
Q = 1/10 # quantization stepsize
N = 2000 # number of samples
def uniform_midrise_quantizer(x, Q):
# limiter
x = np.copy(x)
idx = np.where(np.abs(x) >= 1)
x[idx] = np.sign(x[idx])
# linear uniform quantization
xQ = Q * (np.floor(x/Q) + .5)
return xQ
# generate signal
x = A * np.sin(2*np.pi/N * np.arange(N))
# quantize signal
xQ = uniform_midrise_quantizer(x, Q)
# plot signals
plot_signals(x, xQ)
```
**Exercise**
* What are the differences between the mid-tread and the mid-rise characteristic curves for the given example?
Solution: The mid-tread and the mid-rise quantization of the sine signal differ for signal values smaller than half of the quantization interval. Mid-tread has a representation of $x[k] = 0$ while this is not the case for the mid-rise quantization.
**Copyright**
This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
|
github_jupyter
|
```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector
# Load the MNIST dataset
mnist = input_data.read_data_sets(r"C:\Users\zdwxx\Downloads\Compressed\MNIST_data", one_hot=True)
# Number of training iterations
max_steps = 550 * 21
# Number of images to embed
image_num = 3000
# Create the session
sess = tf.Session()
# File path
DIR = "C:/Tensorflow/"
# Load the test images used for the embedding
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]),
trainable=False, name="embedding")
# Define a summary helper for a variable
def varible_summaries(var):
with tf.name_scope("summary"):
mean = tf.reduce_mean(var)
tf.summary.scalar("mean", mean) # å¹³ååŒ
with tf.name_scope("stddev"):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar("stddev", stddev) # æ åå·®
tf.summary.scalar("max", tf.reduce_max(var)) #æå€§åŒ
tf.summary.scalar("min", tf.reduce_min(var)) # æå°åŒ
tf.summary.histogram("histogram", var) # çŽæ¹åŸ
# Name scope
with tf.name_scope("input"):
    # Define two placeholders
x = tf.placeholder(tf.float32, [None, 784], name="x-input")
y = tf.placeholder(tf.float32, [None, 10], name="y-input")
# Display input images
with tf.name_scope("input_reshape"):
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image("input", image_shaped_input, 10)
with tf.name_scope("layer"):
    # Build a simple neural network
with tf.name_scope("wights1"):
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1), name="W1")
varible_summaries(W1)
with tf.name_scope("biases1"):
b1 = tf.Variable(tf.zeros([500]) + 0.1, name="b1")
varible_summaries(b1)
# with tf.name_scope("wx_plus_b1"):
# wx_plus_b1 = tf.matmul(x, W1) + b1
with tf.name_scope("L1"):
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
with tf.name_scope("wights2"):
W2 = tf.Variable(tf.truncated_normal([500, 10], stddev=0.1), name="W2")
varible_summaries(W2)
with tf.name_scope("biases2"):
b2 = tf.Variable(tf.zeros([10]) + 0.1, name="b2")
varible_summaries(b2)
with tf.name_scope("wx_plus_b2"):
wx_plus_b2 = tf.matmul(L1, W2) + b2
with tf.name_scope("softmax"):
        prediction = tf.nn.softmax(wx_plus_b2)  # predicted probabilities
# Loss function: softmax cross-entropy
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
with tf.name_scope("loss"):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
tf.summary.scalar("loss", loss)
# Train with gradient descent
with tf.name_scope("train"):
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
with tf.name_scope("accuracy"):
    # Store the comparison results in a boolean list
with tf.name_scope("correct_prediction"):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the largest value in a 1-D tensor
    # Compute the accuracy
    with tf.name_scope("accuracy"):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast converts the type: True -> 1.0, False -> 0.0
tf.summary.scalar("accuracy", accuracy)
# Generate the metadata file for the embedding projector
if tf.gfile.Exists(DIR + "projector/projector/metadata.tsv"):
tf.gfile.DeleteRecursively(DIR + "projector/projector/metadata.tsv")
with open(DIR + "projector/projector/metadata.tsv", "w") as f:
lables = sess.run(tf.argmax(mnist.test.labels[:], 1))
for i in range(image_num):
f.write(str(lables[i]) + "\n")
# Merge all the summaries
merged = tf.summary.merge_all()
projector_writer = tf.summary.FileWriter(DIR + "projector/projector", sess.graph)
saver = tf.train.Saver()
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = embedding.name
embed.metadata_path = DIR + "projector/projector/metadata.tsv"
embed.sprite.image_path = DIR + "projector/data/mnist_10k_sprite.png"
embed.sprite.single_image_dim.extend([28, 28])
projector.visualize_embeddings(projector_writer, config)
for i in range(max_steps):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # fetch the next batch of 100 training images
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    summary = sess.run([train_step, merged], feed_dict={x: batch_xs, y: batch_ys},
                       options=run_options, run_metadata=run_metadata)[1]
    projector_writer.add_run_metadata(run_metadata, "step%03d" % i)
projector_writer.add_summary(summary, i)
if i % 550 == 0:
acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels})
print("第", i, "äžªåšæ", "åç¡®çæ¯", acc)
saver.save(sess, DIR + "projector/projector/a_model.ckpt")
projector_writer.close()
sess.close()
```
|
github_jupyter
|
# Software Design for Scientific Computing
----
## Unit 5: Integrating high-level languages with low-level languages.
## Unit 5 Agenda
- JIT (Numba)
- Cython.
- Integrating Python with FORTRAN.
- **Integrating Python with C.**
## Recap
- We wrote the Python code.
- We moved everything to numpy.
- We profiled.
- We parallelized (joblib/dask).
- We profiled.
- We used Numba.
- We profiled.
- If we can choose the language: Cython.
- If we cannot choose the language and we are doing numerical computing: FORTRAN.
- If we cannot choose at all, we go with C/C++/Rust/whatever.
## Ctypes
- Lets you use existing libraries written in other languages by writing **simple** wrappers in Python.
- Ships with Python.
- Can be a bit **hard** to use.
- It is also an ideal tool for breaking Python.
### Ctypes example 1/2
The C code used in this tutorial is designed to be as simple as possible while demonstrating the concepts we are covering. It is more of a "toy example" and is not meant to be useful on its own. These are the functions we will use:
```c
int simple_function(void) {
static int counter = 0;
counter++;
return counter;
}
```
- `simple_function` simply returns counting numbers.
- Each time it is called, it increments `counter` and returns that value.
### Ctypes example 2/2
```c
void add_one_to_string(char *input) {
int ii = 0;
for (; ii < strlen(input); ii++) {
input[ii]++;
}
}
```
- It adds one to each character in the character array that is passed in.
- We will use this to talk about Python's immutable strings and how to work around them when needed.
These examples are saved in `clib1.c`, and are compiled with:
```bash
gcc -c -Wall -Werror -fpic clib1.c # build the object code
gcc -shared -o libclib1.so clib1.o # build the .so
```
## Calling a simple function
```
import ctypes
# Load the shared library into c types.
libc = ctypes.CDLL("ctypes/libclib1.so")
counter = libc.simple_function()
counter
```
## Immutable Python strings with Ctypes
```
print("Calling C function which tries to modify Python string")
original_string = "starting string"
print("Before:", original_string)
# This call does not change value, even though it tries!
libc.add_one_to_string(original_string)
print("After: ", original_string)
```
- As you will notice, this **does not work**.
- The `original_string` is not available to the C function at all when we do this.
- The C function modified some other memory, not the string.
- Not only does the C function not do what you want, it also modifies memory it should not, which can lead to memory-corruption problems.
- If we want the C function to have access to the string, we need to do a bit of serialization work.
## Immutable Python strings with Ctypes
- We need to convert the original string to bytes using `str.encode`, and then pass that to the constructor `ctypes.create_string_buffer`.
- String buffers are mutable and are passed to C as `char *`.
```
# The ctypes string buffer IS mutable, however.
print("Calling C function with mutable buffer this time")
# Need to encode the original to get bytes for string_buffer
mutable_string = ctypes.create_string_buffer(str.encode(original_string))
print("Before:", mutable_string.value)
libc.add_one_to_string(mutable_string) # Works!
print("After: ", mutable_string.value)
```
## Specifying function signatures in ctypes
- As we saw earlier, we can specify the return type if needed.
- We can make a similar specification for the function parameters.
- Moreover, providing a function signature lets Python check that you are passing the correct parameters when you call a C function; otherwise, **bad** things can happen.
To specify the return type of a function, get the function object and set its `restype` property:
```python
libc.func.restype = ctypes.POINTER(ctypes.c_char)
```
and to specify the argument signatures:
```python
libc.func.argtypes = [ctypes.POINTER(ctypes.c_char), ]
```
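As a minimal sketch (assuming the `libclib1.so` built above, loaded from the same `ctypes/` path as in the earlier cells), the signatures of the two tutorial functions could be declared like this:
```python
import ctypes

# Path taken from the earlier cells; adjust it to wherever the shared library lives.
libc = ctypes.CDLL("ctypes/libclib1.so")

# simple_function takes no arguments and returns a C int.
libc.simple_function.argtypes = []
libc.simple_function.restype = ctypes.c_int

# add_one_to_string takes a mutable char * and returns nothing.
libc.add_one_to_string.argtypes = [ctypes.POINTER(ctypes.c_char)]
libc.add_one_to_string.restype = None

buf = ctypes.create_string_buffer(b"abc")
libc.add_one_to_string(buf)
print(buf.value)  # expected: b'bcd'
```
With `argtypes` declared, passing a plain Python `str` is rejected with a `ctypes.ArgumentError` instead of silently corrupting memory.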
## Writing a Python interface in C
We are going to "wrap" the C library function `fputs()`:
```C
int fputs (const char *, FILE *)
```
- This function takes two arguments:
 1. `const char *` is an array of characters.
 2. `FILE *` is a pointer to a file stream.
- `fputs()` writes the character array to the specified file and returns a non-negative value; if the operation is successful, this value is the number of bytes written to the file.
- If there is an error, it returns `EOF`.
## Writing the C function for `fputs()`
This is a basic C program that uses fputs() to write a string to a file stream:
```C
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
int main() {
FILE *fp = fopen("write.txt", "w");
fputs("Real Python!", fp);
fclose(fp);
return 0;
}
```
## Wrapping `fputs()`
The following code block shows the final wrapped version of the C code:
```C
#include <Python.h>
static PyObject *method_fputs(PyObject *self, PyObject *args) {
char *str, *filename = NULL;
int bytes_copied = -1;
/* Parse arguments */
if(!PyArg_ParseTuple(args, "ss", &str, &filename)) {
return NULL;
}
FILE *fp = fopen(filename, "w");
bytes_copied = fputs(str, fp);
fclose(fp);
return PyLong_FromLong(bytes_copied);
}
```
This code snippet references three names that are defined in `Python.h`:
`PyObject`, `PyArg_ParseTuple()` and `PyLong_FromLong()`.
## `PyObject`
- `PyObject` is the object structure used to define object types for Python.
- All other Python object types are extensions of this type.
- Setting the return type of the function above to `PyObject` defines the common fields Python requires to recognize it as a valid type.
Take another look at the first lines of the C code:
```C
static PyObject *method_fputs(PyObject *self, PyObject *args) {
char *str, *filename = NULL;
int bytes_copied = -1;
...
```
On line 2, you declare the argument types you want to receive from your Python code:
- `char *str` is the string you want to write to the file stream.
- `char *filename` is the name of the file to write to.
## `PyArg_ParseTuple()`
`PyArg_ParseTuple()` parses the arguments received from your Python program into local variables:
```C
static PyObject *method_fputs(PyObject *self, PyObject *args) {
char *str, *filename = NULL;
int bytes_copied = -1;
if(!PyArg_ParseTuple(args, "ss", &str, &filename)) {
return NULL;
}
...
```
`PyArg_ParseTuple()` takes the following arguments:
- `args`, of type `PyObject`.
- `"ss"`, which specifies the data types of the arguments to parse.
- `&str` and `&filename`, pointers to the local variables that the parsed values will be assigned to.
`PyArg_ParseTuple()` returns `false` when it fails.
## `fputs()` and `PyLong_FromLong()`
```C
static PyObject *method_fputs(PyObject *self, PyObject *args) {
char *str, *filename = NULL;
int bytes_copied = -1;
if(!PyArg_ParseTuple(args, "ss", &str, &filename)) {
return NULL;
}
FILE *fp = fopen(filename, "w");
bytes_copied = fputs(str, fp);
fclose(fp);
return PyLong_FromLong(bytes_copied);
}
```
- The calls to `fputs()` were explained earlier; the only difference is that the variables used are the ones coming from `*args` and stored locally.
- Finally, `PyLong_FromLong()` returns a `PyLongObject`, which represents an integer object in Python.
## Extension module
You have now written the code that makes up the core functionality of your Python C extension module.
- However, you still have to write the definitions of your module and of the methods it contains, like this:
```C
static PyMethodDef FputsMethods[] = {
{"fputs", method_fputs, METH_VARARGS, "Python interface for fputs C library function"},
{NULL, NULL, 0, NULL}
};
static struct PyModuleDef fputsmodule = {
PyModuleDef_HEAD_INIT,
"fputs",
"Python interface for the fputs C library function",
-1,
FputsMethods
};
```
## `PyMethodDef`
- `PyMethodDef` informs the Python interpreter about the methods defined in the module.
- Ideally, there will be more than one method in the table. That is why you need to define an array of structures:
```C
static PyMethodDef FputsMethods[] = {
{"fputs", method_fputs, METH_VARARGS, "Python interface for fputs C library function"},
{NULL, NULL, 0, NULL}
};
```
Each individual member of the structure holds the following information:
- `fputs` is the name the user would type to invoke this particular function from Python.
- `method_fputs` is the name of the C function to invoke.
- `METH_VARARGS` indicates that the function will accept two arguments of type `PyObject *`:
    - `self` is the module object.
    - `args` is a tuple containing the arguments of the function (unpacked with `PyArg_ParseTuple()`).
- The final string is the value used as the docstring.
### `PyModuleDef`
Defines a Python module (a `.py` file) in C.
```C
static struct PyModuleDef fputsmodule = {
PyModuleDef_HEAD_INIT, "fputs",
"Interface for the fputs C function", -1, FputsMethods};```
There are a total of 9 members in this structure, but the code block above initializes the following five:
- `PyModuleDef_HEAD_INIT` is the "base" of the module (this part is normally always the same).
- `"fputs"` is the name of the module.
- The string is the documentation of the module.
- `-1` is the amount of memory needed to store the program state. It is useful when your module is used by multiple sub-interpreters, and it can take the following values:
    - A negative value indicates that this module does not support sub-interpreters.
    - A non-negative value enables re-initialization of the module. It also specifies the memory requirement to be allocated in each sub-interpreter session.
- `FputsMethods` is the method table.
## Initializing the module
- Now that you have defined the C extension and the method structures, it is time to put them to use.
- When a Python program imports your module for the first time, it will call `PyInit_fputs()`:
```C
PyMODINIT_FUNC PyInit_fputs(void) {
return PyModule_Create(&fputsmodule);
}
```
`PyMODINIT_FUNC` implicitly does 3 things:
- It implicitly sets the return type of the function to PyObject *.
- It declares any special linkage.
- It declares the function as extern "C". If you are using C++, it tells the C++ compiler not to mangle the symbol names.
`PyModule_Create()` returns a new module object of type `PyObject *`.
## Putting it all together - What happens when we import the module?
![imagenes](ctypes/fputs_module_imported.webp)
## Putting it all together - What is returned when the module is imported?
![imagenes](ctypes/fputs_module_importedV2.webp)
## Putting it all together - What happens when we call `fputs.fputs()`?
![imagenes](ctypes/fputs_functionV2.webp)
## Packaging with `distutils`
```python
from distutils.core import setup, Extension
def main():
setup(name="fputs",
ext_modules=[Extension("fputs", ["fputsmodule.c"])],
...)
if __name__ == "__main__":
main()
```
To install:
```bash
$ python3 setup.py install
```
To compile:
```bash
$ python setup.py build_ext --inplace
```
If you want to specify the compiler:
```bash
$ CC=gcc python3 setup.py install
```
## Using the extension
```
import sys; sys.path.insert(0, "./c_extensions")
import fputs
fputs?
fputs.fputs?
fputs.fputs("Hola mundo!", "salida.txt")
with open("salida.txt") as fp:
print(fp.read())
```
## Raising Exceptions
- If you want to raise Python exceptions from C, you can use the Python API to do so.
- Some of the functions provided by the Python API for raising exceptions are the following:
    - `PyErr_SetString(PyObject *type, const char *message)`
    - `PyErr_Format(PyObject *type, const char *format)`
    - `PyErr_SetObject(PyObject *type, PyObject *value)`
All of the built-in Python exceptions are defined in the API.
## Raising Exceptions
```C
static PyObject *method_fputs(PyObject *self, PyObject *args) {
char *str, *filename = NULL;
int bytes_copied = -1;
/* Parse arguments */
    if(!PyArg_ParseTuple(args, "ss", &str, &filename)) {
return NULL;
}
if (strlen(str) <= 0) {
PyErr_SetString(PyExc_ValueError, "String length must be greater than 0");
return NULL;
}
    FILE *fp = fopen(filename, "w");
bytes_copied = fputs(str, fp);
fclose(fp);
return PyLong_FromLong(bytes_copied);
}
```
## Raising Custom Exceptions
To create and use a custom exception, you have to add it to the module instance:
```C
static PyObject *StringTooShortError = NULL;
PyMODINIT_FUNC PyInit_fputs(void) {
/* Assign module value */
PyObject *module = PyModule_Create(&fputsmodule);
/* Initialize new exception object */
StringTooShortError = PyErr_NewException("fputs.StringTooShortError", NULL, NULL);
/* Add exception object to your module */
PyModule_AddObject(module, "StringTooShortError", StringTooShortError);
return module;
}
static PyObject *method_fputs(PyObject *self, PyObject *args) {
...
    if (strlen(str) < 10) {
        /* Passing custom exception */
        PyErr_SetString(StringTooShortError, "String length must be greater than 10");
return NULL;}
...
}
```
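From the Python side, a minimal sketch of catching the custom exception (assuming the module has been rebuilt with the snippet above, so `fputs.StringTooShortError` exists):
```python
import fputs

try:
    fputs.fputs("short", "salida.txt")  # fewer than 10 characters
except fputs.StringTooShortError as exc:
    print("Caught StringTooShortError:", exc)
```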
## References
- https://docs.python.org/3.8/library/ctypes.html
- https://dbader.org/blog/python-ctypes-tutorial
- https://realpython.com/build-python-c-extension-module/
|
github_jupyter
|
# An Introduction to Python using Jupyter Notebooks
<a id='toc'></a>
## Table of Contents:
### Introduction
* [Python programs are plain text files](#python-programs)
* [Use the Jupyter Notebook for editing and running Python](#jn-editing-python)
* [How are Jupyter Notebooks stored](#how-its-stored)
* [What you need to know](#need-to-know)
* [The Notebook has Control and Edit modes](#notebook-modes)
* [Use the keyboard and mouse to select and edit cells](#keyboard-mouse)
* [Practice: Run your first Jupyter Notebook cells](#prac-jupyter)
### Using Markdown
* [The Notebook will turn Markdown into pretty-printed documentation](#markdown)
* [How to use Markdown](#how-to-markdown)
* [Markdown Exercises](#md-exercises)
* [Markdown Exercise Solutions](#md-solutions)
### Introduction to Python 1: Data
* [Intro to Python 1: Prerequisites](#python-1)
* [Programming with Python](#python-introduction)
* [What is Python and why would I use it?](#python-introduction)
* [Special Characters](#python-sp-char)
* [Variables](#variables)
* [Practice](#prac-variable)
* [Variables can be used in calculations](#variable-calc)
* [Data Types](#data-types)
* [Practice with Strings](#prac-strings)
* [Practice with Numerics](#numbers)
* [Practice with Booleans](#booleans)
* [Python "Type" function](#py-type)
* [Lists](#py-lists)
* [Tuples](#py-tuples)
* [Differences between lists and tuples](#lists-vs-tuples)
* [Sets](#py-sets)
* [Dictionaries](#py-dictionaries)
* [Python Statements](#py-statements)
* [Conditionals](#py-conditionals)
* [Loops](#py-loops)
* [For Loops](#for-loops)
* [While Loops](#while-loops)
* [Pandas: Working with Existing Data](#pandas)
* [Pandas: Importing Data](#read-data)
* [Pandas: Manipulating Data](#manipulate-data)
* [Pandas: Writing Data](#write-data)
* [Pandas: Working with more than file](#all-countries)
* [Pandas: Slicing and selecting values](#slicing)
* Python I Exercises
* [Problem 5: Assigning variables and printing values](#prob-variable)
* [Problem 6: Print your first and last name](#py-concatenate)
* [Problem 7: What variable type do I have?](#py-data-type)
* [Problem 8: Creating and Working with Lists](#prob-lists)
* [Problem 9: Creating and Accessing Dictionaries](#prob-dictionaries)
* [Problem 10: Writing Conditional If/Else Statements](#prob-if-else)
* [Problem 11: Reverse the string using a for loop](#prob-str-reverse-loop)
* [Problem 12: Looping through Dictionaries](#prob-dict-loop)
* [Problem 13: Checking assumptions about your data](#prob-unique)
* [Problem 14: Slice and save summary statistics](#summary-stats)
* [Python I Exercise Solutions](#py1-solutions)
### Introduction to Python 2: A Tool for Programming
* [Intro to Python 2: Prerequisites](#python-2)
* [Setup if you are joining in for Python II](#python-2-setup)
* [Functions:](#functions)
* [Why Use Functions?](#why-functions)
* [Let's revist the reverse string and turn it into a function](#str-reverse-func)
* [Let's look at a real world example of where constants could be used in functions](#temp-func)
* [Scripting](#scripting)
* Python II Exercises
* [Python II Exercise Solutions](#py2-solutions)
### Common Errors
* [Common Errors](#errors)
<a id='python-programs'></a>
### Python programs are plain text files
[Table of Contents](#toc)
* They have the `.py` extension to let everyone (including the operating system)
know it is a Python program.
* This is convention, not a requirement.
* It's common to write them using a text editor but we are going to use a [Jupyter Notebook](http://jupyter.org/).
* There is a bit of extra setup, but it is well worth it because Jupyter Notebooks provide code completion
and other helpful features such as markdown integration. This means you can take notes in this notebook while we are working throughout the session.
* There are some pitfalls that can also cause confusion if we are unaware of them. While code generally runs from top to bottom, a Jupyter Notebook allows you to run cells out of sequence. The order in which code cells were run appears as a number to the left of each code cell.
* Notebook files have the extension `.ipynb` to distinguish them from plain-text Python programs.
<a id='jn-editing-python'></a>
### Use the Jupyter Notebook for editing and running Python
[Table of Contents](#toc)
* The [Anaconda package manager](http://www.anaconda.com) is an automated way to install the Jupyter notebook.
* See [the setup instructions]({{ site.github.url }}/setup/) for Anaconda installation
instructions.
* It also installs all the extra libraries it needs to run.
* Once you have installed Python and the Jupyter Notebook requirements, open a shell and type:
> `jupyter notebook`
* This will start a Jupyter Notebook server and open your default web browser.
* The server runs locally on your machine only and does not use an internet connection.
* The server sends messages to your browser.
* The server does the work and the web browser renders the notebook.
* You can type code into the browser and see the result when the web page talks to the server.
* This has several advantages:
- You can easily type, edit, and copy and paste blocks of code.
- Tab completion allows you to easily access the names of things you are
using and learn more about them.
- It allows you to annotate your code with links, different sized text, bullets,
etc to make it more accessible to you and your collaborators.
- It allows you to display figures next to the code that produces them to
tell a complete story of the analysis.
- **Note: This will modify and delete files on your local machine.**
* The notebook is stored as JSON but can be saved as a .py file if you would
like to run it from the bash shell or a python interpreter.
* Just like a webpage, the saved notebook looks different to what you see when
it gets rendered by your browser.
<a id='how-its-stored'></a>
### How are Jupyter Notebooks Stored
[Table of Contents](#toc)
* The notebook file is stored in a format called JSON.
* Just like a webpage, what's saved looks different from what you see in your browser.
* But this format allows Jupyter to mix software (in several languages) with documentation and graphics, all in one file.
<a id='need-to-know'></a>
### What you need to know for today's lesson
[Table of Contents](#toc)
**Jupyter Notebook options when running locally:**

**Jupyter Notebook options when running in Binder:**

* Commands are only run when you tell them to run. Some lessons require you to run their code in order.
* The File menu has an option called "Revert to Checkpoint". Use that to reset your file in case you delete something by accident.
* The Kernel menu has options to restart the interpreter and clear the output.
* The Run button will send the code in the selected cell to the interpreter.
* The command palette will show you the available hotkeys and let you set them.
* Saving to browser storage is the button with a cloud and downward facing arrow. Click on this button frequently
to save progress as we go.
* Restoring from browser storage is the button with a cloud and upward facing arrow. Click on this button if you
are disconnected or Binder quits working after you have refreshed the page. This will load your previously saved work.
<a id='notebook-modes'></a>
### The Notebook has Control and Edit modes.
[Table of Contents](#toc)
* Open a new notebook from the dropdown menu in the top right corner of the file browser page.
* Each notebook contains one or more cells of various types.
> ## Code vs. Markdown
>
> We often use the term "code" to mean "the source code of software written in a language such as Python".
> A "code cell" in a Jupyter Notebook contains software code or that which is for the computer to read.
> A "markdown cell" is one that contains ordinary prose written for human beings to read.
* If you press the `esc` and `return` keys alternately, the outer border of your code cell will change from blue to green.
* The difference in color can be subtle, but the two colors indicate different modes of your notebook.
* <span style='color:blue'>Blue</span> is the command mode while <span style='color:green'>Green</span> is the
edit mode.
* If you use the "esc" key to make the surrounding box blue (enter into command mode) and then press the "H" key, a
list of all the shortcut keys will appear.
* When in command mode (esc/blue),
* The `B key` will make a new cell below the currently selected cell.
* The `A key` will make one above.
* The `X key` will delete the current cell.
* There are lots of shortcuts you can try out and most actions can be done with the menus at the top of the page if you forget the shortcuts.
* If you remember the `esc` and `H` shortcuts, you will be able to find all the tools you need to work in a notebook.
<a id='keyboard-mouse'></a>
### Use the keyboard and mouse to select and edit cells.
[Table of Contents](#toc)
* Pressing the `return` key turns the surrounding box green to signal edit mode and allows you to type in the cell.
* Because we want to be able to write many lines of code in a single cell, pressing the `return` key when the
border is green moves the cursor to the next line in the cell just like in a text editor.
* We need some other way to tell the Notebook we want to run what's in the cell.
* Pressing the `shift` and the `return` keys together will execute the contents of the cell.
* Notice that the `return` and `shift` keys on the right of the keyboard are right next to each other.
<a id='prac-jupyter'></a>
### Practice: Running Jupyter Notebook Cell
[Table of Contents](#toc)
```
# Find the shortcut in the command palette and run this cell.
message = "run me first"
```
If you ran the above cell correctly, there should be a number **1** inside the square brackets to the left of the cell. **Note:** the number will increase every time you run the cell.
```
# Run this cell and see what the output is.
print(message)
```
**If the output beneath the cell looks like this:**
```python
run me first
```
Then you have run the cells in the correct order and received the expected output. Why did we get this output?
**If the output beneath the cell looks like this:**
```python
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-a4525a899574> in <module>
1 # Run this cell and see what the output is.
----> 2 print(message)
NameError: name 'message' is not defined
```
Then you have received an error. Read the error message to see what went wrong. Here we have a `NameError` because the computer does not know what the variable `message` is. We need to go back to the first code cell and run it correctly first to define the variable `message`. Then we should be able to run the second code cell and receive the expected output (it prints the string we assigned to the variable `message`).
**Getting Error Messages**:
Error messages are commonplace for anyone writing code. You should expect to get them frequently and learn how to
interpret them as best as possible. Some languages give more descriptive error messages than others, but in both
cases you are likely to find the answer with a quick Google search.
## Using Markdown
<a id='markdown'></a>
### The Notebook will turn Markdown into pretty-printed documentation.
[Table of Contents](#toc)
* Notebooks can also render [Markdown][markdown].
* A simple plain-text format for writing lists, links and other things that might go into a web page.
* Equivalently, a subset of HTML that looks like what you would send in an old-fashioned email.
* Turn the current cell into a Markdown cell by entering the command mode (esc/blue) and press the `M key`.
* `In [ ]:` will disappear to show it is no longer a code cell and you will be able to write in Markdown.
* Turn the current cell into a Code cell by entering the command mode (esc/blue) and press the `Y key`.
<a id='how-to-markdown'></a>
### How to use Markdown
[Table of Contents](#toc)
<div class="row">
<div class="col-md-6" markdown="1">
**The asterisk is a special character in markdown. It will create a bulleted list.**
```markdown
Markdown syntax to produce output below.
* Use asterisks
* to create
* bullet lists.
```
* Use asterisks
* to create
* bulleted lists.
**But what happens when I want to use an asterisk in my text? We can use another special character, the backslash `\`, also known as
an escape character. Place the backslash before any markdown special character, without a space, to use the special character in your text.**
```markdown
Markdown syntax to produce output below.
\* Use asterisks
\* to create
\* bullet lists.
```
\* Use asterisks
\* to create
\* bullet lists.
Note: Escape characters can change depending on the language you are writing in.
**Use can use numbers to create a numbered list:**
```markdown
Markdown syntax to produce numbered lists.
1. Use numbers
1. to create
1. numbered lists.
```
1. Use numbers
1. to create
1. numbered lists.
Note that we did not have to type the numbers in order, but markdown still numbered the list correctly in the output. This is nice
because it saves us time when we modify or edit lists later because we do not have to renumber the entire list.
**Using different headings to keep consistency throughout the document:**
```markdown
Markdown syntax to produce headings.
# A Level-1 Heading
## A Level-2 Heading
### A Level-3 Heading
```
Print version of the three lines of markdown code from above.
# A Level-1 Heading
## A Level-2 Heading
### A Level-3 Heading
**Line breaks don't matter. But blank lines create new paragraphs.**
```markdown
**Markdown syntax:**
Line breaks
do not matter. _(accomplished by pressing the return key once)_
Sometimes though we want to include a line break without starting a new paragraph. We can accomplish this by including two spaces at the end of the line.
Here is the first line.
The second line is on the second line but in same paragraph (no blank line).
```
**Print version of markdown code from above:**
Line breaks
don't matter. _(accomplished by pressing the return key once)_
Sometimes though we want to include a line break without starting a new paragraph. We can accomplish this by including two spaces at the end of the line.
Here is the first line.
The second line is on the second line but in same paragraph (no blank line).
**Creating links in markdown:**
The information inside the `[...]` is what the user will see and the information inside the `(...)` is the pointer or url that the link will take the user to.
```markdown
**Markdown Syntax:**
[Create links](http://software-carpentry.org) with the following syntax `[...](...)`.
Or use [named links][data_carpentry].
_Notice the line below only defines the link and is not in printed output. Double click on the cell below this one if you don't believe me._
[data_carpentry]: http://datacarpentry.org
```
**Output of markdown syntax:**
[Create links](http://software-carpentry.org) with `[...](...)`.
Or use [named links][data_carpentry].
[data_carpentry]: http://datacarpentry.org
<a id='md-exercises'></a>
## Markdown Exercises
[Table of Contents](#toc)
### Creating Lists in Markdown
<a id='md-exercises-p01'></a>
**Problem 1: Creating Lists** Create a nested list in a Markdown cell in a notebook that looks like this:
1. Get funding.
1. Do work.
* Design experiment.
* Collect data.
* Analyze.
1. Write up.
1. Publish.
**Hint:**_Double click this cell to see the answer._
[Solution](#md-solutions-p01)
<a id='md-exercises-p02'></a>
### Math anyone?
**Problem 2: Math in python** What is displayed when a Python cell in a notebook that contains several calculations is executed? For example, what happens when this cell is executed?
```
7 * 3
```
What is displayed when a Python cell in a notebook that contains several calculations is executed? For example, what happens when this cell is executed?
```
7 * 3
2 + 1
6 * 7 + 12
```
[Solution](#md-solutions-p02)
<a id='md-exercises-p03'></a>
**Problem 3: Math in markdown** Change an Existing Cell from Code to Markdown
What happens if you write some Python in a code cell and then you switch it to a Markdown cell? For example, put the following in a code cell.
1. Run the cell below with `shift + return` to be sure that it works as a code cell. _Hint: it should give you the
same result as **Problem 2**_.
1. Select the cell below and use `escape + M` to switch the cell to Markdown and run it again with `shift + return`. What happened and how might this be useful?
```
7 * 3
2 + 1
x = 6 * 7 + 12
print(x)
```
Print statements can help us find errors or unexpected results from our code. They allow us to check our assumptions.
Has the computer stored what we think it has?
This could also be useful if you wanted to show what the code generating your document looks like. Think code reviews,
colleagues, advisors, etc.
[Solution](#md-solutions-p03)
<a id='md-exercises-p04'></a>
**Problem 4:** Equations
Standard Markdown (such as we're using for these notes) won't render equations, but the Notebook will.
`$\Sigma_{i=1}^{N} 2^{-i} \approx 1$`
Think about the following questions:
1. What will it display?
1. What do you think the underscore `_` does?
1. What do you think the circumflex `^` does?
1. What do you think the dollar sign `$` does?
Change the Code cell below containing the equation to a Markdown cell and run it.
```
$\Sigma_{i=1}^{N} 2^{-i} \approx 1$
```
**Note:** If you received a <span style='color:red'> SyntaxError</span>, then you need to change the cell to a Markdown
cell and rerun.
[Solution](#md-solutions-p04)
<a id='md-solutions'></a>
## Markdown Exercise Solutions
[Table of Contents](#toc)
<a id='md-solutions-p01'></a>
### Problem 1: Creating Lists
This challenge integrates both the numbered list and the bullet list. Note that the bullet list is tabbed over to create the nesting necessary for the list.
```markdown
**Type the following in your Markdown cell:**
1. Get funding.
1. Do work.
* Design experiment.
* Collect data.
* Analyze.
1. Write up.
1. Publish.
```
[Back to Problem](#md-exercises-p01)
<a id='md-solutions-p02'></a>
### Problem 2: Math in python
The output of running the code cell is 54 because 6 multiplied by 7 is 42 and 42 plus 12 equals 54. This equation was stored as a variable called `x` and the last line executed was `print(x)`, which simply prints out the value of variable `x` at the current time. However, it still did all the other mathematical equations `7*3` and `2+1`, but it did not print them out because we did not ask the computer to do so.
[Back to Problem](#md-exercises-p02)
<a id='md-solutions-p03'></a>
### Problem 3: Math in markdown
In step 1, The output of running the code cell is 54 because 6 multiplied by 7 is 42 and 42 plus 12 equals 54. This
equation was stored as a variable called `x` and the last line executed was `print(x)`, which simply prints out the
value of variable `x` at the current time. However, it still did all the other mathematical equations `7*3` and
`2+1`, but it did not print them out because we did not store the value and ask the computer to print them.
The Python code gets treated like markdown text. The lines appear as if they are part of one contiguous paragraph.
This could be useful to temporarily turn on and off cells in notebooks that get used for multiple purposes. It is
also useful when you want to show the code you have written rather than the output of the code execution.
```markdown
7*3
2+1
x = 6 * 7 + 12
print(x)
```
[Back to Problem](#md-exercises-p03)
<a id='md-solutions-p04'></a>
### Problem 4: Equations
`$\Sigma_{i=1}^{N} 2^{-i} \approx 1$`
$\Sigma_{i=1}^{N} 2^{-i} \approx 1$
The notebook shows the equation as it would be rendered from LaTeX equation syntax. The dollar sign, `$`, is used to tell markdown that the text in between is a LaTeX equation. If you are not familiar with LaTeX, the underscore, `_`, is used for subscripts and the circumflex, `^`, is used for superscripts. A pair of curly braces, `{` and `}`, is used to group text together so that the statement `i=1` becomes the subscript and `N` becomes the superscript. Similarly, `-i` is in curly braces to make the whole statement the superscript for `2`. `\Sigma` and `\approx` are LaTeX commands for the "sum" and "approximately equal" symbols.
[anaconda]: https://docs.continuum.io/anaconda/install
[markdown]: https://en.wikipedia.org/wiki/Markdown
**A common error is to forget to run the cell as markdown.** The python interpreter does not know what to do with the \$. Syntax errors generally mean that the user has entered something incorrectly (check for typos before assuming the line of code is wrong altogether).
```markdown
File "<ipython-input-1-a80a20b3c603>", line 1
$\Sigma_{i=1}^{N} 2^{-i} \approx 1$
^
SyntaxError: invalid syntax
```
[Back to Problem](#md-exercises-p04)
<a id='python-1'></a>
# Intro to Python I: Data
[Table of Contents](#toc)
**Prerequisites:** None
This workshop will help researchers with no prior programming experience learn how to utilize Python to analyze research data. You will learn how to open data files in Python, complete basic data manipulation tasks and save your work without compromising original data. Oftentimes, researchers find themselves needing to do the same task with different data and you will gain basic experience on how Python can help you make more efficient use of your time.
**Learning Objectives:**
1. Clean/manipulate data
1. Automate repetitive tasks
**Learning Outcomes:** you will be able to…
1. read data into Pandas dataframe
1. use Pandas to manipulate data
1. save work to a datafile useable in other programs needed by researcher
1. write if/else statements
1. build for and while loops
<a id='python-introduction'></a>
## Programming with Python
[Table of Contents](#toc)
### What is Python and why would I use it?
A programming language is a way of writing commands so that an interpreter or compiler can turn them into machine
instructions. Python is just one of many different programming languages.
Even if you are not using Python in your work, you can use Python to learn the fundamentals of programming that will apply across languages.
**We like using Python in workshops for lots of reasons:**
* It is widely used in science
* It's easy to read and write
* There is a huge supporting community - lots of ways to learn and get help
* The Jupyter Notebook itself: not a lot of languages have this kind of tool (the name comes from Julia, Python, and R).
<a id='python-sp-char'></a>
### Special Characters
[Table of Contents](#toc)
We have already worked with special characters in markdown. Similarly, python uses certain special characters as part of its syntax. **Note:** special characters are not consistent across languages so make sure you familiarize yourself with the special characters in the languages in which you write code.
**Python Special Characters:**
* `[` : left `square bracket`
* `]` : right `square bracket`
* `(` : left `paren` (parentheses)
* `)` : right `paren` (parentheses)
* `{` : left `curly brace`
* `}` : right `curly brace`
* `<` : left `angle bracket`
* `>` : right `angle bracket`
* `-` `dash` (not hyphen. Minus only when used in an equation or formula)
* `"` : `double quote`
* `'` : `single quote` (apostrophe)
<a id='variables'></a>
### Variables
[Table of Contents](#toc)
Variables are used to store information in the computer that can later be referenced, manipulated and/or used by our programs. Important things to remember about variables include:
* We store values inside variables.
* We can refer to variables in other parts of our programs.
* In Python, the variable is created when a value is assigned to it.
* Values are assigned to variable names using the equals sign `=`.
* A variable can hold two types of things.
* Basic data types. For descriptions and details [(See Data Types)](#data-types)
* Objects - ways to structure data and code. In Python, all variables are objects.
* Variable naming convention:
* Cannot start with a digit
* Cannot contain spaces, quotation marks, or other punctuation
* Using a descriptive name can make the code easier to read **(You will thank yourself later)**
<a id='prac-variable'></a>
### Practice
[Table of Contents](#toc)
```
# What is happening in this code python cell
age = 34
first_name = 'Drake'
```
In the cell above, Python assigns an age (in this example 34) to a variable `age` and a name (Drake) in quotation marks to a variable `first_name`.
If you want to see the stored value of a variable in python, you can display the value by using the print command
`print()` with the variable name placed inside the parenthesis.
```
# what is the current value stored in the variable `age`
print(age)
```
**Write a print statement to show the value of variable `first_name` in the code cell below.**
```
# Print out the current value stored in the variable `first_name``
```
<a id='prob-variable'></a>
### Problem 5: Assigning variables and printing values
[Table of Contents](#toc)
1. Create two new variables called `age` and `first_name` with your own age and name
2. Print each variable out to display its value
**Extra Credit:** Combine values in a single print command by separating them with commas
```
# Insert your variable values into the print statement below
print(<insert variable here>, 'is', <insert variable here>, 'years old.')
```
The `print` command automatically puts a single space between items to separate them and wraps around to a new line at the end.
[Solution](#prob-variable-sol)
<a id='variable-calc'></a>
## Variables can be used in calculations.
[Table of Contents](#toc)
* We can use variables in calculations just as if they were values.
* Remember, we assigned **our own age** to `age` a few lines ago.
```
age = age + 3
print('My age in three years:', age)
```
* This now sets our age value to **our current age + 3 years**.
* We can also add strings together, but it works a bit differently. When you add strings together it is called **concatenating**.
```
name = "Sonoran"
full_name = name + " Desert"
print(full_name)
```
* Notice how I included a space in the quotes before "Desert". If we hadn't, we would have had "SonoranDesert"
* Can we subtract, multiply, or divide strings?
<a id='py-concatenate'></a>
## Problem 6: Printing your first and last name
[Table of Contents](#toc)
In the code cell below, create a new variable called last_name with your own last name.
Create a second new variable called full_name that is a combination of your first and last name.
```
# Print full name
```
[Solution](#py-concatenate-sol)
<a id='data-types'></a>
### Data Types
[Table of Contents](#toc)
**Some data types you will find in almost every language include:**
| Data Type| Abbreviation | Type of Information | Examples |
| :-| :-| :-| :-|
| Strings | str | characters, words, sentences or paragraphs| 'a' 'b' 'c' 'abc' '0' '3' ';' '?'|
| Integers | int | whole numbers | 1 2 3 100 10000 -100 |
| Floating point or Float | float | decimals | 10.0 56.9 -3.765 |
| Booleans | bool | logical test | True, False |
<a id='strings'></a>
### Strings
[Table of Contents](#toc)
One or more characters strung together and enclosed in quotes (single or double): "Hello World!"
```
greeting = "Hello World!"
print("The greeting is:", greeting)
greeting = 'Hello World!'
print('The greeting is:', greeting)
```
#### Need to use single quotes in your string?
Use double quotes to make your string.
```
greeting = "Hello 'World'!"
print("The greeting is:", greeting)
```
#### Need to use both?
```
greeting1 = "'Hello'"
greeting2 = '"World"!'
print("The greeting is:", greeting1, greeting2)
```
#### Concatenation
```
bear = "wild"
down = "cats"
print(bear + down)
```
Why aren't `greeting`, `greeting1`, `greeting2`, `bear`, or `down` enclosed in quotes in the statements above?
<a id='prac-strings'></a>
### Practice: Strings
[Table of Contents](#toc)
#### Use an index to get a single character from a string.
* The characters (individual letters, numbers, and so on) in a string are ordered.
* For example, the string "AB" is not the same as "BA". Because of this ordering, we can treat the string as a list of characters.
* Each position in the string (first, second, etc.) is given a number. This number is called an index or sometimes a subscript.
* Indices are numbered from 0.
* Use the position's index in square brackets to get the character at that position.
```
# String : H e l i u m
# Index Location: 0 1 2 3 4 5
atom_name = 'helium'
print(atom_name[0], atom_name[3])
```
<a id='numbers'></a>
### Numbers
[Table of Contents](#toc)
* Numbers are stored as numbers (no quotes) and are either integers (whole) or real numbers (decimal).
* In programming, numbers with decimal precision are called floating-point, or float.
* Floats use more processing than integers so use them wisely!
* Floats and integers come in various sizes but Python switches between them transparently.
```
my_integer = 10
my_float = 10.99998
my_value = my_integer
print("My numeric value:", my_value)
```
<a id='py-type'></a>
### Using Python built-in type() function
[Table of Contents](#toc)
If you are not sure of what your variables' types are, you can call a python function called `type()` in the same manner as you used `print()` function.
Python is an object-oriented language, so any defined variable has a type. Default common types are **str, int, float, list and tuple.** We will cover [lists](#py-lists) and [tuples](#py-tuples) later.
```
print("Type:", type(age))
print("Type:", type(first_name))
# Print out datatype of variables
print("my_value Type:", type(my_value))
print("my_float Type:", type(my_float))
```
<a id='booleans'></a>
### Boolean
[Table of Contents](#toc)
* Boolean values are binary, meaning they can only be either true or false.
* In python True and False (no quotes) are boolean values
```
is_true = True
is_false = False
print("My true boolean variable:", is_true)
print("Type:", type(is_false))
```
<a id='py-data-type'></a>
### Problem 7: What variable type do I have?
[Table of Contents](#toc)
size = '1024'
What data type is `size`? Use some of the python you have learned to provide proof of your answer.
<ol style="list-style-type:lower-alpha">
<li>float</li>
<li>string</li>
<li>integer</li>
<li>boolean</li>
</ol>
```
# Write your explanation as a comment and write the python code that outputs support for your answer.
```
[Solution](#py-data-type)
<a id='py-data-structures'></a>
## Data Structures
[Table of Contents](#toc)
Python has many objects that can be used to structure data including:
| Object | Data Structure | Mutable |
| :- | :- | :- |
| List | collections of values held together in brackets | Mutable |
| Tuple | collection of grouped values held together in parentheses | Immutable |
| Set | collections of unique values held together in curly braces | Mutable |
| Dictionary | collections of keys & values held together in curly braces | Mutable |
<a id='py-lists'></a>
### Lists
[Table of Contents](#toc)
Lists are collections of values held together in brackets:
```
list_of_characters = ['a', 'b', 'c']
print(list_of_characters)
```
<a id='prob-lists'></a>
### Problem 8: Creating and Working with Lists
[Table of Contents](#toc)
1. Create a new list called list_of_numbers with four numbers in it.
```
# Print out the list of numbers you created
```
* Just like strings, we can access any value in the list by its position in the list.
* **IMPORTANT:** Indexes start at 0
~~~
list: ['a', 'b', 'c', 'd']
index location: 0 1 2 3
~~~
```
# Print out the second value in the list list_of_numbers
```
2. Once you have created a list you can add more items to it with the append method
```
# Append a number to your list_of_numbers
```
[Solution](#prob-lists-sol)
#### Aside: Sizes of data structures
To determine how large (how many values/entries/elements/etc.) any Python data structure has, use the `len()` function
```
len(list_of_numbers)
```
Note that you cannot compute the length of a numeric variable:
```
len(age)
```
This will give an error: `TypeError: object of type 'int' has no len()`
However, `len()` can compute the lengths of strings
```
# Get the length of the string
print(len('this is a sentence'))
# You can also get the lengths of strings in a list
list_of_strings = ["Python is Awesome!", "Look! I'm programming.", "E = mc^2"]
# This will get the length of "Look! I'm programming."
print(len(list_of_strings[1]))
```
<a id='py-tuples'></a>
### Tuples
[Table of Contents](#toc)
Tuples are like a List, but **cannot be changed (immutable).**
Tuples can be used to represent any collection of data. They work well for things like coordinates. Notice below that
tuples are surrounded by parentheses `()` rather than square brackets `[]` that were used for lists.
```
tuple_of_x_y_coordinates = (3, 4)
print (tuple_of_x_y_coordinates)
```
Tuples can have any number of values
```
coordinates = (1, 7, 38, 9, 0)
print (coordinates)
icecream_flavors = ("strawberry", "vanilla", "chocolate")
print (icecream_flavors)
```
... and any types of values.
Once created, you `cannot add more items to a tuple` (but you can add items to a list). If we try to append, like we did with lists, we get an error
```
icecream_flavors.append('bubblegum')
```
<a id='lists-vs-tuples'></a>
### The Difference Between Lists and Tuples
[Table of Contents](#toc)
Lists are good for manipulating data sets. It's easy for the computer to add, remove and sort items. Sorted tuples are easier to search and index. This happens because tuples reserve entire blocks of memory to make finding specific locations easier while lists use addressing and force the computer to step through the whole list.

Let's say you want to get to the last item. The tuple can calculate the location because:
(address) = (size of data) × (index of the item) + (original address)
This is how zero indexing works. The computer can do the calculation and jump directly to the address. The list would need to go through every item in the list to get there.
Now let's say you wanted to remove the third item. Removing it from the tuple requires the tuple to be resized and copied; Python would even make you do this manually. Removing the third item from the list is as simple as making the second item point to the fourth, and Python makes this as easy as calling a method on the list object.
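As a small illustration of this difference (the flavor names are just an example), removing an item from a list is a single method call, while "removing" an item from a tuple means building a brand-new tuple yourself:
```python
icecream_list = ["strawberry", "vanilla", "chocolate"]
icecream_tuple = ("strawberry", "vanilla", "chocolate")

# Lists can be changed in place with a method call.
icecream_list.remove("vanilla")
print(icecream_list)   # ['strawberry', 'chocolate']

# Tuples cannot, so we have to build a new tuple without the unwanted item.
new_tuple = tuple(flavor for flavor in icecream_tuple if flavor != "vanilla")
print(new_tuple)       # ('strawberry', 'chocolate')

# Indexing works the same way for both.
print(icecream_tuple[2])   # chocolate
```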
<a id='py-sets'></a>
### Sets
[Table of Contents](#toc)
Sets are similar to lists and tuples, but can only contain unique values and are held inside curly braces.
For example a list could contain multiple exact values
```
# In the gapminder data that we will use, we will have data entries for the continents
# of each country in the dataset
my_list = ['Africa', 'Europe', 'North America', 'Africa', 'Europe', 'North America']
print("my_list is", my_list)
# A set would only allow for unique values to be held
my_set = {'Africa', 'Europe', 'North America', 'Africa', 'Europe', 'North America'}
print("my_set is", my_set)
```
Just like lists, you can append to a set using the add() method.
```
my_set.add('Asia')
# Now let's try to append one that is in:
my_set.add('Europe')
```
What will the print statements show now in the code cell below?
```
print("my_list is", my_list)
print("my_set is", my_set)
```
<a id='py-dictionaries'></a>
### Dictionaries
[Table of Contents](#toc)
* Dictionaries are collections of things that you can lookup like in a real dictionary:
* Dictionaries are organized into key and value pairs separated by commas (like lists) and surrounded by curly braces.
* E.g. {key1: value1, key2: value2}
* We call each association a "key-value pair".
```
dictionary_of_definitions = {"aardvark" : "The aardvark is a medium-sized, burrowing, nocturnal mammal native to "
"Africa.",
"boat" : "A boat is a thing that floats on water"}
```
We can find the definition of aardvark by giving the dictionary the "key" to the definition we want in brackets.
In this case the key is the word we want to lookup
```
print("The definition of aardvark is:", dictionary_of_definitions["aardvark"])
# Print out the definition of a boat
```
Just like lists and sets, you can add to dictionaries by doing the following:
```
dictionary_of_definitions['ocean'] = "An ocean is a very large expanse of sea, in particular each of the main areas into which the sea is divided geographically."
print(dictionary_of_definitions)
```
<a id='prob-dictionaries'></a>
### Problem 9: Creating and Accessing Dictionaries
[Table of Contents](#toc)
1. Create a dictionary called `zoo` with at least three animal types with a different count for each animal (How many
animals of that type are found at the zoo).
1. `print` out the count of the second animal in your dictionary
```
# Zoo Dictionary
```
[Solution](#prob-dictionaries-sol)
<a id='py-statements'></a>
## Statements
[Table of Contents](#toc)
OK great. Now what can we do with all of this?
We can plug everything together with a bit of logic and python language to make a program that can do things like:
* process data (data wrangling or manipulation)
* parse files
* data analysis
What kind of logic are we talking about?
We are talking about something called a "logical structure" which starts at the top (first line) and reads down the page in order
In python, a logical structure is often composed of statements. Statements are powerful operators that control the
flow of your script. There are two main types of statements:
* conditionals (if, while)
* loops (for, while)
<a id='py-conditionals'></a>
### Conditionals
[Table of Contents](#toc)
Conditionals are how we make a decision in the program.
In python, conditional statements are called if/else statements.
* If statement use boolean values to define flow.
* If something is True, do this. Else, do this
* While something is True, do some process.
**Building if/else statements in Python:**
1. Start first line with `if`
1. Then `some-condition` must be a logical test that can be evaluated as True or False
1. End the first line with `:`
1. Indent the next line(s) with `tab` or `4 spaces` (Jupyter does the indent automatically!)
1. `do-things`: give python commands to execute
1. End the statement with `else` and `:` (notice that if and else are in the same indent)
1. Indent the next line(s) with `tab` or `4 spaces` (Jupyter does the indent automatically!)
1. `do-different-things`: give python commands to execute
### Comparison operators:
`==` equality
`!=` not equal
`>` greater than
`>=` greater than or equal to
`<` less than
`<=` less than or equal to
```
weight = 3.56
if weight >= 2:
print(weight,'is greater than or equal to 2')
else:
print(weight,'is less than 2')
```
### Membership operators:
`in` check to see if data is **present** in some collection
`not in` check to see if data is **absent** from some collection
```
groceries=['bread', 'tomato', 'hot sauce', 'cheese']
if 'basil' in groceries:
print('Will buy basil')
else:
print("Don't need basil")
# this is the variable that holds the current condition of it_is_daytime
# which is True or False
it_is_daytime = True
# if/else statement that evaluates current value of it_is_daytime variable
if it_is_daytime:
print ("Have a nice day.")
else:
print ("Have a nice night.")
# before running this cell
# what will happen if we change it_is_daytime to True?
# what will happen if we change it_is_daytime to False?
```
* Often if/else statement use a comparison between two values to determine True or False
* These comparisons use "comparison operators" such as ==, >, and <.
* \>= and <= can be used if you need the comparison to be inclusive.
* **NOTE**: Two equal signs `==` is used to compare values, while one equals sign `=` is used to assign a value
* E.g.
1 > 2 is False<br/>
2 > 2 is False<br/>
2 >= 2 is True<br/>
'abc' == 'abc' is True
```
user_name = "Ben"
if user_name == "Marnee":
print ("Marnee likes to program in Python.")
else:
print ("We do not know who you are.")
```
* What if a condition has more than two choices? Does it have to use a boolean?
* Python if-statements will let you do that with elif
* `elif` stands for "else if"
```
if user_name == "Marnee":
print ("Marnee likes to program in Python.")
elif user_name == "Ben":
print ("Ben likes maps.")
elif user_name == "Brian":
print ("Brian likes plant genomes")
else:
print ("We do not know who you are")
# for each possibility of user_name we have an if or else-if statment to check the
# value of the name and print a message accordingly.
```
What does the following statement print?
my_num = 42
my_num = 8 + my_num
new_num = my_num / 2
if new_num >= 30:
print("Greater than thirty")
elif my_num == 25:
print("Equals 25")
elif new_num <= 30:
print("Less than thirty")
else:
print("Unknown")
<a id='prob-if-else'></a>
### Problem 10: Writing Conditional If/Else Statements
[Table of Contents](#toc)
Check to see if you have more than three entries in the `zoo` dictionary you created earlier. If you do, print "more than three animals". If you don't, print "three or less animals"
```
# write an if/else statement
```
Can you modify your code above to tell the user that they have exactly three animals in the dictionary?
```
# Modify conditional to include exactly three as potential output
```
[Solution](#prob-if-else-sol)
<a id='py-loops'></a>
### Loops
[Table of Contents](#toc)
Loops tell a program to do the same thing over and over again until a certain condition is met.
* In Python there are two main loop types:
* For loops
* While loops
<a id='for-loops'></a>
### For Loops
[Table of Contents](#toc)
A for loop executes the same command through each value in a collection.
Building blocks of a for loop:
> `for` each-item `in` variable `:`
>> `do-something`
**Building for loops in python:**
1. Start the first line with `for`
1. `each-item` is an arbitrary name for each item in the variable/list.
1. Use `in` to indicate the variable that holds the collection of information
1. End the first line with `:`
1. indent the following line(s) with `tab` or `4 spaces` (Jupyter does the indent automatically!)
1. `do-something` give python commands to execute
In the example below, `number` is our `each-item` and the `print()` command is our `do-something`.
```
# Running this cell will give a NameError because number has not been defined yet.
print(number)
# Run this cell and see if you figure out what this for loop does
for number in range(10): # does not include 10!
    print(number)
```
#### LOOPING a set number of times
We can do this with the function `range()`. `range()` automatically creates a sequence of numbers in a specified range.
In the example above, we get 10 numbers starting with 0 and increasing by one until we have 10 numbers. In the example below, we get the same end result even though we have given two numbers to `range()`: the start and end points of the range. **Note: Do not forget about python's zero-indexing**
```
# What will be printed
for number in range(0,10):
    print(number)

# What will be printed
for number in range(1,11):
    print(number)

# What will be printed
for number in range(10,0, -1):
    print(number)

# Change the code from the cell above so that python prints 9 to 0 in descending order

# This loop prints inside each iteration of the loop, which shows us a value for each of the 10 runs.
total = 0 # global variable
for i in range(10):
    total = total + i
    print(total)

# This loop prints only the final value, after the 10 runs have occurred.
total = 0
for i in range(10):
    total = total + i
print(total)
```
#### Saving Time
Looping can save you lots of time. We will look at a simple example to see how it works with lists, but imagine if your list was 100 items long. You do not want to write 100 individual print commands, do you?
```
# LOOPING over a collection
# LIST
# If I want to print a list of fruits, I could write out each print statement like this:
print("apple")
print("banana")
print("mango")
# or I could create a list of fruit
# loop over the list
# and print each item in the list
list_of_fruit = ["apple", "banana", "mango"]
# this is how we write the loop
# "fruit" here is a variable that will hold each item in the list, the fruit, as we loop
# over the items in the list
print (">>looping>>")
for fruit in list_of_fruit:
    print(fruit)
```
#### Creating New Data
You can also use loops to create new datasets as well. In the cell below, we use a mathematical operator to create a new list `data_2` where each value is double that of the value in the original list `data`.
```
data = [35,45,60,1.5,40,50]
data_2 = []
for i in data:
    data_2.append(i*2)
print(data_2)
```
<a id='prob-str-reverse-loop'></a>
### Problem 11: Reverse the string using a for loop
[Table of Contents](#toc)
There are many ways to reverse a string. I want to challenge you to use a for loop. The goal is to practice how to build a for loop (use multiple print statements) to help you understand what is happening in each step.
```
string = "waterfall"
reversed_string = ""
# For loop reverses the string given as input
# Print out the both the original and reversed strings
```
**Extra Credit: Accomplish the same task (reverse a string) without using a for loop.** _Hint: the reversing range example above gives you a clue AND Google always has an answer!_
```
# Reversing the string can be done by writing only one more line
string = "waterfall"
```
We can loop over collections of things like lists or dictionaries or we can create a looping structure.
```
# LOOPING over a collection
# DICTIONARY
# We can do the same thing with a dictionary and each association in the dictionary
fruit_price = {"apple" : 0.10, "banana" : 0.50, "mango" : 0.75}
for key, value in fruit_price.items():
    print("%s price is %s" % (key, value))
```
[Solution](#prob-str-reverse-loop-sol)
<a id='prob-dict-loop'></a>
### Problem 12: Looping through Dictionaries
[Table of Contents](#toc)
1. For each entry in your `zoo` dictionary, print that key
```
# print only dictionary keys using a for loop
```
2. For each entry in your zoo dictionary, print that value
```
# print only dictionary values using a for loop
```
3. Can you print both the key and its associated value using a for loop?
```
# print dictionary keys and values using a single for loop
```
[Solution](#prob-dict-loop-sol)
<a id='while-loops'></a>
### While Loops
[Table of Contents](#toc)
Similar to if statements, while loops use a boolean test to either continue looping or break out of the loop.
```
# While Loops
my_num = 10
while my_num > 0:
    print("My number", my_num)
    my_num = my_num - 1
print('My value is no longer greater than zero and I have exited the "while" loop as a result.')
```
NOTE: While loops can be dangerous, because if you forget to include an operation that modifies the variable being
tested (above, we're subtracting 1 at the end of each loop), it will continue to run forever and your script will never finish.
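One defensive pattern (not part of the lesson above, just a suggested sketch) is to add a safety counter that forces the loop to stop after a maximum number of iterations; `max_iterations` below is an arbitrary limit chosen for illustration.
```
# A while loop with a safety counter so it cannot run forever
my_num = 10
max_iterations = 100  # arbitrary upper bound, chosen for illustration
iterations = 0
while my_num > 0:
    print("My number", my_num)
    my_num = my_num - 1
    iterations = iterations + 1
    if iterations >= max_iterations:
        print("Stopping early: hit the iteration limit")
        break
```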
That is it. With just these data types, structures, and logic, you can build a program. We will write programs with functions in [Python II: A tool for programming](#python-2)
<a id='pandas'></a>
## Pandas: Working with Existing Data
[Table of Contents](#toc)
Thus far, we have been creating our own data as we go along and you are probably thinking "How in the world can this save me time?" This next section is going to help you learn how to import data that you already have. [Pandas](https://pandas.pydata.org/docs/) is a python package that is great for doing data manipulation.
<a id='read-data'></a>
### Pandas: Importing Data
[Table of Contents](#toc)
**Importing packages:** Pandas is a package written for Python but not part of the base Python install. In order to use add-on packages like this, we must first import them. This is conventionally the first thing you do in a script. If I were building a script using Jupyter Notebooks, I would generally do all the importing of packages I need for the entire notebook in the first code cell.
```
# Import packages
import pandas
```
**Note:** pandas is a long name and you will generally find a shortened version of the name in online help resources. As such, we will use the same convention in this workshop. It only requires a small modification to the import statement.
```
# Import packages
import pandas as pd
```
Now that we have pandas at our disposal, we are ready to import some data. We will be working with a freely available dataset called [gapminder](https://www.gapminder.org/). The first data set we are going to look at is called `Afghanistan_Raw`.
```
# import from excel spreadsheet
afghanistan_xlsx = pd.read_excel('gapminder_data/Afghanistan_Raw.xlsx')
# import from csv file
afghanistan_csv = pd.read_csv('gapminder_data/Afghanistan_Raw.csv')
```
The cell above assigns a `variable` to a pandas dataframe.
To create a pandas dataframe:
1. We use `pd` to tell python that we want to use the pandas package that we imported.
1. We use `.read_excel()` or `.read_csv()` to tell pandas what type of file format we are giving it.
1. We have given the `relative path` to the file in parentheses.
**Relative paths** are your best friend when you want your code to be easily moved or shared with collaborators. They
use your current position in the computer's file structure as the starting point.
* If you write a script with relative paths on your work computer, email it to yourself, and continue working on your personal home computer, it should still work: the usernames may be different, but they never appear in the paths, and the file structure is the same relative to the directory in which we are working.
* The `current working directory` is where the Jupyter Notebook is stored unless you manually change it.
#### Project Directory
Intro_Python_Resbaz_2021.ipynb
├── array_vs_list.png
├── gapminder_data
│   ├── Afghanistan_Raw.csv
│   ├── Afghanistan_Raw.xlsx
│   ├── Afghanistan_Fixed.csv
│   └── gapminder_by_country
├── jn_binder_options.png
├── jn_options.png
├── Intro_Python_Resbaz_2021.ipynb
└── scripting_practice.ipynb
**Absolute paths** can be useful if the thing you are trying to access is never going to move. They start at the root of the computer's file structure and work out to the file's location. **Note: this includes the computer's username.**
* If you work on a script with absolute paths on your work computer, email it to yourself and try to continue working on your personal home computer, it will fail because the usernames and computer's file structure are different.
* My absolute path (work): /Users/**drakeasberry**/Desktop/2021_Resbaz_Python/intro_python
* My absolute path (home): /Users/**drake**/Desktop/2021_Resbaz_Python/intro_python
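To make the difference concrete, here is a small sketch; the absolute path simply reuses the example username shown above, so it will differ on your machine.
```
import os

# The current working directory: the starting point for relative paths
print(os.getcwd())

# Relative path: built from the current working directory
relative_path = os.path.join('gapminder_data', 'Afghanistan_Raw.csv')

# Absolute path: starts at the root of the file system and includes the username
absolute_path = '/Users/drakeasberry/Desktop/2021_Resbaz_Python/intro_python/gapminder_data/Afghanistan_Raw.csv'

print(relative_path)
print(absolute_path)
```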
```
print('This is the excel file:\n\n', afghanistan_xlsx)
print('\nThis is the csv file:\n\n', afghanistan_csv)
```
This prints each file separately, and I have added a few line breaks `\n` just to make it a little easier to read. However, the output may still feel unfamiliar and hard to read for you or your colleagues. If we do not wrap the data variable inside `print()`, then pandas will render a formatted table that is more visually pleasing. Let's look at the difference.
```
# Use print to label the output, but let pandas render the table
print('This is the excel file:')
afghanistan_xlsx
# Use print to label the output, but let pandas render the table
print('This is the csv file:')
afghanistan_csv
```
<a id='manipulate-data'></a>
### Pandas: Manipulating Data
[Table of Contents](#toc)
As you can see above, both ways of importing data produce the same result. The type of data file you use is a personal choice, but not one that should be taken for granted. Microsoft Excel is a licensed product and not everyone may have access to open `.xlsx` files, whereas a `.csv` file is a comma-separated values document that can be read by many free text editors. `.csv` files are also generally smaller than the same information stored in a `.xlsx` file. My preferred choice is `.csv` files due to their smaller size and easier accessibility.
```
afghanistan_csv.country.unique()
# Drop all rows with no values
afghanistan_csv.dropna(how='all')
# What prints now and why?
afghanistan_csv
# If we want to save the operations we need to store it in a variable (we will overwrite the existing one here)
afghanistan_csv = afghanistan_csv.dropna(how='all')
afghanistan_csv
# we will store a new dataframe called df to save some typing
# we will subset the data to only rows that have a country name
df = afghanistan_csv.dropna(subset=['country'])
df
df = df.rename(columns={'pop':'population'})
# We expect Afghanistan to be the only country in this file
# Let's check our assumptions
df.country.unique()
```
<a id='prob-unique'></a>
### Problem 13: Checking assumptions about your data
[Table of Contents](#toc)
You can use `df.info()` to get a general idea about the data, and then you can investigate the remaining columns to see if the data is as you expect.
```
# this will give a quick overview of the data frame to give you an idea of where to start looking
# Hint: Check your assumptions about values dataframe
```
[Solution](#prob-unique-sol)
Our investigation has shown us that some of the data has errors, but it is probably still useful if we correct them.
* The year column is being read as a float instead of an object (we will not be doing mathematics on years)
* The year column still has a missing value
* The population column is being read as an object instead of an integer (we may want to do mathematics on population)
* The continent column has a typo `Asiaa` and `tbd`
Let's see if we can fix these issues together.
```
# Let's fix the typos in continent column
df = df.replace(to_replace =["Asiaa", "tbd"], value ="Asia")
df
# Let's take a closer look at year column by sorting
df.sort_values(by='year')
```
By sorting the dataframe based on year, we can see that the years are incrementing by 5 years. We can also deduce that the year 1982 is missing.
Depending on the data, you will have to make a decision as the researcher:
* Are you confident that you can say that you have replaced the value correctly and the rest of the data is good?
* Do you delete the data based on the fact that it had missing data?
In this case, we are going to replace the missing value with 1982 because we believe it is the right thing to do in this particular case.
**Note:** In general, you should be very selective on replacing missing values.
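Before filling anything in, it can help to count how many values are actually missing; a minimal check using pandas (assuming the `df` from above) might look like this:
```
# Count missing values per column before deciding how to handle them
print(df.isna().sum())

# How many rows would we lose if we simply dropped incomplete rows?
print(len(df) - len(df.dropna()))
```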
```
df['year'] = df['year'].fillna(1982)
df
# Finally, let's fix the datatypes of columns
df = df.astype({"year": int, "population": int})
df
# Let's check to see if it is working the way we think it is
df.info()
```
<a id='write-data'></a>
### Pandas: Writing Data
[Table of Contents](#toc)
Now that we have made all the changes necessary, we should save our corrected dataframe as a new file.
```
# Save file with changes we made
df.to_csv('gapminder_data/Afghanistan_Fixed.csv')
```
<a id='all-countries'></a>
### Pandas: Working with more than one file
[Table of Contents](#toc)
```
#Import pandas library using an alias
import pandas as pd
# Import glob library which allows us to use regular expressions to select multiple files
import glob
# Let's see where we are within the computer's directory structure
# The exclamation point allows us to utilize a bash command in the notebook
!pwd
# Let's see what files and folders are in our current location
!ls
# Let's see what files and folders are in the gapminder_data directory
!ls gapminder_data/
# Let's see what files and folders are in the gapminder_data/gapminder_by_country directory
!ls gapminder_data/gapminder_by_country/
```
We worked with one file `Afghanistan` in the previous section, now we will combine everything we have seen to work with all the countries data that we have.
1. Find files in `gapminder_data/gapminder_by_country/`
1. Get all filenames into a list
1. Remove `country.cc.txt`
1. For loop to append file lines into a pandas dataframe
1. Add column names from `country.cc.txt`
```
# glob.glob will match files in the current directory based on a pattern
countries = sorted(glob.glob('gapminder_data/gapminder_by_country/*.cc.txt'))
len(countries)
# Remove the header file from the list of files
# If you try to run this cell more than once, you will get an error
# because the item does not exist once it has been removed after the first execution of this cell
countries.remove('gapminder_data/gapminder_by_country/country.cc.txt')
# Check the length of the list to ensure the item was correctly removed
len(countries)
# creating dataframe from a for loop:
df = pd.DataFrame()
# Go through each of 142 files and append until all countries are in one dataframe
for country in countries:
    c = pd.read_csv(country, sep='\t', header=None)
    df = df.append(c, ignore_index=True)
# Import header and store as list
header = pd.read_csv('gapminder_data/gapminder_by_country/country.cc.txt', sep='\t')
column_names = list(header.columns)
# Add header to dataframe created with the loop
df.columns = column_names
# Gives us number of rows and columns
df.shape
# Get summary statistics
df.describe()
# Do you remember how to change column types
# Solution
# Do you remember how to change column types
df = df.astype({"year": int, "pop": int})
df.describe()
```
Save the summary of the dataframe with `to_csv`. Create a NEW file name, otherwise you will overwrite the files we downloaded!
```
df.describe().to_csv('gapminder_summ_stats.csv')
ls
```
<a id='slicing'></a>
### Pandas: Slicing and selecting values
[Table of Contents](#toc)
<div class="alert alert-block alert-success">
<b>Pandas Dataframe:</b>
- 2-dimensional representation of a table
- Series is the data-structure Pandas use to represent a column.
</div>
Because it's 2 dimensional, we have to specify which rows and which columns we want to select.
```
# see the first 5 rows in the dataframe by default, but you can use any number in parentheses to see more or less
df.head()
```
**`.loc[]` to select values by the name**
**`.loc[a:b,i:j]`**, where
a and b are the rows
i and j are the columns
Need to set index first:
```
df=df.set_index('country')
df
# this returns all the rows and columns where the index is Brazil
df.loc['Brazil']
# this returns all the rows and columns where the index is Brazil through Ecuador (alphabetically)
df.loc['Brazil':'Ecuador']
# this returns all the rows where the index is Brazil through Ecuador (alphabetically), but only includes the columns
# between year and lifeExp (moving from left to right across the dataframe)
df.loc['Brazil':'Ecuador','year':'lifeExp']
# this returns all the rows where the index is Brazil or Ecuador, but only includes the columns
# between year and lifeExp (moving from left to right across the dataframe)
df.loc[['Brazil','Ecuador'],'year':'lifeExp']
```
**`.iloc[]` to select values by the index**
**`.iloc[a:b,i:j]`**, where
a and b are the indexes of rows
i and j are the indexes of columns
```
# this returns rows 10 through 16 and all but the last column (gdpPercap)
df.iloc[9:16,:-1]
```
**Observation:**
```
-3:-1, omits the final index (column gdpPercap) in the range provided, while a named slice includes the final element.
```
```
# this returns rows 10 and 17 and only the columns continent and lifeExp
df.iloc[[9,16],-3:-1]
# this also returns rows 10 and 17 and only the columns continent and lifeExp
df.iloc[[9,16],2:4]
```
<a id='summary-stats'></a>
### Problem 14: Slice and save summary statistics
[Table of Contents](#toc)
Select two countries of your interest. Slice the `df` to select only these countries. Then, obtain summary statistics by country, and save to a file.
```
# pick two countries to subset and save file with a descriptive name
```
[Solution](#summary-stats-sol)
<a id='py1-solutions'></a>
## Python I: Problem Solutions
[Table of Contents](#toc)
<a id='prob-variable-sol'></a>
### Problem 5: Assigning variables and printing values
1. Create two new variables called `age` and `first_name` with your own age and name
2. Print each variable out to display its value
[Back to Problem](#prob-variable)
```
age = '<your age>'
first_name = '<your first name>'
print(age)
print(first_name)
```
**Extra Credit:** You can also combine values in a single print command by separating them with commas
```
# Insert your variable values into the print statement below
print(first_name, 'is', age, 'years old')
```
Correct Output:
If you received this output, then you correctly assigned new variables and combined them correctly in the print statement. The information represented between `<>` should reflect your personal information at this point.
```markdown
<your age>
<your first name>
<your first name> is <your age> years old
```
If you received this output, then you forgot to assign new variables.
```markdown
34
Drake
Drake is 34 years old
```
If you received this output, then you correctly assigned new variables but mixed up the order in the combined print statement.
```markdown
<your age>
<your first name>
<your age> is <your first name> years old
```
<a id='py-concatenate-sol'></a>
### Problem 6: Printing your first and last name
In the code cell below, create a new variable called last_name with your own last name.
Create a second new variable called full_name that is a combination of your first and last name.
```
# Print full name
first_name = 'Drake'
last_name = 'Asberry'
full_name = first_name + ' ' + last_name
print(full_name)
```
[Back to Problem](#py-concatenate)
<a id='py-data-type-sol'></a>
### Problem 7: What variable type do I have?
size = '1024'
What data type is `size`? Use some of the python you have learned to provide proof of your answer.
<ol style="list-style-type:lower-alpha">
<li>float</li>
<li>string</li>
<li>integer</li>
<li>boolean</li>
</ol>
```
# Write your explanation as a comment and write the python code that outputs support for your answer.
size = '1024'
print(type(size), "is a string because when we stored the variable, we wrapped it in single quotes ''. Python "
"understood this to be a string instead of an integer as a result.")
```
[Back to Problem](#py-data-type)
<a id='prob-lists-sol'></a>
### Problem 8: Creating and Working with Lists
1. Create a new list called list_of_numbers with four numbers in it.
```
# Print out the list of numbers you created
list_of_numbers = [0, 1, 2, 3]
print(list_of_numbers)
# Print out the second value in the list list_of_numbers
print(list_of_numbers[1])
```
2. Once you have created a list you can add more items to it with the append method
```
# Append a number to your list
list_of_numbers.append(5)
print(list_of_numbers)
```
[Back to Problem](#prob-lists)
<a id='prob-dictionaries-sol'></a>
### Problem 9: Creating and Accessing Dictionaries
1. Create a dictionary called `zoo` with at least three animal types with a different count for each animal.
1. `print` out the count of the second animal in your dictionary
```
# Zoo Dictionary
zoo = {'bears':25, 'lions':19, 'monkeys':67}
print(zoo['lions'])
```
[Back to Problem](#prob-dictionaries)
<a id='prob-if-else-sol'></a>
### Problem 10: Writing Conditional If/Else Statements
Check to see if you have more than three entries in the `zoo` dictionary you created earlier. If you do, print "more than three animals". If you don't, print "three or less animals"
```
# write an if/else statement
if len(zoo) > 3:
    print("more than three animals")
else:
    print("three or less animals")
```
Can you modify your code above to tell the user that they have exactly three animals in the dictionary?
```
# Modify conditional to include exactly three as potential output
if len(zoo) > 3:
    print("more than three animals")
elif len(zoo) < 3:
    print("less than three animals")
else:
    print("exactly three animals")
```
[Back to Problem](#prob-if-else)
<a id='prob-str-reverse-loop-sol'></a>
### Problem 11: Reversing Strings
There are many ways to reverse a string. I want to challenge you to use a for loop. The goal is to practice how to build a for loop (use multiple print statements) to help you understand what is happening in each step.
```
string = "waterfall"
reversed_string = ""
for char in string:
    #print(reversed_string)
    reversed_string = char + reversed_string
    #print(char)
    #print(reversed_string)
print('The original string was:', string)
print('The reversed string is:', reversed_string)
```
**Extra Credit: Accomplish the same task (reverse a string) without using a for loop.** _Hint: the reversing range example above gives you a clue AND Google always has an answer!_
```
string = "waterfall"
print(string[::-1])
```
[Back to Problem](#prob-str-reverse-loop)
<a id='prob-dict-loop-sol'></a>
### Problem 12: Looping through Dictionaries
[Table of Contents](#toc)
1. For each entry in your `zoo` dictionary, print that key
```
# print only dictionary keys using a for loop
for key in zoo.keys():
    print(key)
```
2. For each entry in your zoo dictionary, print that value
```
# print only dictionary values using a for loop
for value in zoo.values():
    print(value)
```
3. Can you print both the key and its associated value using a for loop?
```
# print dictionary keys and values using a single for loop
for key, value in zoo.items():
    print(key, value)
```
[Back to Problem](#prob-dict-loop)
<a id='prob-unique-sol'></a>
### Problem 13: Checking assumptions about your data
[Table of Contents](#toc)
You can use `df.info()` to get a general idea about the data, and then you can investigate the remaining columns to see if the data is as you expect.
```
# this will give a quick overview of the data frame to give you an idea of where to start looking
print('total rows in dataframe:', len(df))
df.info()
# Hint: Check your assumptions about values dataframe
df.year.unique()
columns = list(df.columns)
for column in columns:
    unique_val = df[column].unique()
    print(column, ':\nunique values:\n', unique_val, '\n\n')
```
[Back to Problem](#prob-unique)
<a id='summary-stats-sol'></a>
### Problem 14: Slice and save summary statistics
Select two countries of your interest. Slice the `df` to select only these countries. Then, obtain summary statistics by country, and save to a file.
```
# My Solution
my_countries = df.loc[['China','Germany'],'pop':]
my_countries.describe().to_csv('china_germany_summ_stats.csv')
my_countries
```
[Back to Problem](#summary-stats)
<a id='python-2'></a>
## Intro to Python II: A Tool for Programming
[Table of Contents](#toc)
**Prerequisites:** Intro to Python 1: Data OR knowledge of another programming language
This workshop will help attendees build on previous knowledge of Python or another programming language in order to harness the power of Python to make your computer work for you. You will learn how to write your own Python functions, save your code as scripts that can be called from future projects, and build a workflow to chain multiple scripts together.
**Learning Objectives:**
1. Understand the syntax of python functions
1. Understand the basics of scripting in python
1. Understand data analysis cycles
**Learning Outcomes:** you will be able to…
1. Write your own functions
1. Save code as a script
1. Build a workflow
<a id='python-2-setup'></a>
## Setup if you are joining in for Python II
[Table of Contents](#toc)
**Run the next three code cells to have the data you need to work with in this section.**
```
# import libraries
import pandas as pd
# Create a dictionary with rainfall, temperature and pressure
data={'rainfall_inches':[1.34,1.56,4.33],
'temperature_F':[75,80,96],
'pressure_psi':[10,2,35]}
data
string = "waterfall"
print(string[::-1])
```
<a id='functions'></a>
## Functions:
[Table of Contents](#toc)
Create your own functions, especially if you need to perform the same operation many times. This will make your code cleaner.
* Functions are known by many names in other languages, most commonly methods and subroutines.
* A function has a contract that guarantees certain output based on certain input(s)
* Variables get passed into the function
* The function then performs actions based on the variables that are passed
* A new value is returned from the function
In python we are able to define a function with `def`. First you define the function and later you call the defined function.
Here we define a function that we will call "add_two_numbers"
* def add_two_numbers():
```
# this defines our function
def add_two_numbers():
    answer = 50 + 15
    return answer
# this calls the function and stores in the variable `x`
x = add_two_numbers()
x
```
That function seems a little silly because we could just add 50 and 15 easier than defining a function to do it for us. However, imagine 50 was some constant that we need to add observations to. Now we could rewrite the function to accept an observation to add to our constant of 50.
```
# this defines our function
# the "num1" inside the parentheses means it is expecting us to pass a value to the function when we call it
def add_to_constant(num1):
    answer = 50 + num1
    return answer
# this calls the function and stores in the variable `y`
# the value we want to pass goes inside the parentheses in the call
y = add_to_constant(10)
y
```
Change the value that you pass to the function to see how it works.
<a id='why-functions'></a>
### Why Use Functions?
[Table of Contents](#toc)
Functions let us break down our programs into smaller bits that can be reused and tested.
Human beings can only keep a few items in working memory at a time. We can only understand larger/more complicated ideas by understanding smaller pieces and combining them. Functions serve the same purpose in programs: we encapsulate complexity so that we can treat it as a single "thing", and this enables reusability. Write the code once, but use it many times in our program or programs.
1. Testability
* Imagine a really big program with lots of lines of code. There is a problem somewhere in the code because you are
not getting the results you expect.
* How do you find the problem in your code?
* If your program is composed of lots of small functions that only do one thing then you can test each function individually.
2. Reusability
* Imagine a really big program with lots of lines of code. There is a section of code you want to use in a different part of the program.
* How do you reuse that part of the code?
* If you just have one big program then you have to copy and paste that bit of code where you want it to go, but
if that bit was a function, you could just use that function again.
3. Writing cleaner code
* Always keep both of these concepts in mind when writing programs.
* Write small functions that do one thing.
* Never have one giant function that does a million things.
* A well written script is composed of lots of functions that do one thing.
<a id='str-reverse-func'></a>
### Let's revisit the reverse string and turn it into a function
[Table of Contents](#toc)
```
# Create the function
def reverse_text(string):
    """Function to reverse text in strings.
    """
    result = string[::-1]
    return result
# Call the function and pass a string as input
reverse_text("waterfall")
# you can also pass a variable to function
original='pool'
reverse_text(original)
```
This may seem trivial, but we could use a function like this to ask a user for a word that they would like to see written in reverse. Each time input is given, we run the same code to return the reversed spelling of the word they gave us, as in the sketch below.
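Here is a minimal sketch of that idea, wrapped in a hypothetical helper called `ask_and_reverse` that reuses `reverse_text` and Python's built-in `input()`:
```
def ask_and_reverse():
    """Ask the user for a word and print it reversed, reusing reverse_text()."""
    word = input('Enter a word to reverse: ')
    print('Reversed:', reverse_text(word))

# Uncomment to try it interactively; each call runs the same reversing code
# ask_and_reverse()
```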
<a id='temp-func'></a>
### Let's look at a real world example of where constants could be used in functions
[Table of Contents](#toc)
```
# Create the function
def convert_temp(temperature, unit):
    """Function to convert temperature from F to C, and vice-versa.
    Need temperature (integer or float) and unit (string, uppercase F or C)
    """
    t = int(temperature)
    u = str(unit)
    if u == 'C':
        fahr = (9/5*t) + 32
        print('{}C is {}F'.format(t, int(fahr)))
    elif u == 'F': # or else:
        celsius = (t-32)*5/9
        print('{}F is {}C'.format(t, int(celsius)))

convert_temp(85,'C')
# Using the question mark following the function name, we see information about the function and how we might use it
convert_temp?
# will demonstrate this depending on time
def convert_temp2():
    """Function to convert temperature from F to C, and vice-versa.
    User input.
    """
    t = int(input('Enter temperature:'))
    u = str(input('Enter unit (F or C):'))
    if u == 'C':
        fahr = 9/5*t + 32
        return '{}C is {}F'.format(t, int(fahr))
    elif u == 'F':
        celsius = (t-32)*5/9
        return '{}F is {}C'.format(t, int(celsius))
    else:
        return "Don't know how to convert..."
convert_temp2()
convert_temp2()
convert_temp2()
```
<a id='scripting'></a>
## Scripting
[Table of Contents](#toc)
For this section we are going to open the other Jupyter Notebook found in our repository to ensure we are starting
with a clean slate.
1. Save your progress in the current notebook; you may also want to download a copy for your records, which can be done from the `File` menu.
1. `Go to File > Open > scripting_practice.ipynb` to open the notebook.
<a id='errors'></a>
## Common Errors
[Table of Contents](#toc)
### Help yourself
```
help(print)
help(len)
?len
?data
dir(data)
```
```
help(your_data_object)
dir(your_data_object)
```
### Variable errors
```
# need to create/define a variable before using it
chocolate_cake
# this also includes misspellings...
first_name='Nathalia'
firt_name
```
### Syntax errors
```
# Syntax errors: when you forget to close a )
## EOF - end of file
## means that the end of your source code was reached before all code blocks were completed
print(len(first_name)
print(len(first_name))
# Syntax errors: when you forgot a ,
tires=4
print('My car has'tires,' tires')
# Syntax errors: forgot to close a quote ' in a string
## EOL = end of line
print('My car has',tires,' tires)
tires=4
print('My car has',tires,' tires')
# Syntax errors: when you forget the colon at the end of a line
data=[1,2,3,4]
for i in data
    print(i**2)

# Indentation errors: forgot to indent
for i in data:
print(i**2)

# Corrected: the loop body is indented
for i in data:
    print(i**2)
```
### Index errors
```
groceries=['banana','cheese','bread']
groceries[3]
```
### Character in strings are IMMUTABLE
```
fruit='mango'
fruit[3]
fruit[3]='G'
```
### Item in list is MUTABLE
```
fruits=['mango','cherry']
fruits[1]
fruits[1]='apple'
fruits
```
### Character in item of a list is IMMUTABLE
```
fruits[1]
fruits[1][2]
fruits[1][2]='P'
```
<a href="https://colab.research.google.com/github/vndee/pytorch-vi/blob/master/chatbot_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## CHATBOT
**Author**: [Matthew Inkawhich](https://github.com/MatthewInkawhich)
In this tutorial we will explore a fun application of seq2seq models. We will train a simple chatbot using movie dialogue from the [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html).
Conversational models are a hot topic in artificial intelligence research. Chatbots can be found in many useful products, such as customer support departments or online consulting services. These bots are often retrieval-based: their answers are predefined for each type of question. Teaching a machine to hold a natural conversation with a human is still a hard problem and far from solved. Recently, riding the deep learning boom, powerful generative models such as Google's Neural Conversational Model have made an impressive leap forward. In this tutorial we will implement that kind of generative model with PyTorch.

```
> hello?
Bot: hello .
> where am I?
Bot: you re in a hospital .
> who are you?
Bot: i m a lawyer .
> how are you doing?
Bot: i m fine .
> are you my friend?
Bot: no .
> you're under arrest
Bot: i m trying to help you !
> i'm just kidding
Bot: i m sorry .
> where are you from?
Bot: san francisco .
> it's time for me to leave
Bot: i know .
> goodbye
Bot: goodbye .
```
### Main sections:
- Load and preprocess the [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html) dataset.
- Implement a seq2seq model with Luong attention.
- Jointly train the encoder and decoder models with mini-batches.
- Implement greedy-search decoding.
- Interact with the trained model.
### Acknowledgments:
The code in this post borrows from the following open-source projects:
- Yuan-Kuei Wu's pytorch-chatbot implementation: https://github.com/ywk991112/pytorch-chatbot
- Sean Robertson's practical-pytorch seq2seq-translation example: https://github.com/spro/practical-pytorch/tree/master/seq2seq-translation
- FloydHub's Cornell Movie Corpus preprocessing code: https://github.com/floydhub/textutil-preprocess-cornell-movie-corpus
## Preparations
First, download the data [here](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html) and unzip it.
```
!wget --header 'Host: www.cs.cornell.edu' --user-agent 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0' --header 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' --header 'Accept-Language: en-US,en;q=0.5' --header 'Upgrade-Insecure-Requests: 1' 'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip' --output-document 'cornell_movie_dialogs_corpus.zip'
!unzip cornell_movie_dialogs_corpus.zip
!ls cornell\ movie-dialogs\ corpus
```
Import some supporting libraries:
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
```
## Load & preprocess data
The next step is to reorganize our data. The Cornell Movie-Dialogs Corpus is a large dataset of conversations between movie characters:
- 220,579 conversational exchanges between 10,292 pairs of characters
- 9,035 characters from 617 movies
- 304,713 total utterances
This dataset is large and diverse, spanning many language styles, time periods, settings, and sentiments. Our hope is that the model will be robust enough to handle many different ways of phrasing a query.
First, let's look at a few lines of the original data file to see what we are working with.
```
corpus_name = 'cornell movie-dialogs corpus'
def printLines(file, n=10):
with open(file, 'rb') as datafile:
lines = datafile.readlines()
for line in lines[:n]:
print(line)
printLines(os.path.join(corpus_name, 'movie_lines.txt'))
```
For convenience, we will reorganize the data into a new format: each line of the new file will contain a tab-separated query sentence and response sentence.
Below we need a few functions to parse the raw movie_lines.txt file:
- `loadLines`: splits each line of the file into a Python dictionary of fields (lineID, characterID, movieID, character, text).
- `loadConversations`: groups the fields of lines from `loadLines` into conversations based on movie_conversations.txt.
- `extractSentencePairs`: extracts pairs of sentences from the conversations.
```
# Splits each line of the file into a dictionary of fields
def loadLines(fileName, fields):
lines = {}
with open(fileName, 'r', encoding='iso-8859-1') as f:
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
lineObj = {}
for i, field in enumerate(fields):
lineObj[field] = values[i]
lines[lineObj['lineID']] = lineObj
return lines
# Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*
def loadConversations(fileName, lines, fields):
conversations = []
with open(fileName, 'r', encoding='iso-8859-1') as f:
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
convObj = {}
for i, field in enumerate(fields):
convObj[field] = values[i]
# Convert string to list (convObj["utteranceIDs"] == "['L598485', 'L598486', ...]")
lineIds = eval(convObj["utteranceIDs"])
# Reassemble lines
convObj["lines"] = []
for lineId in lineIds:
convObj["lines"].append(lines[lineId])
conversations.append(convObj)
return conversations
# Extracts pairs of sentences from conversations
def extractSentencePairs(conversations):
qa_pairs = []
for conversation in conversations:
# Iterate over all the lines of the conversation
for i in range(len(conversation["lines"]) - 1): # We ignore the last line (no answer for it)
inputLine = conversation["lines"][i]["text"].strip()
targetLine = conversation["lines"][i+1]["text"].strip()
# Filter wrong samples (if one of the lists is empty)
if inputLine and targetLine:
qa_pairs.append([inputLine, targetLine])
return qa_pairs
```
Now we call the functions defined above to create a new data file named formatted_movie_lines.txt.
```
# Define path to new file
datafile = os.path.join(corpus_name, 'formatted_movie_lines.txt')
delimiter = '\t'
# Unescape the delimiter
delimiter = str(codecs.decode(delimiter, 'unicode_escape'))
# Initialize lines dict, conversations list, and field ids
lines = {}
conversations = []
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
# Load lines and process conversations
print("\nProcessing corpus...")
lines = loadLines(os.path.join(corpus_name, "movie_lines.txt"), MOVIE_LINES_FIELDS)
print("\nLoading conversations...")
conversations = loadConversations(os.path.join(corpus_name, "movie_conversations.txt"),
lines, MOVIE_CONVERSATIONS_FIELDS)
# Write new csv file
print("\nWriting newly formatted file...")
with open(datafile, 'w', encoding='utf-8') as outputfile:
writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
for pair in extractSentencePairs(conversations):
writer.writerow(pair)
# Print a sample of lines
print("\nSample lines from file:")
printLines(datafile)
```
### Load and trim data
Now that the data is reorganized, we need to create a vocabulary of the words used in the dataset and load the query/response sentence pairs into memory.
Note that we treat a sentence as a sequence of **words**, with no implicit mapping to a discrete numerical space. We therefore have to create a mapping in which each distinct word corresponds to exactly one index value, namely its position in the vocabulary.
To do this we define a `Voc` class, which keeps a dictionary mapping **words** to **indexes**, a reverse dictionary mapping **indexes** to **words**, a count of each word, and a total word count. The class also provides methods for adding a word to the vocabulary (`addWord`), adding all the words in a sentence (`addSentence`), and trimming (removing) infrequently seen words. More on trimming later:
```
# Default word tokens
PAD_token = 0 # Used for padding short sentences
SOS_token = 1 # Start-of-sentence token
EOS_token = 2 # End-of-sentence token
class Voc:
def __init__(self, name):
self.name = name
self.trimmed = False
self.word2index = {}
self.word2count = {}
self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
self.num_words = 3 # Count SOS, EOS, PAD
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.num_words
self.word2count[word] = 1
self.index2word[self.num_words] = word
self.num_words += 1
else:
self.word2count[word] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed:
return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
keep_words.append(k)
print('keep_words {} / {} = {:.4f}'.format(
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
self.num_words = 3 # Count default tokens
for word in keep_words:
self.addWord(word)
```
Before training we need a few more preprocessing steps. First, we convert Unicode strings to ASCII using `unicodeToAscii`. Next, we convert all characters to lowercase and strip out every non-letter character except basic punctuation (`normalizeString`). Finally, to help training converge more quickly, we filter out sentences longer than the `MAX_LENGTH` threshold (`filterPairs`).
```
MAX_LENGTH = 10 # Maximum sentence length to consider
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
s = re.sub(r"\s+", r" ", s).strip()
return s
# Read query/response pairs and return a voc object
def readVocs(datafile, corpus_name):
print("Reading lines...")
# Read the file and split into lines
lines = open(datafile, encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
voc = Voc(corpus_name)
return voc, pairs
# Returns True iff both sentences in a pair 'p' are under the MAX_LENGTH threshold
def filterPair(p):
# Input sequences need to preserve the last word for EOS token
return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH
# Filter pairs using filterPair condition
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
# Using the functions defined above, return a populated voc object and pairs list
def loadPrepareData(corpus_name, datafile, save_dir):
print("Start preparing training data ...")
voc, pairs = readVocs(datafile, corpus_name)
print("Read {!s} sentence pairs".format(len(pairs)))
pairs = filterPairs(pairs)
print("Trimmed to {!s} sentence pairs".format(len(pairs)))
print("Counting words...")
for pair in pairs:
voc.addSentence(pair[0])
voc.addSentence(pair[1])
print("Counted words:", voc.num_words)
return voc, pairs
# Load/Assemble voc and pairs
save_dir = os.path.join("save")
voc, pairs = loadPrepareData(corpus_name, datafile, save_dir)
# Print some pairs to validate
print("\npairs:")
for pair in pairs[:10]:
print(pair)
```
Another tactic that helps the model learn faster is trimming rarely used words out of the data. This reduces the difficulty of the problem, so the model converges faster. We do this in two steps:
- Trim words that appear fewer than `MIN_COUNT` times using the `voc.trim` method.
- Filter out the dialogue pairs that contain any of the words trimmed in the step above.
```
MIN_COUNT = 3 # Minimum word count threshold for trimming
def trimRareWords(voc, pairs, MIN_COUNT):
# Trim words used under the MIN_COUNT from the voc
voc.trim(MIN_COUNT)
# Filter out pairs with trimmed words
keep_pairs = []
for pair in pairs:
input_sentence = pair[0]
output_sentence = pair[1]
keep_input = True
keep_output = True
# Check input sentence
for word in input_sentence.split(' '):
if word not in voc.word2index:
keep_input = False
break
# Check output sentence
for word in output_sentence.split(' '):
if word not in voc.word2index:
keep_output = False
break
# Only keep pairs that do not contain trimmed word(s) in their input or output sentence
if keep_input and keep_output:
keep_pairs.append(pair)
print("Trimmed from {} pairs to {}, {:.4f} of total".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
return keep_pairs
# Trim voc and pairs
pairs = trimRareWords(voc, pairs, MIN_COUNT)
```
## Prepare data for the model
Even though we have put a lot of effort into building a nice dataset of dialogue pairs and a vocabulary, our model ultimately expects numerical torch tensors as input. One way of converting this kind of data into tensors can be found in the [seq2seq translation tutorial](https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html). In that tutorial a batch size of 1 is used, so all we have to do is convert every word in a sentence pair to its corresponding index in the vocabulary and feed it to the model.
However, if we want training to be faster and to take advantage of the GPU's parallel computation, we should train with mini-batches.
When using mini-batches we must keep in mind that the sentences in a batch can have different lengths. We therefore fix the shape of our batch tensors to (max_length, batch_size); sentences shorter than max_length are zero-padded after the EOS_token (the end-of-sentence token).
One more issue: if we simply converted our sentence pairs into a batch tensor word by word, the tensor would have shape (batch_size, max_length), whereas what we actually need is shape (max_length, batch_size), so that indexing along the first dimension returns one time step across all sentences in the batch. Rather than adding an explicit transpose step, we handle this transposition implicitly inside the `zeroPadding` function.

```
def indexesFromSentence(voc, sentence):
return [voc.word2index[word] for word in sentence.split(' ')] + [EOS_token]
def zeroPadding(l, fillvalue=PAD_token):
return list(itertools.zip_longest(*l, fillvalue=fillvalue))
def binaryMatrix(l, value=PAD_token):
m = []
for i, seq in enumerate(l):
m.append([])
for token in seq:
if token == PAD_token:
m[i].append(0)
else:
m[i].append(1)
return m
# Returns padded input sequence tensor and lengths
def inputVar(l, voc):
indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
lengths = torch.tensor([len(indexes) for indexes in indexes_batch])
padList = zeroPadding(indexes_batch)
padVar = torch.LongTensor(padList)
return padVar, lengths
# Returns padded target sequence tensor, padding mask, and max target length
def outputVar(l, voc):
indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
max_target_len = max([len(indexes) for indexes in indexes_batch])
padList = zeroPadding(indexes_batch)
mask = binaryMatrix(padList)
mask = torch.ByteTensor(mask)
padVar = torch.LongTensor(padList)
return padVar, mask, max_target_len
def batch2TrainData(voc, pair_batch):
pair_batch.sort(key=lambda x: len(x[0].split(' ')), reverse=True)
input_batch, output_batch = [], []
for pair in pair_batch:
input_batch.append(pair[0])
output_batch.append(pair[1])
inp, lengths = inputVar(input_batch, voc)
output, mask, max_target_len = outputVar(output_batch, voc)
return inp, lengths, output, mask, max_target_len
# Example for validation
small_batch_size = 5
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print('input_variable:', input_variable)
print('lengths:', lengths)
print('target_variable:', target_variable)
print('mask:', mask)
print('max_target_len:', max_target_len)
```
## Define the model
### Seq2Seq model
The brain of our chatbot is a sequence-to-sequence (seq2seq) model. The goal of a seq2seq model is to take a sequence as input and predict an output sequence using a fixed-size model.
[Sutskever et al.](https://arxiv.org/abs/1409.3215) proposed a method based on two recurrent neural networks (RNNs) that can solve this problem. One RNN acts as an encoder, whose job is to encode the input sequence into a context vector. In theory, this context vector (the final layer of the RNN) contains semantic information about the input sequence. The second RNN is the decoder, which uses the encoder's context vector to predict the corresponding output sequence.

*Image source: https://jeddy92.github.io/JEddy92.github.io/ts_seq2seq_intro/*
### Encoder
The encoder RNN iterates through the input sequence one token at a time, producing an "output" vector and a "hidden state" vector at each time step. The hidden state vector is then used to compute the hidden state at the next time step, following the basic idea of an RNN. The encoder tries to transform everything it sees in the input sequence, both context and meaning, into a set of points in a high-dimensional space, from which the decoder can produce a meaningful output sequence.
At the heart of the encoder is a multi-layered Gated Recurrent Unit (GRU), proposed by [Cho et al.](https://arxiv.org/pdf/1406.1078v3.pdf) in 2014. We use a bidirectional variant of the GRU, which effectively means there are two independent RNNs: one reads the input sequence from left to right, the other from right to left.

*Image source: https://colah.github.io/posts/2015-09-NN-Types-FP/*
Note that the `embedding` layer is used to encode each word of the input sentence as a vector in a semantic feature space.
Finally, when feeding a padded batch of sequences into the RNN module, we need to pack the batch around the GRU pass and then "unpack" the zero padding of each sequence afterwards.
#### Computation steps
1. Convert word indexes to embedding vectors.
2. Pack the padded batch of sequences.
3. Forward the batch through the GRU.
4. Unpack the padding.
5. Sum the outputs of the two GRU directions.
6. Return the output and the final hidden state.
#### Inputs:
- `input_seq`: batch of input sentences; shape (max_length, batch_size)
- `input_lengths`: list of sentence lengths corresponding to each sentence in the batch; shape (batch_size)
- `hidden`: hidden state; shape (n_layers * num_directions, batch_size, hidden_size)
#### Outputs:
- `output`: output features from the final layer of the GRU; shape (max_length, batch_size, hidden_size)
- `hidden`: updated hidden state from the GRU; shape (n_layers * num_directions, batch_size, hidden_size)
```
class EncoderRNN(nn.Module):
def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = embedding
# Initialize GRU; the input_size and hidden_size params are both set to
# 'hidden_size' because our input size is a word embedding with number
# of features == hidden_size
self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
dropout=(0 if n_layers == 1 else dropout), bidirectional=True)
def forward(self, input_seq, input_lengths, hidden=None):
# Convert word indexes to embedding vector
embedded = self.embedding(input_seq)
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
# Forward pass through GRU
outputs, hidden = self.gru(packed, hidden)
# Unpack padding
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
# Sum bidirectional GRU outputs
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
# Return output and final hidden state
return outputs, hidden
```
### Decoder
The decoder RNN generates the response sequence token by token. It uses the encoder's context vector and its own hidden state to generate the next word of the sequence, until it produces an EOS_token (the end-of-sentence marker). A problem with the vanilla seq2seq approach is that relying on the context vector and hidden state alone loses information, especially for long sentences.
To cope with this, [Bahdanau](https://arxiv.org/abs/1409.0473) proposed what is called an attention mechanism. It allows the decoder to pay attention to certain parts of the input sentence rather than treating every word as equally important.
Attention is computed from the decoder's current hidden state and the encoder's outputs. The attention weight vector has the same length as the input sequence.

[Luong](https://arxiv.org/abs/1508.04025) attention is an improved version built on the idea of "global attention". The key difference is that with global attention we attend over all of the encoder's hidden states, instead of only the encoder's final hidden state as in Bahdanau's version. Another difference is that global attention is computed from the decoder's current hidden state only, whereas Bahdanau's version uses the decoder's hidden state from the previous time step.

Here, $h_{t}$ is the current hidden state of the decoder and $h_{s}$ denotes the hidden states of the encoder.
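For reference (reconstructed from the Luong et al. paper and the `Attn` class below, since the score-function figure does not render here), the three attention score functions are:
$$
\mathrm{score}(h_t, \bar{h}_s) =
\begin{cases}
h_t^\top \bar{h}_s & \text{dot} \\
h_t^\top W_a \bar{h}_s & \text{general} \\
v_a^\top \tanh\left(W_a [h_t ; \bar{h}_s]\right) & \text{concat}
\end{cases}
$$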
Overall, global attention can be summarized as in the figure below.

```
# Luong attention layer
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
if self.method not in ['dot', 'general', 'concat']:
raise ValueError(self.method, 'is not an appropriate attention method.')
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.FloatTensor(hidden_size))
def dot_score(self, hidden, encoder_output):
        return torch.sum(hidden * encoder_output, dim=2)
def general_score(self, hidden, encoder_output):
energy = self.attn(encoder_output)
return torch.sum(hidden * energy, dim=2)
def concat_score(self, hidden, encoder_outputs):
        energy = self.attn(torch.cat((hidden.expand(encoder_outputs.size(0), -1, -1),
                                      encoder_outputs), 2)).tanh()
return torch.sum(self.v * energy, dim=2)
def forward(self, hidden, encoder_outputs):
# Calculate the attention weights (energies) based on the given method
if self.method == 'general':
attn_energies = self.general_score(hidden, encoder_outputs)
elif self.method == 'concat':
attn_energies = self.concat_score(hidden, encoder_outputs)
elif self.method == 'dot':
attn_energies = self.dot_score(hidden, encoder_outputs)
# Transpose max_length and batch_size dimensions
attn_energies = attn_energies.t()
# Return the softmax normalized probability scores (with added dimension)
return F.softmax(attn_energies, dim=1).unsqueeze(1)
```
#### Computation steps
1. Get the embedding vector of the current input word.
2. Forward the embedding through the unidirectional GRU.
3. Compute the attention weights from the current GRU output.
4. Multiply the attention weights with the encoder outputs to get the new "weighted sum" context vector.
5. Concatenate the context vector and the GRU output as in Luong's formulation (eq. 5).
6. Predict the next word (Luong eq. 6).
7. Return the output and the final hidden state.
#### Inputs:
- `input_step`: one time step (one word) of the input sequence batch; shape (1, batch_size)
- `last_hidden`: final hidden layer of the GRU; shape (n_layers * num_directions, batch_size, hidden_size)
- `encoder_outputs`: encoder output; shape (max_length, batch_size, hidden_size)
#### Outputs:
- `output`: softmax-normalized tensor giving the probability of each word being the correct next word; shape (batch_size, voc.num_words)
- `hidden`: final hidden state of the GRU; shape (n_layers * num_directions, batch_size, hidden_size)
```
class LuongAttnDecoderRNN(nn.Module):
def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
super(LuongAttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
# Define layers
self.embedding = embedding
self.embedding_dropout = nn.Dropout(dropout)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attn(attn_model, hidden_size)
def forward(self, input_step, last_hidden, encoder_outputs):
# Note: we run this one step (word) at a time
# Get embedding of current input word
embedded = self.embedding(input_step)
embedded = self.embedding_dropout(embedded)
# Forward through unidirectional GRU
rnn_output, hidden = self.gru(embedded, last_hidden)
# Calculate attention weights from the current GRU output
attn_weights = self.attn(rnn_output, encoder_outputs)
# Multiply attention weights to encoder outputs to get new "weighted sum" context vector
context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# Concatenate weighted context vector and GRU output using Luong eq. 5
rnn_output = rnn_output.squeeze(0)
context = context.squeeze(1)
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.concat(concat_input))
# Predict next word using Luong eq. 6
output = self.out(concat_output)
output = F.softmax(output, dim=1)
# Return output and final hidden state
return output, hidden
```
## Training
### Masked loss
Since we are working with batches of padded sentences, we cannot simply compute the loss over every element of the output tensor. We define a `maskNLLLoss` function that computes the loss based on the decoder's output: it returns the average negative log likelihood of the elements that correspond to real tokens (a 1 in the mask tensor).
```
def maskNLLLoss(inp, target, mask):
nTotal = mask.sum()
    crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
    loss = crossEntropy.masked_select(mask).mean()
loss = loss.to(device)
return loss, nTotal.item()
```
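As a quick sanity check of the behaviour described above, here is a minimal sketch with toy tensors (in the notebook `device` and the real batch shapes come from earlier cells; the values below are illustrative only):
```
# Toy check of maskNLLLoss: only the masked-in (real) token contributes to the loss.
import torch

device = torch.device("cpu")                # assumed here; the notebook sets this earlier
inp = torch.tensor([[0.7, 0.2, 0.1],
                    [0.1, 0.8, 0.1]])       # decoder output probabilities, shape (batch, vocab)
target = torch.tensor([0, 1])               # correct word indices, shape (batch,)
mask = torch.tensor([True, False])          # second element is padding
loss, n_total = maskNLLLoss(inp, target, mask)
print(loss.item(), n_total)                 # ≈ -log(0.7) ≈ 0.357, and n_total == 1
```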
### Training
The `train` function implements the training procedure for a single iteration (one batch of inputs).
We use a couple of tricks to help training converge better:
- **Teacher forcing**: with a preset probability `teacher_forcing_ratio`, the decoder is fed the current ground-truth target word as its next input, instead of the word it predicted itself at the current step.
- **Gradient clipping**: a common technique for dealing with the "exploding gradient" problem. It simply caps the gradient values at an upper threshold so they never grow too large.

*Image source: Goodfellow et al. Deep Learning. 2016. https://www.deeplearningbook.org/*
#### Computation steps
1. Forward the whole input batch through the encoder.
2. Initialize the decoder input with SOS_token and the decoder hidden state with the encoder's final hidden state.
3. Forward the input sequence through the decoder one time step at a time.
4. If teacher forcing: set the decoder's next input to the current ground-truth target word; otherwise, set it to the word the decoder predicted at the current step.
5. Compute the loss.
6. Run backpropagation.
7. Clip gradients.
8. Update the encoder and decoder weights.
```
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding,
encoder_optimizer, decoder_optimizer, batch_size, clip, max_length=MAX_LENGTH):
# Zero gradients
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Set device options
input_variable = input_variable.to(device)
lengths = lengths.to(device)
target_variable = target_variable.to(device)
mask = mask.to(device)
# Initialize variables
loss = 0
print_losses = []
n_totals = 0
# Forward pass through encoder
encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
# Create initial decoder input (start with SOS tokens for each sentence)
decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
decoder_input = decoder_input.to(device)
# Set initial decoder hidden state to the encoder's final hidden state
decoder_hidden = encoder_hidden[:decoder.n_layers]
# Determine if we are using teacher forcing this iteration
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
# Forward batch of sequences through decoder one time step at a time
if use_teacher_forcing:
for t in range(max_target_len):
decoder_output, decoder_hidden = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
# Teacher forcing: next input is current target
decoder_input = target_variable[t].view(1, -1)
# Calculate and accumulate loss
mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
loss += mask_loss
print_losses.append(mask_loss.item() * nTotal)
n_totals += nTotal
else:
for t in range(max_target_len):
decoder_output, decoder_hidden = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
# No teacher forcing: next input is decoder's own current output
_, topi = decoder_output.topk(1)
decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
decoder_input = decoder_input.to(device)
# Calculate and accumulate loss
mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
loss += mask_loss
print_losses.append(mask_loss.item() * nTotal)
n_totals += nTotal
    # Perform backpropagation
loss.backward()
# Clip gradients: gradients are modified in place
_ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
_ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)
# Adjust model weights
encoder_optimizer.step()
decoder_optimizer.step()
return sum(print_losses) / n_totals
```
|
github_jupyter
|
# Tema 4.1 <a class="tocSkip">
# Imports
```
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import graphviz
import sklearn.tree
import sklearn.neighbors
import sklearn.naive_bayes
import sklearn.svm
import sklearn.metrics
import sklearn.preprocessing
import sklearn.model_selection
```
# Data
https://www.drivendata.org/competitions/54/machine-learning-with-a-heart/page/109/
- Numeric
- slope\_of\_peak\_exercise\_st\_segment (int, semi-categorical, 1-3)
- resting\_blood\_pressure (int)
- chest\_pain\_type (int, semi-categorical, 1-4)
- num\_major\_vessels (int, semi-categorical, 0-3)
- resting\_ekg\_results (int, semi-categorical, 0-2)
- serum\_cholesterol\_mg\_per\_dl (int)
- oldpeak\_eq\_st\_depression (float)
- age (int)
- max\_heart\_rate\_achieved (int)
- Categorical
- thal
- normal
- fixed\_defect
- reversible\_defect
- fasting\_blood\_sugar\_gt\_120\_mg\_per\_dl (blood sugar > 120)
- 0
- 1
- sex
- 0 (f)
- 1 (m)
- exercise\_induced\_angina
- 0
- 1
```
features = pd.read_csv('train_values.csv')
labels = pd.read_csv('train_labels.csv')
features.head()
labels.head()
FEATURES = ['slope_of_peak_exercise_st_segment',
'thal',
'resting_blood_pressure',
'chest_pain_type',
'num_major_vessels',
'fasting_blood_sugar_gt_120_mg_per_dl',
'resting_ekg_results',
'serum_cholesterol_mg_per_dl',
'oldpeak_eq_st_depression',
'sex',
'age',
'max_heart_rate_achieved',
'exercise_induced_angina']
LABEL = 'heart_disease_present'
EXPLANATIONS = {'slope_of_peak_exercise_st_segment' : 'Quality of Blood Flow to the Heart',
'thal' : 'Thallium Stress Test Measuring Blood Flow to the Heart',
'resting_blood_pressure' : 'Resting Blood Pressure',
'chest_pain_type' : 'Chest Pain Type (1-4)',
'num_major_vessels' : 'Major Vessels (0-3) Colored by Flourosopy',
'fasting_blood_sugar_gt_120_mg_per_dl' : 'Fasting Blood Sugar > 120 mg/dl',
'resting_ekg_results' : 'Resting Electrocardiographic Results (0-2)',
'serum_cholesterol_mg_per_dl' : 'Serum Cholesterol in mg/dl',
'oldpeak_eq_st_depression' : 'Exercise vs. Rest\nA Measure of Abnormality in Electrocardiograms',
'age' : 'Age (years)',
'sex' : 'Sex (m/f)',
'max_heart_rate_achieved' : 'Maximum Heart Rate Achieved (bpm)',
'exercise_induced_angina' : 'Exercise-Induced Chest Pain (yes/no)'}
NUMERICAL_FEATURES = ['slope_of_peak_exercise_st_segment',
'resting_blood_pressure',
'chest_pain_type',
'num_major_vessels',
'resting_ekg_results',
'serum_cholesterol_mg_per_dl',
'oldpeak_eq_st_depression',
'age',
'max_heart_rate_achieved']
CATEGORICAL_FEATURES = ['thal',
'fasting_blood_sugar_gt_120_mg_per_dl',
'sex',
'exercise_induced_angina']
CATEGORICAL_FEATURE_VALUES = {'thal' : [[0, 1, 2], ['Normal',
'Fixed Defect',
'Reversible Defect']],
'fasting_blood_sugar_gt_120_mg_per_dl' : [[0, 1], ['No', 'Yes']],
'sex' : [[0, 1], ['F', 'M']],
'exercise_induced_angina' : [[0, 1], ['No', 'Yes']]}
SEMI_CATEGORICAL_FEATURES = ['slope_of_peak_exercise_st_segment',
'chest_pain_type',
'num_major_vessels',
'resting_ekg_results']
SEMI_CATEGORICAL_FEATURE_LIMITS = {'slope_of_peak_exercise_st_segment' : [1, 3],
'chest_pain_type' : [1, 4],
'num_major_vessels' : [0, 3],
'resting_ekg_results' : [0, 2]}
LABEL_VALUES = [[0, 1], ['No', 'Yes']]
for feature in CATEGORICAL_FEATURES:
if len(CATEGORICAL_FEATURE_VALUES[feature][0]) > 2:
onehot_feature = pd.get_dummies(features[feature])
feature_index = features.columns.get_loc(feature)
features.drop(feature, axis=1, inplace=True)
onehot_feature.columns = [f'{feature}={feature_value}' for feature_value in onehot_feature.columns]
for colname in onehot_feature.columns[::-1]:
features.insert(feature_index, colname, onehot_feature[colname])
features.head()
x = features.values[:,1:].astype(int)
y = labels.values[:,-1].astype(int)
print('x =\n', x)
print('y =\n', y)
stratified_kflod_validator = sklearn.model_selection.StratifiedKFold(n_splits=5, shuffle=True)
stratified_kflod_validator
```
# Decision Trees
```
tree_mean_acc = 0
tree_score_df = pd.DataFrame(columns = ['Fold', 'Accuracy', 'Precision', 'Recall'])
for fold_ind, (train_indices, test_indices) in enumerate(stratified_kflod_validator.split(x, y), 1):
x_train, x_test = x[train_indices], x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
dec_tree = sklearn.tree.DecisionTreeClassifier(min_samples_split = 5)
dec_tree.fit(x_train, y_train)
acc = dec_tree.score(x_test, y_test)
tree_mean_acc += acc
y_pred = dec_tree.predict(x_test)
precision = sklearn.metrics.precision_score(y_test, y_pred)
recall = sklearn.metrics.recall_score(y_test, y_pred)
tree_score_df.loc[fold_ind] = [f'{fold_ind}',
f'{acc*100:.2f} %',
f'{precision*100:.2f} %',
f'{recall*100:.2f} %']
tree_plot_data = sklearn.tree.export_graphviz(dec_tree, out_file = None,
feature_names = features.columns[1:],
class_names = [f'{labels.columns[1]}={label_value}'
for label_value
in LABEL_VALUES[1]],
filled = True,
rounded = True,
special_characters = True)
graph = graphviz.Source(tree_plot_data)
graph.render(f'Fold {fold_ind}')
next_ind = len(tree_score_df) + 1
mean_acc = tree_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = tree_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = tree_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
tree_score_df.loc[next_ind] = ['Avg', f'{mean_acc:.2f} %', f'{mean_prec:.2f} %', f'{mean_rec:.2f} %']
tree_score_df
```
# KNN
```
# TODO Normalize
knn_mean_score_df = pd.DataFrame(columns = ['k', 'Avg. Accuracy', 'Avg. Precision', 'Avg. Recall'])
normalized_x = sklearn.preprocessing.normalize(x) # No improvement over un-normalized data.
mean_accs = []
for k in list(range(1, 10)) + [math.ceil(len(features) * step) for step in [0.1, 0.2, 0.3, 0.4, 0.5]]:
knn_score_df = pd.DataFrame(columns = ['Fold', 'Accuracy', 'Precision', 'Recall'])
mean_acc = 0
for fold_ind, (train_indices, test_indices) in enumerate(stratified_kflod_validator.split(x, y), 1):
x_train, x_test = normalized_x[train_indices], normalized_x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
knn = sklearn.neighbors.KNeighborsClassifier(n_neighbors = k)
knn.fit(x_train, y_train)
acc = knn.score(x_test, y_test)
mean_acc += acc
y_pred = knn.predict(x_test)
precision = sklearn.metrics.precision_score(y_test, y_pred)
recall = sklearn.metrics.recall_score(y_test, y_pred)
knn_score_df.loc[fold_ind] = [f'{fold_ind}',
f'{acc*100:.2f} %',
f'{precision*100:.2f} %',
f'{recall*100:.2f} %']
next_ind = len(knn_score_df) + 1
mean_acc = knn_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = knn_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = knn_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
    knn_score_df.loc[next_ind] = ['Avg',
                                  f'{mean_acc:.2f} %',
                                  f'{mean_prec:.2f} %',
                                  f'{mean_rec:.2f} %']
knn_mean_score_df.loc[k] = [k,
f'{mean_acc:.2f} %',
f'{mean_prec:.2f} %',
f'{mean_rec:.2f} %']
# print(f'k = {k}')
# print(knn_score_df)
# print()
best_accuracy = knn_mean_score_df.sort_values(by = ['Avg. Accuracy']).iloc[-1]
print('Best avg. accuracy is', best_accuracy['Avg. Accuracy'], 'for k =', best_accuracy['k'], '.')
knn_mean_score_df.sort_values(by = ['Avg. Accuracy'])
```
# Naive Bayes
```
nb_classifier_types = [sklearn.naive_bayes.GaussianNB,
sklearn.naive_bayes.MultinomialNB,
sklearn.naive_bayes.ComplementNB,
sklearn.naive_bayes.BernoulliNB]
nb_mean_score_df = pd.DataFrame(columns = ['Type', 'Avg. Accuracy', 'Avg. Precision', 'Avg. Recall'])
for nb_classifier_type in nb_classifier_types:
nb_score_df = pd.DataFrame(columns = ['Fold', 'Accuracy', 'Precision', 'Recall'])
mean_acc = 0
for fold_ind, (train_indices, test_indices) in enumerate(stratified_kflod_validator.split(x, y), 1):
x_train, x_test = x[train_indices], x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
nb = nb_classifier_type()
nb.fit(x_train, y_train)
acc = nb.score(x_test, y_test)
mean_acc += acc
y_pred = nb.predict(x_test)
precision = sklearn.metrics.precision_score(y_test, y_pred)
recall = sklearn.metrics.recall_score(y_test, y_pred)
nb_score_df.loc[fold_ind] = [f'{fold_ind}',
f'{acc*100:.2f} %',
f'{precision*100:.2f} %',
f'{recall*100:.2f} %']
next_ind = len(nb_score_df) + 1
mean_acc = nb_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = nb_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = nb_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
nb_score_df.loc[next_ind] = ['Avg',
f'{mean_acc:.2f} %',
f'{mean_prec:.2f} %',
f'{mean_rec:.2f} %']
nb_mean_score_df.loc[len(nb_mean_score_df) + 1] = [nb_classifier_type.__name__,
f'{mean_acc:.2f} %',
f'{mean_prec:.2f} %',
f'{mean_rec:.2f} %']
print(nb_classifier_type.__name__)
print()
print(nb_score_df)
print()
nb_mean_score_df.sort_values(by = ['Avg. Accuracy'])
```
# SVM
```
svm_classifier_type = sklearn.svm.SVC
# Avg.
# Args -> acc / prec / rec
#
# kernel: linear -> 78.89 % 78.31 % 73.75 %
# kernel: linear, C: 0.1 -> 84.44 % 88.54 % 75.00 %
#
# * No improvement for larger C.
#
# kernel: poly, max_iter: 1 -> 46.67 % 34.67 % 21.25 %
# kernel: poly, max_iter: 10 -> 57.22 % 51.27 % 66.25 %
# kernel: poly, max_iter: 100 -> 61.67 % 60.18 % 40.00 %
# kernel: poly, max_iter: 100, coef0: 1 -> 62.22 % 62.19 % 41.25 %
#
# * No improvement for more iters.
# * No improvement for larger C.
# * No improvement for higher degree.
# * No improvement for different coef0.
#
# kernel: rbf, max_iter: 10 -> 48.89 % 46.07 % 72.50 %
# kernel: rbf, max_iter: 100 -> 60.00 % 74.00 % 17.50 %
# kernel: rbf, max_iter: 1000 -> 60.56 % 78.33 % 15.00 %
args = {'kernel': 'linear', 'C': 0.1}
svm_score_df = pd.DataFrame(columns = ['Type', 'Accuracy', 'Precision', 'Recall'])
# normalized_x = sklearn.preprocessing.normalize(x)
mean_acc = 0
for fold_ind, (train_indices, test_indices) in enumerate(stratified_kflod_validator.split(x, y), 1):
x_train, x_test = x[train_indices], x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
svm = svm_classifier_type(**args, gamma = 'scale', cache_size = 256)
svm.fit(x_train, y_train)
acc = svm.score(x_test, y_test)
mean_acc += acc
y_pred = svm.predict(x_test)
precision = sklearn.metrics.precision_score(y_test, y_pred)
recall = sklearn.metrics.recall_score(y_test, y_pred)
svm_score_df.loc[fold_ind] = [f'{fold_ind}',
f'{acc*100:.2f} %',
f'{precision*100:.2f} %',
f'{recall*100:.2f} %']
next_ind = len(svm_score_df) + 1
mean_acc = svm_score_df['Accuracy'].apply(lambda n: float(n[:-2])).mean()
mean_prec = svm_score_df['Precision'].apply(lambda n: float(n[:-2])).mean()
mean_rec = svm_score_df['Recall'].apply(lambda n: float(n[:-2])).mean()
svm_score_df.loc[next_ind] = ['Avg',
f'{mean_acc:.2f} %',
f'{mean_prec:.2f} %',
f'{mean_rec:.2f} %']
print(svm_score_df)
```
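The manual search documented in the comments above can also be automated. A minimal sketch using `GridSearchCV` on the same data and splitter (the parameter ranges below are illustrative, not the full grid tried by hand):
```
# Hypothetical automated search over the kernels / C values tried manually above.
param_grid = {'kernel': ['linear', 'rbf', 'poly'], 'C': [0.01, 0.1, 1, 10]}
grid = sklearn.model_selection.GridSearchCV(
    sklearn.svm.SVC(gamma = 'scale', cache_size = 256),
    param_grid, scoring = 'accuracy', cv = stratified_kflod_validator)
grid.fit(x, y)
print('Best params:', grid.best_params_)
print(f'Best CV accuracy: {grid.best_score_*100:.2f} %')
```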
# Shallow Neural Nets
## Import deps
```
import pandas as pd
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization, LeakyReLU
```
## Import data
```
features = pd.read_csv('train_values.csv')
labels = pd.read_csv('train_labels.csv')
print(labels.head())
features.head()
FEATURES = ['slope_of_peak_exercise_st_segment',
'thal',
'resting_blood_pressure',
'chest_pain_type',
'num_major_vessels',
'fasting_blood_sugar_gt_120_mg_per_dl',
'resting_ekg_results',
'serum_cholesterol_mg_per_dl',
'oldpeak_eq_st_depression',
'sex',
'age',
'max_heart_rate_achieved',
'exercise_induced_angina']
LABEL = 'heart_disease_present'
EXPLANATIONS = {'slope_of_peak_exercise_st_segment' : 'Quality of Blood Flow to the Heart',
'thal' : 'Thallium Stress Test Measuring Blood Flow to the Heart',
'resting_blood_pressure' : 'Resting Blood Pressure',
'chest_pain_type' : 'Chest Pain Type (1-4)',
'num_major_vessels' : 'Major Vessels (0-3) Colored by Flourosopy',
'fasting_blood_sugar_gt_120_mg_per_dl' : 'Fasting Blood Sugar > 120 mg/dl',
'resting_ekg_results' : 'Resting Electrocardiographic Results (0-2)',
'serum_cholesterol_mg_per_dl' : 'Serum Cholesterol in mg/dl',
'oldpeak_eq_st_depression' : 'Exercise vs. Rest\nA Measure of Abnormality in Electrocardiograms',
'age' : 'Age (years)',
'sex' : 'Sex (m/f)',
'max_heart_rate_achieved' : 'Maximum Heart Rate Achieved (bpm)',
'exercise_induced_angina' : 'Exercise-Induced Chest Pain (yes/no)'}
NUMERICAL_FEATURES = ['slope_of_peak_exercise_st_segment',
'resting_blood_pressure',
'chest_pain_type',
'num_major_vessels',
'resting_ekg_results',
'serum_cholesterol_mg_per_dl',
'oldpeak_eq_st_depression',
'age',
'max_heart_rate_achieved']
CATEGORICAL_FEATURES = ['thal',
'fasting_blood_sugar_gt_120_mg_per_dl',
'sex',
'exercise_induced_angina']
CATEGORICAL_FEATURE_VALUES = {'thal' : [[0, 1, 2], ['Normal',
'Fixed Defect',
'Reversible Defect']],
'fasting_blood_sugar_gt_120_mg_per_dl' : [[0, 1], ['No', 'Yes']],
'sex' : [[0, 1], ['F', 'M']],
'exercise_induced_angina' : [[0, 1], ['No', 'Yes']]}
SEMI_CATEGORICAL_FEATURES = ['slope_of_peak_exercise_st_segment',
'chest_pain_type',
'num_major_vessels',
'resting_ekg_results']
SEMI_CATEGORICAL_FEATURE_LIMITS = {'slope_of_peak_exercise_st_segment' : [1, 3],
'chest_pain_type' : [1, 4],
'num_major_vessels' : [0, 3],
'resting_ekg_results' : [0, 2]}
LABEL_VALUES = [[0, 1], ['No', 'Yes']]
for feature in CATEGORICAL_FEATURES:
if len(CATEGORICAL_FEATURE_VALUES[feature][0]) > 2:
onehot_feature = pd.get_dummies(features[feature])
feature_index = features.columns.get_loc(feature)
features.drop(feature, axis=1, inplace=True)
onehot_feature.columns = ['%s=%s' % (feature, feature_value) for feature_value in onehot_feature.columns]
for colname in onehot_feature.columns[::-1]:
features.insert(feature_index, colname, onehot_feature[colname])
x = features.values[:,1:].astype(int)
y = labels.values[:,-1].astype(int)
print('x =\n', x)
print('y =\n', y)
# for fold_ind, (train_indices, test_indices) in enumerate(stratified_kflod_validator.split(x, y), 1):
# x_train, x_test = x[train_indices], x[test_indices]
# y_train, y_test = y[train_indices], y[test_indices]
x_train, x_test, y_train, y_test = \
train_test_split(x, y, test_size=0.2, random_state=42)
print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
```
## Define model
```
input_shape = (1,15)
num_classes = 2
print(x.shape)
print(y.shape)
print(x[:1])
print(y[:1])
```
### Architecture 0 - Inflating Dense 120-225, 0.5 Dropout, Batch Norm, Sigmoid Classification
```
arch_cnt = 'arch-0-3'
model = Sequential()
model.add(
Dense(120, input_dim=15, kernel_initializer='normal',
          # kernel_regularizer=keras.regularizers.l2(0.001), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(225, input_dim=15, kernel_initializer='normal', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=5, min_lr=0.001,
verbose=1)
# es_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# min_delta=0.1,
# patience=7,
# verbose=1,
# mode='auto'
# )
# 'restore_best_weights' in dir(keras.callbacks.EarlyStopping()) # FALSE = library is not up-to-date
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
    callbacks=[reduce_lr_cb, tb_cb]  # es_cb is commented out above
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 1 - **`Deflating Dense 225-112`**, 0.5 Dropout, Batch Norm, Sigmoid Classification
```
arch_cnt = 'arch-1'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='normal',
          # kernel_regularizer=keras.regularizers.l2(0.001), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(112, input_dim=15, kernel_initializer='normal', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 2 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, **`HE Initialization`**
```
arch_cnt = 'arch-2'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='he_uniform',
          kernel_regularizer=keras.regularizers.l2(0.001), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(112, input_dim=15, kernel_initializer='he_uniform', activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, **`L2 = 1e^-4`**
```
arch_cnt = 'arch-3-4'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.0001), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(
Dense(112, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.0001), # loses 0.2 acc
activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, **`L2 = 1e^-3`**
```
arch_cnt = 'arch-3-3'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.001), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(
Dense(112, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.001), # loses 0.2 acc
activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, **`L2 = 1e^-2`**
```
arch_cnt = 'arch-3-2'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.01), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(
Dense(112, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.01), # loses 0.2 acc
activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
### Architecture 3 - Deflating Dense 225-112, 0.5 Dropout, Batch Norm, Sigmoid Classification, **`L2 = 1e^-1`**
```
arch_cnt = 'arch-3-1'
model = Sequential()
model.add(
Dense(225, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.1), # loses 0.2 acc
activation='relu'))
model.add(Dropout(0.5))
model.add(
Dense(112, input_dim=15, kernel_initializer='normal',
          kernel_regularizer=keras.regularizers.l2(0.1), # loses 0.2 acc
activation='relu'))
# model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(axis = 1))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
%%time
# earlystop_cb = keras.callbacks.EarlyStopping(
# monitor='val_loss',
# patience=5, restore_best_weights=True,
# verbose=1)
reduce_lr_cb = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.05,
patience=7, min_lr=0.001,
verbose=1)
tb_cb = keras.callbacks.TensorBoard(log_dir='./tensorboard/%s' % arch_cnt, histogram_freq=0,
write_graph=True, write_images=True)
epochs = 50
batch_size = 32
model.fit(
x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=False,
validation_data=(x_test, y_test),
callbacks=[reduce_lr_cb, tb_cb]
# callbacks=[earlystop_cb, reduce_lr_cb]
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
---
# Ensemble Methods
```
import matplotlib.pyplot as plt
%matplotlib inline
```
## Bagging Strategies
### Random Forests
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
# x_train, x_test, y_train, y_test
clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
clf.fit(x_train, y_train)
print(clf.feature_importances_)
print(clf.predict(x_test))
# make predictions for test data
y_pred = clf.predict(x_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
```
### ExtraTrees
```
from sklearn.ensemble import ExtraTreesClassifier
from mlxtend.plotting import plot_learning_curves  # learning-curve helper cited in the bibliography
# x_train, x_test, y_train, y_test
clf = ExtraTreesClassifier(n_estimators=100, max_depth=2, random_state=0)
clf.fit(x_train, y_train)
print(clf.feature_importances_)
print(clf.predict(x_test))
# make predictions for test data
y_pred = clf.predict(x_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
fig = plt.figure(figsize=(10,5))
plot_learning_curves(x_train, y_train, x_test, y_test, clf)
plt.show()
```
## Stacking Strategies
### SuperLearner
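This subsection was left without code. As a rough stand-in (not the h2o SuperLearner linked in the bibliography), scikit-learn's `StackingClassifier` can stack the base learners used above behind a logistic-regression meta-learner:
```
from sklearn.ensemble import StackingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Stack two of the earlier base models; out-of-fold predictions feed the meta-learner.
estimators = [('rf', RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)),
              ('svc', SVC(kernel='linear', C=0.1, gamma='scale'))]
stack = StackingClassifier(estimators=estimators,
                           final_estimator=LogisticRegression(max_iter=1000),
                           cv=5)
stack.fit(x_train, y_train)
y_pred = stack.predict(x_test)
print("Accuracy: %.2f%%" % (accuracy_score(y_test, y_pred) * 100.0))
```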
## Boosting Strategies
### xgboost
```
# import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
# x_train, x_test, y_train, y_test
model = XGBClassifier()
model.fit(x_train, y_train)
print(model)
# make predictions for test data
y_pred = model.predict(x_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
```
---
# Bibliography
+ https://medium.com/@datalesdatales/why-you-should-be-plotting-learning-curves-in-your-next-machine-learning-project-221bae60c53
+ https://slideplayer.com/slide/4684120/15/images/6/Outline+Bias%2FVariance+Tradeoff+Ensemble+methods+that+minimize+variance.jpg
+ https://slideplayer.com/slide/4684120/
+ plot confusion matrix
+ http://rasbt.github.io/mlxtend/user_guide/plotting/plot_learning_curves/
+ https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/
+ http://docs.h2o.ai/h2o-tutorials/latest-stable/tutorials/ensembles-stacking/index.html
---
|
github_jupyter
|
# Super Resolution with PaddleGAN and OpenVINO
This notebook demonstrates converting the RealSR (real-world super-resolution) model from [PaddlePaddle/PaddleGAN](https://github.com/PaddlePaddle/PaddleGAN) to OpenVINO's Intermediate Representation (IR) format, and shows inference results on both the PaddleGAN and IR models.
For more information about the various PaddleGAN superresolution models, see [PaddleGAN's documentation](https://github.com/PaddlePaddle/PaddleGAN/blob/develop/docs/en_US/tutorials/single_image_super_resolution.md). For more information about RealSR, see the [research paper](https://openaccess.thecvf.com/content_CVPRW_2020/papers/w31/Ji_Real-World_Super-Resolution_via_Kernel_Estimation_and_Noise_Injection_CVPRW_2020_paper.pdf) from CVPR 2020.
This notebook works best with small images (up to 800x600).
## Imports
```
import sys
import time
import warnings
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
import paddle
from IPython.display import HTML, FileLink, ProgressBar, clear_output, display
from IPython.display import Image as DisplayImage
from PIL import Image
from openvino.runtime import Core, PartialShape
from paddle.static import InputSpec
from ppgan.apps import RealSRPredictor
sys.path.append("../utils")
from notebook_utils import NotebookAlert
```
## Settings
```
# The filenames of the downloaded and converted models
MODEL_NAME = "paddlegan_sr"
MODEL_DIR = Path("model")
OUTPUT_DIR = Path("output")
OUTPUT_DIR.mkdir(exist_ok=True)
model_path = MODEL_DIR / MODEL_NAME
ir_path = model_path.with_suffix(".xml")
onnx_path = model_path.with_suffix(".onnx")
```
## Inference on PaddlePaddle Model
### Investigate PaddleGAN Model
The [PaddleGAN documentation](https://github.com/PaddlePaddle/PaddleGAN) explains how to run the model with `sr.run()`. Let's see what that function does, and check the other relevant functions that it calls. Adding `??` to a method shows its docstring and source code.
```
# Running this cell will download the model weights if they have not been downloaded before
# This may take a while
sr = RealSRPredictor()
sr.run??
sr.run_image??
sr.norm??
sr.denorm??
```
The `run` method checks whether the input is an image or a video. For an image, it loads the image as an RGB image, normalizes it, and converts it to a Paddle tensor. The tensor is propagated through the network by calling `self.model()` and the result is then "denormalized". The normalization function simply divides all image values by 255, converting an image with integer values in the range 0 to 255 into one with floating point values in the range 0 to 1. The denormalization function transforms the output from network shape (C,H,W) to image shape (H,W,C), clips the image values between 0 and 255, and converts the image to a standard RGB image with integer values in the range 0 to 255.
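As a minimal sketch of that pre/post-processing (illustrative helper names, not PaddleGAN's own functions; it mirrors the IR inference code later in this notebook):
```
import numpy as np

def normalize(image_hwc_uint8):
    # HWC uint8 image in [0, 255] -> NCHW float32 in [0, 1]
    return image_hwc_uint8.transpose(2, 0, 1)[None, :, :, :].astype(np.float32) / 255

def denormalize(result_chw_float):
    # CHW float network output -> HWC uint8 image in [0, 255]
    return (result_chw_float * 255).clip(0, 255).astype("uint8").transpose((1, 2, 0))
```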
To get more information about the model, we can check what it looks like with `sr.model??`.
```
# sr.model??
```
### Do Inference
To show inference on the PaddlePaddle model, set PADDLEGAN_INFERENCE to True in the cell below. Performing inference may take some time.
```
# Set PADDLEGAN_INFERENCE to True to show inference on the PaddlePaddle model.
# This may take a long time, especially for larger images.
#
PADDLEGAN_INFERENCE = False
if PADDLEGAN_INFERENCE:
# load the input image and convert to tensor with input shape
IMAGE_PATH = Path("data/coco_tulips.jpg")
image = cv2.cvtColor(cv2.imread(str(IMAGE_PATH)), cv2.COLOR_BGR2RGB)
input_image = image.transpose(2, 0, 1)[None, :, :, :] / 255
input_tensor = paddle.to_tensor(input_image.astype(np.float32))
if max(image.shape) > 400:
NotebookAlert(
f"This image has shape {image.shape}. Doing inference will be slow "
"and the notebook may stop responding. Set PADDLEGAN_INFERENCE to False "
"to skip doing inference on the PaddlePaddle model.",
"warning",
)
if PADDLEGAN_INFERENCE:
# Do inference, and measure how long it takes
print(f"Start superresolution inference for {IMAGE_PATH.name} with shape {image.shape}...")
start_time = time.perf_counter()
sr.model.eval()
with paddle.no_grad():
result = sr.model(input_tensor)
end_time = time.perf_counter()
duration = end_time - start_time
result_image = (
(result.numpy().squeeze() * 255).clip(0, 255).astype("uint8").transpose((1, 2, 0))
)
print(f"Superresolution image shape: {result_image.shape}")
print(f"Inference duration: {duration:.2f} seconds")
plt.imshow(result_image);
```
## Convert PaddleGAN Model to ONNX and OpenVINO IR
To convert the PaddlePaddle model to OpenVINO IR, we first convert the model to ONNX, and then convert the ONNX model to the IR format.
### Convert PaddlePaddle Model to ONNX
```
# Ignore PaddlePaddle warnings:
# The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1)
warnings.filterwarnings("ignore")
sr.model.eval()
# ONNX export requires an input shape in this format as parameter
x_spec = InputSpec([None, 3, 299, 299], "float32", "x")
paddle.onnx.export(sr.model, str(model_path), input_spec=[x_spec], opset_version=13)
```
### Convert ONNX Model to OpenVINO IR
```
## Uncomment the command below to show Model Optimizer help, which shows the possible arguments for Model Optimizer
# ! mo --help
if not ir_path.exists():
print("Exporting ONNX model to IR... This may take a few minutes.")
! mo --input_model $onnx_path --input_shape "[1,3,299,299]" --model_name $MODEL_NAME --output_dir "$MODEL_DIR" --data_type "FP16" --log_level "CRITICAL"
```
## Do Inference on IR Model
```
# Read network and get input and output names
ie = Core()
model = ie.read_model(model=ir_path)
input_layer = next(iter(model.inputs))
# Load and show image
IMAGE_PATH = Path("data/coco_tulips.jpg")
image = cv2.cvtColor(cv2.imread(str(IMAGE_PATH)), cv2.COLOR_BGR2RGB)
if max(image.shape) > 800:
NotebookAlert(
f"This image has shape {image.shape}. The notebook works best with images with "
"a maximum side of 800x600. Larger images may work well, but inference may "
"be slow",
"warning",
)
plt.imshow(image)
# Reshape network to image size
model.reshape({input_layer.any_name: PartialShape([1, 3, image.shape[0], image.shape[1]])})
# Load network to the CPU device (this may take a few seconds)
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = next(iter(compiled_model.outputs))
# Convert image to network input shape and divide pixel values by 255
# See "Investigate PaddleGAN model" section
input_image = image.transpose(2, 0, 1)[None, :, :, :] / 255
start_time = time.perf_counter()
# Do inference
ir_result = compiled_model([input_image])[output_layer]
end_time = time.perf_counter()
duration = end_time - start_time
print(f"Inference duration: {duration:.2f} seconds")
# Get result array in CHW format
result_array = ir_result.squeeze()
# Convert array to image with same method as PaddleGAN:
# Multiply by 255, clip values between 0 and 255, convert to HWC INT8 image
# See "Investigate PaddleGAN model" section
image_super = (result_array * 255).clip(0, 255).astype("uint8").transpose((1, 2, 0))
# Resize image with bicubic upsampling for comparison
image_bicubic = cv2.resize(image, tuple(image_super.shape[:2][::-1]), interpolation=cv2.INTER_CUBIC)
plt.imshow(image_super)
```
### Show Animated GIF
To visualize the difference between the bicubic image and the superresolution image, we create an animated GIF that switches between the two versions.
```
result_pil = Image.fromarray(image_super)
bicubic_pil = Image.fromarray(image_bicubic)
gif_image_path = OUTPUT_DIR / Path(IMAGE_PATH.stem + "_comparison.gif")
final_image_path = OUTPUT_DIR / Path(IMAGE_PATH.stem + "_super.png")
result_pil.save(
fp=str(gif_image_path),
format="GIF",
append_images=[bicubic_pil],
save_all=True,
duration=1000,
loop=0,
)
result_pil.save(fp=str(final_image_path), format="png")
DisplayImage(open(gif_image_path, "rb").read(), width=1920 // 2)
```
### Create Comparison Video
Create a video with a "slider", showing the bicubic image to the right and the superresolution image on the left.
For the video, the superresolution and bicubic images are resized to half the original width and height to improve processing speed. This still gives a good indication of the superresolution effect. The video is saved as an .avi file. You can click on the link to download the video, or open it directly from the output directory and play it locally.
```
FOURCC = cv2.VideoWriter_fourcc(*"MJPG")
IMAGE_PATH = Path(IMAGE_PATH)
result_video_path = OUTPUT_DIR / Path(f"{IMAGE_PATH.stem}_comparison_paddlegan.avi")
video_target_height, video_target_width = (
image_super.shape[0] // 2,
image_super.shape[1] // 2,
)
out_video = cv2.VideoWriter(
str(result_video_path),
FOURCC,
90,
(video_target_width, video_target_height),
)
resized_result_image = cv2.resize(image_super, (video_target_width, video_target_height))[
:, :, (2, 1, 0)
]
resized_bicubic_image = cv2.resize(image_bicubic, (video_target_width, video_target_height))[
:, :, (2, 1, 0)
]
progress_bar = ProgressBar(total=video_target_width)
progress_bar.display()
for i in range(2, video_target_width):
# Create a frame where the left part (until i pixels width) contains the
# superresolution image, and the right part (from i pixels width) contains
# the bicubic image
comparison_frame = np.hstack(
(
resized_result_image[:, :i, :],
resized_bicubic_image[:, i:, :],
)
)
# create a small black border line between the superresolution
# and bicubic part of the image
comparison_frame[:, i - 1 : i + 1, :] = 0
out_video.write(comparison_frame)
progress_bar.progress = i
progress_bar.update()
out_video.release()
clear_output()
video_link = FileLink(result_video_path)
video_link.html_link_str = "<a href='%s' download>%s</a>"
display(HTML(f"The video has been saved to {video_link._repr_html_()}"))
```
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
import importlib
import vsms
import torch
import torch.nn as nn
import clip
from vsms import *
from vsms import BoxFeedbackQuery
class StringEncoder(object):
def __init__(self):
variant ="ViT-B/32"
device='cpu'
jit = False
self.device = device
model, preproc = clip.load(variant, device=device, jit=jit)
self.model = model
self.preproc = preproc
self.celoss = nn.CrossEntropyLoss(reduction='none')
def encode_string(self, string):
model = self.model.eval()
with torch.no_grad():
ttext = clip.tokenize([string])
text_features = model.encode_text(ttext.to(self.device))
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
return text_features.detach().cpu().numpy()
def get_text_features(self, actual_strings, target_string):
s2id = {}
sids = []
s2id[target_string] = 0
for s in actual_strings:
if s not in s2id:
s2id[s] = len(s2id)
sids.append(s2id[s])
strings = [target_string] + actual_strings
ustrings = list(s2id)
stringids = torch.tensor([s2id[s] for s in actual_strings], dtype=torch.long).to(self.device)
tstrings = clip.tokenize(ustrings)
text_features = self.model.encode_text(tstrings.to(self.device))
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
return text_features, stringids, ustrings
def forward(self, imagevecs, actual_strings, target_string):
## uniquify strings
        text_features, stringids, ustrings = self.get_text_features(actual_strings, target_string)
image_features = torch.from_numpy(imagevecs).type(text_features.dtype)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
image_features = image_features.to(self.device)
scores = image_features @ text_features.t()
assert scores.shape[0] == stringids.shape[0]
return scores, stringids.to(self.device), ustrings
def forward2(self, imagevecs, actual_strings, target_string):
        text_features, stringids, ustrings = self.get_text_features(actual_strings, target_string)
actual_vecs = text_features[stringids]
sought_vec = text_features[0].reshape(1,-1)
image_features = torch.from_numpy(imagevecs).type(text_features.dtype)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
image_features = image_features.to(self.device)
search_score = image_features @ sought_vec.reshape(-1)
confounder_score = (image_features * actual_vecs).sum(dim=1)
return search_score, confounder_score
import torch.optim
import torch.nn.functional as F
nn.HingeEmbeddingLoss()
import ray
ray.init('auto')
xclip = ModelService(ray.get_actor('clip'))
from vsms import *
benchparams = dict(
objectnet=dict(loader=objectnet_cropped, idxs=np.load('./data/object_random_idx.npy')[:10000])
)
def load_ds(evs, dsnames):
for k,v in tqdm(benchparams.items(), total=len(benchparams)):
if k in dsnames:
def closure():
ev0 = v['loader'](xclip)
idxs = v['idxs']
idxs = np.sort(idxs) if idxs is not None else None
ev = extract_subset(ev0, idxsample=idxs)
evs[k] = ev
closure()
evs = {}
load_ds(evs, 'objectnet')
ev = evs['objectnet']
vecs = ev.embedded_dataset
hdb = AugmentedDB(raw_dataset=ev.image_dataset, embedding=ev.embedding,
embedded_dataset=vecs, vector_meta=ev.fine_grained_meta)
def show_scores(se, vecs, actual_strings, target_string):
with torch.no_grad():
se.model.eval()
        scs, stids, rawstrs = se.forward(vecs, actual_strings, target_string=target_string)
scdf = pd.DataFrame({st:col for st,col in zip(rawstrs,scs.cpu().numpy().transpose())})
display(scdf.style.highlight_max(axis=1))
def get_feedback(idxbatch):
strids = np.where(ev.query_ground_truth.iloc[idxbatch])[1]
strs = ev.query_ground_truth.columns[strids]
strs = [search_terms['objectnet'][fbstr] for fbstr in strs.values]
return strs
curr_firsts = pd.read_parquet('./data/cats_objectnet_ordered.parquet')
class Updater(object):
def __init__(self, se, lr, rounds=1, losstype='hinge'):
self.se = se
self.losstype=losstype
self.opt = torch.optim.AdamW([{'params': se.model.ln_final.parameters()},
{'params':se.model.text_projection},
# {'params':se.model.transformer.parameters(), 'lr':lr*.01}
], lr=lr, weight_decay=0.)
# self.opt = torch.optim.Adam@([{'params': se.model.parameters()}], lr=lr)
self.rounds = rounds
def update(self, imagevecs, actual_strings, target_string):
se = self.se
se.model.train()
losstype = self.losstype
opt = self.opt
margin = .3
def opt_closure():
opt.zero_grad()
if losstype=='ce':
                scores, stringids, rawstrs = se.forward(imagevecs, actual_strings, target_string)
# breakpoint()
iidx = torch.arange(scores.shape[0]).long()
actuals = scores[iidx, stringids]
midx = scores.argmax(dim=1)
maxes = scores[iidx, midx]
elif losstype=='hinge':
#a,b = forward2(se, imagevecs, actual_strings, target_string)
                scores, stringids, rawstrs = se.forward(imagevecs, actual_strings, target_string)
# breakpoint()
iidx = torch.arange(scores.shape[0]).long()
maxidx = scores.argmax(dim=1)
actual_score = scores[iidx, stringids].reshape(-1,1)
#max_score = scores[iidx, maxidx]
#target_score = scores[:,0]
losses1 = F.relu(- (actual_score - scores - margin))
#losses2 = F.relu(- (actual_score - target_score - margin))
#losses = torch.cat([losses1, losses2])
losses = losses1
else:
assert False
loss = losses.mean()
#print(loss.detach().cpu())
loss.backward()
for _ in range(self.rounds):
opt.step(opt_closure)
def closure(search_query, max_n, firsts, show_display=False, batch_size=10):
sq = search_terms['objectnet'][search_query]
se = StringEncoder()
up = Updater(se, lr=.0001, rounds=1)
bs = batch_size
bfq = BoxFeedbackQuery(hdb, batch_size=bs, auto_fill_df=None)
tvecs = []
dbidxs = []
accstrs = []
gts = []
while True:
tvec = se.encode_string(sq)
tvecs.append(tvec)
idxbatch, _ = bfq.query_stateful(mode='dot', vector=tvec, batch_size=bs)
dbidxs.append(idxbatch)
gtvals = ev.query_ground_truth[search_query][idxbatch].values
gts.append(gtvals)
if show_display:
display(hdb.raw.show_images(idxbatch))
display(gtvals)
#vecs = ev.embedded_dataset[idxbatch]
actual_strings = get_feedback(idxbatch)
accstrs.extend(actual_strings)
if show_display:
display(actual_strings)
if gtvals.sum() > 0 or len(accstrs) > max_n:
break
# vcs = ev.embedded_dataset[idxbatch]
# astrs = actual_strings
vcs = ev.embedded_dataset[np.concatenate(dbidxs)]
astrs = accstrs
if show_display:
show_scores(se, vcs, astrs, target_string=sq)
up.update(vcs, actual_strings=astrs, target_string=sq)
if show_display:
show_scores(se, vcs, astrs, target_string=sq)
frsts = np.where(np.concatenate(gts).reshape(-1))[0]
if frsts.shape[0] == 0:
firsts[search_query] = np.inf
else:
firsts[search_query] = frsts[0] + 1
cf = curr_firsts[curr_firsts.nfirst_x > batch_size]
x.category
firsts = {}
batch_size = 10
for x in tqdm(curr_firsts.itertuples()):
closure(x.category, max_n=30, firsts=firsts, show_display=True, batch_size=batch_size)
print(firsts[x.category], x.nfirst_x)
if x.nfirst_x <= batch_size:
break
firsts = {}
batch_size = 10
for x in tqdm(curr_firsts.itertuples()):
closure(x.category, max_n=3*x.nfirst_x, firsts=firsts, show_display=True, batch_size=batch_size)
print(firsts[x.category], x.nfirst_x)
if x.nfirst_x <= batch_size:
break
rdf = pd.concat([pd.Series(firsts).rename('feedback'), cf[['category', 'nfirst_x']].set_index('category')['nfirst_x'].rename('no_feedback')], axis=1)
((rdf.feedback < rdf.no_feedback).mean(),
(rdf.feedback == rdf.no_feedback).mean(),
(rdf.feedback > rdf.no_feedback).mean())
rdf
rdf.to_parquet('./data/objectnet_nfirst_verbal.parquet')
```
|
github_jupyter
|
```
import sys, os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas_profiling as pp
sys.path.insert(0, os.path.abspath('..'))
from script.functions import *
```
#### First, we import the data and display it after passing it through the function.
```
df = load_and_process('../../data/raw/adult.data')
df
#MANAGED THE IMPORT THANK GOD
```
#### Next, we describe the data to show the mean surveyed age (38) and the mean work hours of 40/week
```
df.describe()
```
#### Create a profile report for the dataset.
```
df.to_csv('../../data/processed/processed1.csv')
report = pp.ProfileReport(df).to_file('../../data/processed/report.html')
```
#### Let's check the relationship between Age and Education
```
ageEdu = df.loc[:,['Age', 'Education']]
ageEdu
```
### Create countplot for different education levels:
This is to see what education the majority of the people surveyed in this dataset had.
We can clearly see that most of the people joined the workforce with only a HS degree,
while others mainly did some college courses or completed a full Bachelors.
```
ageEdu.replace({'Some-college': 'Some\ncollege','Prof-school':'Prof-\nschool',
'Assoc-voc':'Assoc\n-voc','Assoc-acdm':'Assoc\n-acdm',
'Preschool':'Pre-\nschool'}, inplace = True)
#names didn't fit on graph so I had to change them.
sns.despine()
plt.figure(figsize = (20,10))
sns.set(style = 'white', font_scale = 1.5)
eduCountGraph = sns.countplot(x = 'Education', data = ageEdu, palette = 'viridis', order = ageEdu['Education'].value_counts().index)
eduCountGraph.get_figure().savefig('../../images/eduCount.jpg',dpi = 500)
#Replace all row values in Education with new format.
df.replace({'Some-college': 'Some\ncollege','Prof-school':'Prof-\nschool',
'Assoc-voc':'Assoc\n-voc','Assoc-acdm':'Assoc\n-acdm',
'Preschool':'Pre-\nschool'}, inplace = True)
df
```
## Let's check the relationship of Age vs Earnings:
#### First, we check the count of ages that have >50K earning/year
This shows that most people with an above-50K yearly income are aged 37-47,
which suggests this is the age when adults are most settled into a good job.
```
ageEarn = df.loc[:,['Age', 'Yearly Income']]
ageEarnAbove50k = ageEarn.loc[lambda x: x['Yearly Income'] == '>50K']
#We'll check both Ages that have above and below 50K income
sns.set(style = 'white', font_scale = 1.5,rc={'figure.figsize':(30,10)})
ageEarnGraph = sns.histplot(x = 'Age',data = ageEarnAbove50k,shrink = 0.9, bins = 6, kde = True)
ageEarnGraph.set(ylabel = 'Yearly Income\nAbove 50K Count')
ageEarnGraph.get_figure().savefig('../../images/ageEarnAbove50k.jpg', dpi = 500)
```
#### Next, we check the count of ages with <=50K earning/year
This shows that most people with a below-50K yearly income are aged 19-36,
which is understandable for young people.
```
ageEarnBelow50k = ageEarn.loc[lambda x: x['Yearly Income'] == '<=50K']
ageEarnGraph = sns.histplot(x = 'Age', data = ageEarnBelow50k,bins = 6, shrink = 0.9, kde = True)
ageEarnGraph.set(ylabel = 'Yearly Income\nBelow 50K Count')
ageEarnGraph.get_figure().savefig('../../images/ageEarnBelow50k.jpg', dpi = 500)
```
## Let's make a density plot to see the trends in each graph and where they overlap
#### TL;DR Mo money mo money mo money - as we age..to the peak of 47.
This just shows the immense density of those aged around 40 in the above-50K
group. It also shows that around age 25 is where most people make under 50K,
and they then start to climb the above-50K ladder toward the peak age of 40. From
this data it is evident that we make more money as we get older.
```
ageEarnDenisty = sns.kdeplot(x = 'Age',hue = 'Yearly Income', data = ageEarn,
alpha = 0.3, fill = True, common_norm = False)
```
# Time to look at research questions.
## RESEARCH QUESTION 1:
### How much of a role does education play into someone's yearly income?
I will conduct this analysis through a count plot of each education category to see which of them has the highest count of >50k/year earners and which have the lowest and the same with <=50k/year earners.
#### TL;DR Bachelor is all you need for a good paying job
**Start with >50K wages**
This data shows that most of the people in the Above50k dataset only have their
Bachelors degree with HS-grad and some college education trailing behind. Of course the data becomes skewed
as we can't directly compare against other educational paths since they are not
in equal numbers.
```
eduWageAbove50k = df.loc[lambda x: x['Yearly Income'] == '>50K']
#Let's make a countplot for that.
eduWageGraph = sns.countplot(x = 'Education', data = eduWageAbove50k,
palette = 'Spectral', order = eduWageAbove50k['Education'].value_counts().index)
eduWageGraph.set(title = 'Education VS Salary (Over 50K) Count', ylabel = 'Count')
```
**Now with Below50k dataset**
#### TL;DR HS grads who don't go to post secondary and finish a degree have lower paying jobs
This data shows that most of the people with jobs paying below 50k/year are the ones
with only a HS-grad education with people that have only done some college courses
as second place. Unless you complete a program at post-secondary or go into trades
after finishing school, you may make less than 50k/year
```
eduWageBelow50k = df.loc[lambda x: x['Yearly Income'] == '<=50K']
#Let's make a countplot for that.
sns.countplot(x = 'Education', data = eduWageBelow50k, order = eduWageBelow50k['Education'].value_counts().index, palette = 'Spectral')
```
**Since my data is all categorical, and violin and dist plots don't count occurrences of categorical data, I am limited to a certain number of graph types.**
## RESEARCH QUESTION 2:
### Which industries of work pay the most amount of money on average?
To analyze this I will create a count plot of every job category to observe the amount of people earning above or below 50k/year
#### TL;DR Own a suit, managerial and executive have most top earners while trades/clerical industries have most low earners
We can see from this data that no one in the armed forces makes above 50K/year,
and that the Exec-managerial and Prof-specialty occupations account for the majority of
the people with wages above 50K/year.
```
#change row values to fit graph
df.replace({'Adm-clerical':'Adm-\nclerical','Exec-managerial':'Exec-\nmanagerial',
'Handlers-cleaners':'Handlers\n-cleaners','Tech-support':'Tech-\nsupport',
'Craft-repair':'Craft-\nrepair','Other-service':'Other-\nservice',
'Prof-specialty':'Prof-\nspecialty','Machine-op-inspct':'Machine\n-op-\ninspct','Farming-fishing':'Farming\n-fishing'}, inplace = True)
wageOc = df.loc[:,['Occupation', 'Yearly Income']]
wageOcAbove50k = wageOc.loc[lambda x:x['Yearly Income'] == '>50K']
wageOcGraph = sns.countplot(data = wageOcAbove50k, x = 'Occupation', palette = 'Spectral', order = wageOcAbove50k['Occupation'].value_counts().index)
wageOcGraph.set(title = 'Occupation VS Yearly Income', ylabel = 'Count of People with >50K\nEarnings per Occupation')
```
**Check jobs with below 50k/year earnings**
Looking at the second half of the data, we can observe that no one surveyed worked
in the armed forces. It also shows that the largest groups of people making below 50K a year
are in a near three-way tie between Adm-clerical, Other-service, and Craft-repair jobs.
Although Exec-managerial jobs make up most of the people who make >50K/year,
they also account for a decent chunk of the people who make less than 50K/year.
```
wageOcBelow50k = wageOc.loc[lambda x:x['Yearly Income'] == '<=50K']
wageOcGraph = sns.countplot(data = wageOcBelow50k, x = 'Occupation',palette = 'Spectral', order = wageOcBelow50k['Occupation'].value_counts().index)
wageOcGraph.set(title = 'Occupation VS Yearly Income', ylabel = 'Count of People with <=50K\nEarnings per Occupation')
```
## RESEARCH QUESTION 3:
### What is the most common occupation surveyed in this dataset?
I will conduct this analysis through a count plot of each occupation.
### Results:
In an interesting 3-way tie, we have Prof-specialty, Craft-repair, and Exec-managerial occupations with the highest counts, though not far behind are Adm-clerical, Sales, and Other-service. It is striking that executive/managerial roles are so prevalent in this dataset, as they can be thought of as difficult roles to obtain. The occupations in this category also hold first place for the most wages above 50K/year.
```
occ = df.loc[:,['Occupation']]
sns.countplot(x='Occupation', data = occ, order = occ['Occupation'].value_counts().index)
```
## RESEARCH QUESTION 4:
### What is the ratio of people earning >50k/year and <=50k/year by sex?
I will conduct this in two separate graphs: the first focusing on people who make above 50K/year, and the second on those earning <=50K/year.
### Results:
The graphs show that a majority of those surveyed in this dataset were men. In the first half of the data, men roughly sextuple the women in earning above 50K/year. The ratio of high earners to low earners within each sex is about
6100/14000 ≈ 44% for men, compared with 1000/8000 = 12.5% for women.
```
earnSex = df.loc[:,['Sex', 'Yearly Income']]
earnSexAbove50k = earnSex.loc[lambda x: x['Yearly Income'] == '>50K']
plt.figure(figsize=(5,5))
graph = sns.countplot(data = earnSexAbove50k, x = 'Sex')
graph.set(ylabel = 'Number of People who Make\n >50K/year')
earnSexBelow50k = earnSex.loc[lambda x: x['Yearly Income'] == '<=50K']
plt.figure(figsize=(5,5))
graph = sns.countplot(data = earnSexBelow50k, x = 'Sex')
graph.set(ylabel = 'Number of People who Make\n <=50K/year')
#replace some values so they fit on graph
df.replace({'Married-civ-spouse':'Married\n-civ-\nspouse', 'Never-married':'Never-\nMarried',
'Married-spouse-absent':'Married\n-spouse\n-absent','Married-AF-spouse':'Married\n-AF-\nspouse'}, inplace = True)
df
```
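As a quick sanity check on the ratios quoted above, the shares can also be computed directly rather than read off the bars. This is just a sketch, assuming `df` still holds the original `Sex` and `Yearly Income` columns:
```
import pandas as pd  # already imported earlier in this notebook

# Proportion of each income bracket within each sex (rows sum to 1)
sex_income = pd.crosstab(df['Sex'], df['Yearly Income'], normalize='index')
print(sex_income['>50K'])
```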
## RESEARCH QUESTION 5:
### What is the relationship of yearly earnings and marital status?
I will conduct this through splitting the data into the top earners and low earners (>50K/year,<=50K/year) and comparing them to their marital status.
### Results
People who are married are most likely to make over 50k/year while people who have never married top the charts for below 50k/year.
```
earnMar = df.loc[:,['Yearly Income', 'Marital Status']]
earnMarAbove50k = earnMar.loc[lambda x: x['Yearly Income'] == '>50K']
plt.figure(figsize= (10,5))
graph = sns.countplot(data = earnMarAbove50k, x = 'Marital Status')
graph.set_ylabel('Number of Top Earners\nby Marital Status')
earnMarBelow50k = earnMar.loc[lambda x: x['Yearly Income'] == '<=50K']
plt.figure(figsize= (10,5))
graph = sns.countplot(data = earnMarBelow50k, x = 'Marital Status')
graph.set_ylabel('Number of Low Earners\nby Marital Status')
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/wesleybeckner/technology_fundamentals/blob/main/C4%20Machine%20Learning%20II/SOLUTIONS/SOLUTION_Tech_Fun_C4_S2_Computer_Vision_Part_2_(Defect_Detection_Case_Study).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Technology Fundamentals Course 4, Session 2: Computer Vision Part 2 (Defect Detection Case Study)
**Instructor**: Wesley Beckner
**Contact**: [email protected]
**Teaching Assistants**: Varsha Bang, Harsha Vardhan
**Contact**: [email protected], [email protected]
<br>
---
<br>
In this session we will continue with our exploration of CNNs. In the previous session we discussed the three flagship layers of the CNN: convolution, ReLU, and maximum pooling. Here we'll discuss the sliding window, how to build your own custom CNN, and data augmentation for images.
<br>
_images in this notebook borrowed from [Ryan Holbrook](https://mathformachines.com/)_
---
<br>
<a name='top'></a>
# Contents
* 4.0 [Preparing Environment and Importing Data](#x.0)
* 4.0.1 [Enabling and Testing the GPU](#x.0.1)
* 4.0.2 [Observe TensorFlow on GPU vs CPU](#x.0.2)
* 4.0.3 [Import Packages](#x.0.3)
* 4.0.4 [Load Dataset](#x.0.4)
* 4.0.4.1 [Loading Data with ImageDataGenerator](#x.0.4.1)
* 4.0.4.2 [Loading Data with image_dataset_from_directory](#x.0.4.2)
* 4.1 [Sliding Window](#x.1)
* 4.1.1 [Stride](#x.1.1)
* 4.1.2 [Padding](#x.1.2)
* 4.1.3 [Exercise: Exploring Sliding Windows](#x.1.3)
* 4.2 [Custom CNN](#x.2)
* 4.2.1 [Evaluate Model](#x.2.1)
* 4.3 [Data Augmentation](#x.3)
* 4.3.1 [Evaluate Model](#x.3.1)
* 4.3.2 [Exercise: Image Preprocessing Layers](#x.3.2)
* 4.4 [Transfer Learning](#x.4)
<br>
---
<a name='x.0'></a>
## 4.0 Preparing Environment and Importing Data
[back to top](#top)
<a name='x.0.1'></a>
### 4.0.1 Enabling and testing the GPU
[back to top](#top)
First, you'll need to enable GPUs for the notebook:
- Navigate to Edit → Notebook Settings
- select GPU from the Hardware Accelerator drop-down
Next, we'll confirm that we can connect to the GPU with tensorflow:
```
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
```
<a name='x.0.2'></a>
### 4.0.2 Observe TensorFlow speedup on GPU relative to CPU
[back to top](#top)
This example constructs a typical convolutional neural network layer over a
random image and manually places the resulting ops on either the CPU or the GPU
to compare execution speed.
```
%tensorflow_version 2.x
import tensorflow as tf
import timeit
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print(
'\n\nThis error most likely means that this notebook is not '
'configured to use a GPU. Change this in Notebook Settings via the '
'command palette (cmd/ctrl-shift-P) or the Edit menu.\n\n')
raise SystemError('GPU device not found')
def cpu():
with tf.device('/cpu:0'):
random_image_cpu = tf.random.normal((100, 100, 100, 3))
net_cpu = tf.keras.layers.Conv2D(32, 7)(random_image_cpu)
return tf.math.reduce_sum(net_cpu)
def gpu():
with tf.device('/device:GPU:0'):
random_image_gpu = tf.random.normal((100, 100, 100, 3))
net_gpu = tf.keras.layers.Conv2D(32, 7)(random_image_gpu)
return tf.math.reduce_sum(net_gpu)
# We run each op once to warm up; see: https://stackoverflow.com/a/45067900
cpu()
gpu()
# Run the op several times.
print('Time (s) to convolve 32x7x7x3 filter over random 100x100x100x3 images '
'(batch x height x width x channel). Sum of ten runs.')
print('CPU (s):')
cpu_time = timeit.timeit('cpu()', number=10, setup="from __main__ import cpu")
print(cpu_time)
print('GPU (s):')
gpu_time = timeit.timeit('gpu()', number=10, setup="from __main__ import gpu")
print(gpu_time)
print('GPU speedup over CPU: {}x'.format(int(cpu_time/gpu_time)))
```
<a name='x.0.3'></a>
### 4.0.3 Import Packages
[back to top](#top)
```
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
#importing required libraries
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense, Conv2D, MaxPooling2D, InputLayer
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report,confusion_matrix
```
<a name='x.0.4'></a>
### 4.0.4 Load Dataset
[back to top](#top)
We will actually take a beat here today. When we started building our ML frameworks, we simply wanted our data in a numpy array to feed into our pipeline. At some point, especially when working with images, the data becomes too large to fit into memory. For this reason we need an alternative way to import our data. With the keras/tf merger, two popular frameworks became available, `ImageDataGenerator` and `image_dataset_from_directory`, both under `tf.keras.preprocessing`. `image_dataset_from_directory` can sometimes be faster (tf origin), but `ImageDataGenerator` is a lot simpler to use and has on-the-fly data augmentation capability (keras origin).
For a full comparison of methods visit [this link](https://towardsdatascience.com/what-is-the-best-input-pipeline-to-train-image-classification-models-with-tf-keras-eb3fe26d3cc5)
```
# Sync your google drive folder
from google.colab import drive
drive.mount("/content/drive")
```
<a name='x.0.4.1'></a>
#### 4.0.4.1 Loading Data with `ImageDataGenerator`
[back to top](#top)
```
# full dataset can be attained from kaggle if you are interested
# https://www.kaggle.com/ravirajsinh45/real-life-industrial-dataset-of-casting-product?select=casting_data
path_to_casting_data = '/content/drive/MyDrive/courses/tech_fundamentals/TECH_FUNDAMENTALS/data/casting_data_class_practice'
image_shape = (300,300,1)
batch_size = 32
technocast_train_path = path_to_casting_data + '/train/'
technocast_test_path = path_to_casting_data + '/test/'
image_gen = ImageDataGenerator(rescale=1/255) # normalize pixels to 0-1
# we're using keras' built-in ImageDataGenerator, so we
# don't need to manually label every image as 0 or 1
print("loading training set...")
train_set = image_gen.flow_from_directory(technocast_train_path,
target_size=image_shape[:2],
color_mode="grayscale",
batch_size=batch_size,
class_mode='binary',
shuffle=True)
print("loading testing set...")
test_set = image_gen.flow_from_directory(technocast_test_path,
target_size=image_shape[:2],
color_mode="grayscale",
batch_size=batch_size,
class_mode='binary',
shuffle=False)
```
<a name='x.0.4.2'></a>
#### 4.0.4.2 Loading Data with `image_dataset_from_directory`
[back to top](#top)
This method should be approx 2x faster than `ImageDataGenerator`
```
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.data.experimental import AUTOTUNE
path_to_casting_data = '/content/drive/MyDrive/courses/tech_fundamentals/TECH_FUNDAMENTALS/data/casting_data_class_practice'
technocast_train_path = path_to_casting_data + '/train/'
technocast_test_path = path_to_casting_data + '/test/'
# Load training and validation sets
image_shape = (300,300,1)
batch_size = 32
ds_train_ = image_dataset_from_directory(
technocast_train_path,
labels='inferred',
label_mode='binary',
color_mode="grayscale",
image_size=image_shape[:2],
batch_size=batch_size,
shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
technocast_test_path,
labels='inferred',
label_mode='binary',
color_mode="grayscale",
image_size=image_shape[:2],
batch_size=batch_size,
shuffle=False,
)
train_set = ds_train_.prefetch(buffer_size=AUTOTUNE)
test_set = ds_valid_.prefetch(buffer_size=AUTOTUNE)
# view some images
def_path = '/def_front/cast_def_0_1001.jpeg'
ok_path = '/ok_front/cast_ok_0_1.jpeg'
image_path = technocast_train_path + ok_path
image = tf.io.read_file(image_path)
image = tf.io.decode_jpeg(image)
plt.figure(figsize=(6, 6))
plt.imshow(tf.squeeze(image), cmap='gray')
plt.axis('off')
plt.show();
```
<a name='x.1'></a>
## 4.1 Sliding Window
[back to top](#top)
The kernels we just reviewed need to be swept, or _slid_, along the preceding layer. We call this a **_sliding window_**, the window being the kernel.
<p align=center>
<img src="https://i.imgur.com/LueNK6b.gif" width=400></img>
What do you notice about the gif? One perhaps obvious observation is that you can't scoot all the way up to the border of the input layer; this is because the kernel defines operations _around_ the centered pixel, so you bang up against the margin of the input array. We can change the behavior at the boundary with a **_padding_** hyperparameter. A second observation is that the distance we move the kernel along in each step could be variable; we call this the **_stride_**. We will explore the effects of each of these.
```
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
layers.Conv2D(filters=64,
kernel_size=3,
strides=1,
padding='same',
activation='relu'),
layers.MaxPool2D(pool_size=2,
strides=1,
padding='same')
# More layers follow
])
```
<a name='x.1.1'></a>
### 4.1.1 Stride
[back to top](#top)
Stride defines the step size we take with the kernel as it passes along the input array. The stride needs to be defined in both the horizontal and vertical dimensions. This animation shows a 2x2 stride
<p align=center>
<img src="https://i.imgur.com/Tlptsvt.gif" width=400></img>
The stride will often be 1 for CNNs, where we don't want to lose any important information. Maximum pooling layers will often have strides greater than 1, to better summarize/accentuate the relevant features/activations.
If the stride is the same in both the horizontal and vertical directions, it can be set with a single number like `strides=2` within keras.
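As a minimal sketch (separate from the lesson's model, with an arbitrary filter count), the two ways of writing the stride are equivalent, and the output shape confirms the downsampling:
```
import tensorflow as tf

x = tf.random.normal((1, 64, 64, 1))  # (batch, height, width, channels)
conv_a = tf.keras.layers.Conv2D(16, kernel_size=3, strides=2, padding='same')
conv_b = tf.keras.layers.Conv2D(16, kernel_size=3, strides=(2, 2), padding='same')
print(conv_a(x).shape)  # (1, 32, 32, 16) -- height and width halved
print(conv_b(x).shape)  # (1, 32, 32, 16) -- identical result
```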
<a name='x.1.2'></a>
### 4.1.2 Padding
[back to top](#top)
Padding attempts to resolve our issue at the border: our kernel requires information surrounding the centered pixel, and at the border of the input array we don't have that information. What to do?
We have a couple of popular options within the keras framework. We can set `padding='valid'` and only slide the kernel to the edge of the input array. This has the drawback of feature maps shrinking in size as we pass through the network. Another option is to set `padding='same'`; this pads the input array with zeros, just enough of them to keep the feature map the same size as the input array. This is shown in the gif below:
<p align=center>
<img src="https://i.imgur.com/RvGM2xb.gif" width=400></img>
The downside of setting the padding to same will be that features at the edges of the image will be diluted.
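A small sketch on a dummy input (filter count arbitrary) makes the size difference between the two options concrete:
```
import tensorflow as tf

x = tf.random.normal((1, 64, 64, 1))
out_valid = tf.keras.layers.Conv2D(8, kernel_size=3, padding='valid')(x)
out_same = tf.keras.layers.Conv2D(8, kernel_size=3, padding='same')(x)
print(out_valid.shape)  # (1, 62, 62, 8) -- the feature map shrinks
print(out_same.shape)   # (1, 64, 64, 8) -- zero padding preserves the size
```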
<a name='x.1.3'></a>
### 4.1.3 Exercise: Exploring Sliding Windows
[back to top](#top)
```
from skimage import draw, transform
from itertools import product
# helper functions borrowed from Ryan Holbrook
# https://mathformachines.com/
def circle(size, val=None, r_shrink=0):
circle = np.zeros([size[0]+1, size[1]+1])
rr, cc = draw.circle_perimeter(
size[0]//2, size[1]//2,
radius=size[0]//2 - r_shrink,
shape=[size[0]+1, size[1]+1],
)
if val is None:
circle[rr, cc] = np.random.uniform(size=circle.shape)[rr, cc]
else:
circle[rr, cc] = val
circle = transform.resize(circle, size, order=0)
return circle
def show_kernel(kernel, label=True, digits=None, text_size=28):
# Format kernel
kernel = np.array(kernel)
if digits is not None:
kernel = kernel.round(digits)
# Plot kernel
cmap = plt.get_cmap('Blues_r')
plt.imshow(kernel, cmap=cmap)
rows, cols = kernel.shape
thresh = (kernel.max()+kernel.min())/2
# Optionally, add value labels
if label:
for i, j in product(range(rows), range(cols)):
val = kernel[i, j]
color = cmap(0) if val > thresh else cmap(255)
plt.text(j, i, val,
color=color, size=text_size,
horizontalalignment='center', verticalalignment='center')
plt.xticks([])
plt.yticks([])
def show_extraction(image,
kernel,
conv_stride=1,
conv_padding='valid',
activation='relu',
pool_size=2,
pool_stride=2,
pool_padding='same',
figsize=(10, 10),
subplot_shape=(2, 2),
ops=['Input', 'Filter', 'Detect', 'Condense'],
gamma=1.0):
# Create Layers
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=1,
kernel_size=kernel.shape,
strides=conv_stride,
padding=conv_padding,
use_bias=False,
input_shape=image.shape,
),
tf.keras.layers.Activation(activation),
tf.keras.layers.MaxPool2D(
pool_size=pool_size,
strides=pool_stride,
padding=pool_padding,
),
])
layer_filter, layer_detect, layer_condense = model.layers
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])
layer_filter.set_weights([kernel])
# Format for TF
image = tf.expand_dims(image, axis=0)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Extract Feature
image_filter = layer_filter(image)
image_detect = layer_detect(image_filter)
image_condense = layer_condense(image_detect)
images = {}
if 'Input' in ops:
images.update({'Input': (image, 1.0)})
if 'Filter' in ops:
images.update({'Filter': (image_filter, 1.0)})
if 'Detect' in ops:
images.update({'Detect': (image_detect, gamma)})
if 'Condense' in ops:
images.update({'Condense': (image_condense, gamma)})
# Plot
plt.figure(figsize=figsize)
for i, title in enumerate(ops):
image, gamma = images[title]
plt.subplot(*subplot_shape, i+1)
plt.imshow(tf.image.adjust_gamma(tf.squeeze(image), gamma))
plt.axis('off')
plt.title(title)
```
Create an image and kernel:
```
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
image = circle([64, 64], val=1.0, r_shrink=3)
image = tf.reshape(image, [*image.shape, 1])
# Bottom sobel
kernel = tf.constant(
[[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]],
)
show_kernel(kernel)
```
What do we think this kernel is meant to detect?
We will apply our kernel with a 1x1 stride and our max pooling with a 2x2 stride and pool size of 2.
```
show_extraction(
image, kernel,
# Window parameters
conv_stride=1,
pool_size=2,
pool_stride=2,
subplot_shape=(1, 4),
figsize=(14, 6),
)
```
Works OK! What about a higher conv stride?
```
show_extraction(
image, kernel,
# Window parameters
conv_stride=2,
pool_size=3,
pool_stride=4,
subplot_shape=(1, 4),
figsize=(14, 6),
)
```
Looks like we lost a bit of information!
Sometimes published models will use a larger kernel and stride in the initial layer to produce large-scale features early in the network without losing too much information (ResNet50 uses 7x7 kernels with a stride of 2). For now, without much experience, it's safest to set conv strides to 1.
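Purely for illustration, such a first layer might look like the sketch below; the 64-filter choice is a typical convention, not something prescribed by this lesson:
```
import tensorflow as tf

# ResNet-style stem: a large 7x7 kernel with stride 2 in the very first layer
stem = tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same', activation='relu')
print(stem(tf.random.normal((1, 300, 300, 1))).shape)  # (1, 150, 150, 64)
```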
Take a moment here with the given kernel and explore different settings for applying both the kernel and the max_pool
```
conv_stride=YOUR_VALUE, # condenses pixels
pool_size=YOUR_VALUE,
pool_stride=YOUR_VALUE, # condenses pixels
```
Given a total condensation of 8 (taking condensation to mean `conv_stride` x `pool_stride`), what do you think is the best combination of values for `conv_stride`, `pool_size`, and `pool_stride`?
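One reasonable combination, following the advice above to keep the convolution stride small, is sketched below; it is one possible answer rather than the definitive one:
```
show_extraction(
    image, kernel,
    conv_stride=1,   # keep the convolution lossless
    pool_size=8,
    pool_stride=8,   # 1 x 8 = total condensation of 8
    subplot_shape=(1, 4),
    figsize=(14, 6),
)
```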
<a name='x.2'></a>
## 4.2 Custom CNN
[back to top](#top)
As we move through the network, small-scale features (lines, edges, etc.) turn into large-scale features (shapes, eyes, ears, etc.). We call these blocks of convolution, ReLU, and max pooling **_convolutional blocks_**, and they are the low-level modular framework we work with. By this means, the CNN is able to design its own features, ones suited for the classification or regression task at hand.
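To make the idea of a convolutional block concrete, here is a small helper of my own (not part of the lesson's code) that bundles the layers of one block together:
```
from tensorflow.keras.layers import Conv2D, MaxPooling2D

def conv_block(filters):
    """One convolutional block: convolution + ReLU detection + max-pool condensation."""
    return [
        Conv2D(filters=filters, kernel_size=(3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
    ]

blocks = conv_block(8) + conv_block(16)  # e.g. stack blocks, doubling the filters each time
```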
We will design a custom CNN for the Casting Defect Detection Dataset.
In the following I'm going to double the number of filters after the first block. This is a common pattern, as the max pooling layers push the spatial dimensions in the opposite direction.
```
#Creating model
model = Sequential()
model.add(InputLayer(input_shape=(image_shape)))
model.add(Conv2D(filters=8, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(224))
model.add(Activation('relu'))
# Last layer
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['binary_accuracy'])
early_stop = EarlyStopping(monitor='val_loss',
patience=5,
restore_best_weights=True,)
# with CPU + ImageDataGenerator runs for about 40 minutes (5 epochs)
# with GPU + image_dataset_from_directory runs for about 4 minutes (16 epochs)
with tf.device('/device:GPU:0'):
results = model.fit(train_set,
epochs=20,
validation_data=test_set,
callbacks=[early_stop])
# model.save('inspection_of_casting_products.h5')
```
<a name='x.2.1'></a>
### 4.2.1 Evaluate Model
[back to top](#top)
```
# model.load_weights('inspection_of_casting_products.h5')
losses = pd.DataFrame(results.history)
# losses.to_csv('history_simple_model.csv', index=False)
fig, ax = plt.subplots(1, 2, figsize=(10,5))
losses[['loss','val_loss']].plot(ax=ax[0])
losses[['binary_accuracy','val_binary_accuracy']].plot(ax=ax[1])
# predict test set
pred_probability = model.predict(test_set)
# convert to bool
predictions = pred_probability > 0.5
# precision / recall / f1-score
# use test_set.classes to get labels from ImageDataGenerator
# for image_dataset_from_directory we have to do a little gymnastics
# to get the labels
labels = np.array([])
for x, y in ds_valid_:
labels = np.concatenate([labels, tf.squeeze(y.numpy()).numpy()])
print(classification_report(labels,predictions))
plt.figure(figsize=(10,6))
sns.heatmap(confusion_matrix(labels,predictions),annot=True)
```
<a name='x.3'></a>
## 4.3 Data Augmentation
[back to top](#top)
Alright, alright, alright. We've done pretty well making our CNN model. But let's see if we can make it even better. There's one last trick we'll cover here in regard to image classifiers. We're going to perturb the input images in such a way as to create a pseudo-larger dataset.
With any machine learning model, the more relevant training data we give the model, the better. The key here is _relevant_ training data. We can easily do this with images so long as we do not change the class of the image. For example, in the small plot below, we are changing contrast, hue, rotation, and doing other things to the image of a car; and this is okay because it does not change the classification from a car to, say, a truck.
<p align=center>
<img src="https://i.imgur.com/UaOm0ms.png" width=400></img>
Typically when we do data augmentation for images, we do it _online_, i.e. during training. Recall that we train in batches (or minibatches) with CNNs. An example of a minibatch, then, might be the small multiples plot below.
<p align=center>
<img src="https://i.imgur.com/MFviYoE.png" width=400></img>
By varying the images in this way, the model always sees slightly new data and becomes more robust. Remember the caveat: we can't muddle the relevant classification of the image. Sometimes the best way to see whether data augmentation will be helpful is to just try it and see!
```
from tensorflow.keras.layers.experimental import preprocessing
#Creating model
model = Sequential()
model.add(preprocessing.RandomFlip('horizontal')), # flip left-to-right
model.add(preprocessing.RandomFlip('vertical')), # flip upside-down
model.add(preprocessing.RandomContrast(0.5)), # contrast change by up to 50%
model.add(Conv2D(filters=8, kernel_size=(3,3),input_shape=image_shape, activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(224))
model.add(Activation('relu'))
# Last layer
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['binary_accuracy'])
early_stop = EarlyStopping(monitor='val_loss',
patience=5,
restore_best_weights=True,)
results = model.fit(train_set,
epochs=30,
validation_data=test_set,
callbacks=[early_stop])
```
<a name='x.3.1'></a>
### 4.3.1 Evaluate Model
[back to top](#top)
```
losses = pd.DataFrame(results.history)
# losses.to_csv('history_augment_model.csv', index=False)
fig, ax = plt.subplots(1, 2, figsize=(10,5))
losses[['loss','val_loss']].plot(ax=ax[0])
losses[['binary_accuracy','val_binary_accuracy']].plot(ax=ax[1])
# predict test set
pred_probability = model.predict(test_set)
# convert to bool
predictions = pred_probability > 0.5
# precision / recall / f1-score
# use test_set.classes to get labels from ImageDataGenerator
# for image_dataset_from_directory we have to do a little gymnastics
# to get the labels
labels = np.array([])
for x, y in ds_valid_:
labels = np.concatenate([labels, tf.squeeze(y.numpy()).numpy()])
print(classification_report(labels,predictions))
plt.figure(figsize=(10,6))
sns.heatmap(confusion_matrix(labels,predictions),annot=True)
```
<a name='x.3.2'></a>
### 4.3.2 Exercise: Image Preprocessing Layers
[back to top](#top)
These layers apply random augmentation transforms to a batch of images. They are only active during training. You can visit the documentation [here](https://keras.io/api/layers/preprocessing_layers/image_preprocessing/)
* `RandomCrop` layer
* `RandomFlip` layer
* `RandomTranslation` layer
* `RandomRotation` layer
* `RandomZoom` layer
* `RandomHeight` layer
* `RandomWidth` layer
Use any combination of random augmentation transforms and retrain your model. Can you get a higher validation performance? You may need to increase your epochs.
```
# code cell for exercise 4.3.2
from tensorflow.keras.layers.experimental import preprocessing
#Creating model
model = Sequential()
model.add(preprocessing.RandomFlip('horizontal')), # flip left-to-right
model.add(preprocessing.RandomFlip('vertical')), # flip upside-down
model.add(preprocessing.RandomContrast(0.5)), # contrast change by up to 50%
model.add(preprocessing.RandomRotation((-1,1))), # random rotation; the factor is a fraction of 2*pi, so (-1,1) allows any angle
model.add(Conv2D(filters=8, kernel_size=(3,3),input_shape=image_shape, activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3,3), activation='relu',))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(224))
model.add(Activation('relu'))
# Last layer
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['binary_accuracy'])
early_stop = EarlyStopping(monitor='val_loss',
patience=5,
restore_best_weights=True,)
results = model.fit(train_set,
epochs=200,
validation_data=test_set,
callbacks=[early_stop])
# predict test set
pred_probability = model.predict(test_set)
# convert to bool
predictions = pred_probability > 0.5
# precision / recall / f1-score
# use test_set.classes to get labels from ImageDataGenerator
# for image_dataset_from_directory we have to do a little gymnastics
# to get the labels
labels = np.array([])
for x, y in ds_valid_:
labels = np.concatenate([labels, tf.squeeze(y.numpy()).numpy()])
print(classification_report(labels,predictions))
plt.figure(figsize=(10,6))
sns.heatmap(confusion_matrix(labels,predictions),annot=True)
```
<a name='x.4'></a>
## 4.4 Transfer Learning
[back to top](#top)
Transfer learning with [EfficientNet](https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/)
```
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.data.experimental import AUTOTUNE
path_to_casting_data = '/content/drive/MyDrive/courses/TECH_FUNDAMENTALS/data/casting_data_class_practice'
technocast_train_path = path_to_casting_data + '/train/'
technocast_test_path = path_to_casting_data + '/test/'
# Load training and validation sets
image_shape = (300,300,3)
batch_size = 32
ds_train_ = image_dataset_from_directory(
technocast_train_path,
labels='inferred',
label_mode='binary',
    color_mode="rgb",  # EfficientNet's ImageNet weights expect 3-channel input, matching image_shape above
image_size=image_shape[:2],
batch_size=batch_size,
shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
technocast_test_path,
labels='inferred',
label_mode='binary',
    color_mode="rgb",  # EfficientNet's ImageNet weights expect 3-channel input, matching image_shape above
image_size=image_shape[:2],
batch_size=batch_size,
shuffle=False,
)
train_set = ds_train_.prefetch(buffer_size=AUTOTUNE)
test_set = ds_valid_.prefetch(buffer_size=AUTOTUNE)
def build_model(image_shape):
input = tf.keras.layers.Input(shape=(image_shape))
    # include_top = False will leave off the last dense layer used for classification
model = tf.keras.applications.EfficientNetB3(include_top=False,
input_tensor=input,
weights="imagenet")
# Freeze the pretrained weights
model.trainable = False
# now we have to rebuild the top
x = tf.keras.layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
x = tf.keras.layers.BatchNormalization()(x)
top_dropout_rate = 0.2
x = tf.keras.layers.Dropout(top_dropout_rate, name="top_dropout")(x)
# use num-nodes = 1 for binary, class # for multiclass
    output = tf.keras.layers.Dense(1, activation="sigmoid", name="pred")(x)  # sigmoid (not softmax) for a single-unit binary output
# Compile
model = tf.keras.Model(input, output, name="EfficientNet")
model.compile(optimizer='adam',
loss="binary_crossentropy",
metrics=["binary_accuracy"])
return model
model = build_model(image_shape)
with tf.device('/device:GPU:0'):
results = model.fit(train_set,
epochs=20,
validation_data=test_set,
callbacks=[early_stop])
```
|
github_jupyter
|
# Data Manipulation and Plotting with `pandas`
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
```

## Learning Goals
- Load .csv files into `pandas` DataFrames
- Describe and manipulate data in Series and DataFrames
- Visualize data using DataFrame methods and `matplotlib`
## What is Pandas?
Pandas, as [the Anaconda docs](https://docs.anaconda.com/anaconda/packages/py3.7_osx-64/) tell us, offers us "High-performance, easy-to-use data structures and data analysis tools." It's something like "Excel for Python", but it's quite a bit more powerful.
Let's read in the heart dataset.
Pandas has many methods for reading different types of files. Note that here we have a .csv file.
Read about this dataset [here](https://www.kaggle.com/ronitf/heart-disease-uci).
```
heart_df = pd.read_csv('heart.csv')
```
The output of the `.read_csv()` function is a pandas *DataFrame*, which has a familiar tabular structure of rows and columns.
```
type(heart_df)
heart_df
```
## DataFrames and Series
Two main types of pandas objects are the DataFrame and the Series, the latter being in effect a single column of the former:
```
age_series = heart_df['age']
type(age_series)
```
Notice how we can isolate a column of our DataFrame simply by using square brackets together with the name of the column.
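Relatedly, passing a list of column names (note the double brackets) returns a smaller DataFrame rather than a Series:
```
heart_df[['age', 'chol']].head()
```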
Both Series and DataFrames have an *index* as well:
```
heart_df.index
age_series.index
```
Pandas is built on top of NumPy, and we can always access the NumPy array underlying a DataFrame using `.values`.
```
heart_df.values
```
## Basic DataFrame Attributes and Methods
### `.head()`
```
heart_df.head()
```
### `.tail()`
```
heart_df.tail()
```
### `.info()`
```
heart_df.info()
```
### `.describe()`
```
heart_df.describe()
#Provides statistical data on the file data
```
### `.dtypes`
```
heart_df.dtypes
```
### `.shape`
```
heart_df.shape
```
### Exploratory Plots
Let's make ourselves a histogram of ages:
```
sns.set_style('darkgrid')
sns.distplot(a=heart_df['age']);
```
And while we're at it let's do a scatter plot of maximum heart rate vs. age:
```
sns.scatterplot(x=heart_df['age'], y=heart_df['thalach']);
```
## Adding to a DataFrame
### Adding Rows
Here are two rows that our engineer accidentally left out of the .csv file, expressed as a Python dictionary:
```
extra_rows = {'age': [40, 30], 'sex': [1, 0], 'cp': [0, 0], 'trestbps': [120, 130],
'chol': [240, 200],
'fbs': [0, 0], 'restecg': [1, 0], 'thalach': [120, 122], 'exang': [0, 1],
'oldpeak': [0.1, 1.0], 'slope': [1, 1], 'ca': [0, 1], 'thal': [2, 3],
'target': [0, 0]}
extra_rows
```
How can we add this to the bottom of our dataset?
```
# Let's first turn this into a DataFrame.
# We can use the .from_dict() method.
missing = pd.DataFrame(extra_rows)
missing
# Now we just need to concatenate the two DataFrames together.
# Note the `ignore_index` parameter! We'll set that to True.
heart_augmented = pd.concat([heart_df, missing],
ignore_index=True)
# Let's check the end to make sure we were successful!
heart_augmented.tail()
```
### Adding Columns
Adding a column is very easy in `pandas`. Let's add a new column to our dataset called "test", and set all of its values to 0.
```
heart_augmented['test'] = 0
heart_augmented.head()
```
I can also add columns whose values are functions of existing columns.
Suppose I want to add the cholesterol column ("chol") to the resting systolic blood pressure column ("trestbps"):
```
heart_augmented['chol+trestbps'] = heart_augmented['chol'] + heart_augmented['trestbps']
heart_augmented.head()
```
## Filtering
We can use filtering techniques to see only certain rows of our data. If we wanted to see only the rows for patients 70 years of age or older, we can simply type:
```
heart_augmented[heart_augmented['age'] >= 70]
```
Use `&` for "and" and `|` for "or".
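For example (a minimal sketch with an arbitrary cholesterol cutoff), each condition must be wrapped in parentheses when combining them:
```
# Patients who are 70 or over AND have cholesterol above 300
heart_augmented[(heart_augmented['age'] >= 70) & (heart_augmented['chol'] > 300)]
```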
### Exercise
Display the patients who are 70 or over as well as the patients whose trestbps score is greater than 170.
```
heart_augmented[(heart_augmented['age'] >= 70) | (heart_augmented['trestbps'] > 170)]
```
<details>
<summary>Answer</summary>
<code>heart_augmented[(heart_augmented['age'] >= 70) | (heart_augmented['trestbps'] > 170)]</code>
</details>
### Exploratory Plot
Using the subframe we just made, let's make a scatter plot of their cholesterol levels vs. age and color by sex:
```
at_risk = heart_augmented[(heart_augmented['age'] >= 70) \
| (heart_augmented['trestbps'] > 170)]
# the backslash allows you to break the line but continue the statement on the next
sns.scatterplot(data=at_risk, x='age', y='chol', hue='sex');
```
### `.loc` and `.iloc`
We can use `.loc` to get, say, the first ten values of the age and resting blood pressure ("trestbps") columns:
```
heart_augmented.loc
heart_augmented.loc[:9, ['age', 'trestbps']]
# Note that .loc includes the endpoint (rather than stopping before it),
# because it selects by label and the index has a label of 9
```
`.iloc` is used for selecting locations in the DataFrame **by number**:
```
heart_augmented.iloc
heart_augmented.iloc[3, 0]
```
### Exercise
How would we get the same slice as just above by using .iloc() instead of .loc()?
```
heart_augmented.iloc[:10, [0,3]]
```
<details>
<summary>Answer</summary>
<code>heart_augmented.iloc[:10, [0, 3]]</code>
</details>
## Statistics
### `.mean()`
```
heart_augmented.mean()
```
Be careful! Some of these are not straightforwardly interpretable. What does an average "sex" of 0.682 mean?
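One way around this (a sketch, not something the lesson requires) is to compute the mean only for columns where it is meaningful:
```
heart_augmented[['age', 'trestbps', 'chol', 'thalach']].mean()
```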
### `.min()`
```
heart_augmented.min()
```
### `.max()`
```
heart_augmented.max()
```
## Series Methods
### `.value_counts()`
How many different values does slope have? What about sex? And target?
```
heart_augmented['slope'].value_counts()
```
### `.sort_values()`
```
heart_augmented['age'].sort_values()
```
## `pandas`-Native Plotting
The `.plot()` and `.hist()` methods available for DataFrames use a wrapper around `matplotlib`:
```
heart_augmented.plot(x='age', y='trestbps', kind='scatter');
heart_augmented.hist(column='chol');
```
## Exercises
1. Make a bar plot of "age" vs. "slope" for the `heart_augmented` DataFrame.
<details>
<summary>Answer</summary>
<code>sns.barplot(data=heart_augmented, x='slope', y='age');</code>
</details>
2. Make a histogram of ages for **just the men** in `heart_augmented` (`heart_augmented['sex'] == 1`).
<details>
<summary>Answer</summary>
<code>men = heart_augmented[heart_augmented['sex'] == 1]
sns.distplot(a=men['age']);</code>
</details>
3. Make separate scatter plots of cholesterol vs. resting systolic blood pressure for the target=0 and the target=1 groups. Put both plots on the same figure and give each an appropriate title.
<details>
<summary>Answer</summary>
<code>target0 = heart_augmented[heart_augmented['target'] == 0]
target1 = heart_augmented[heart_augmented['target'] == 1]
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
sns.scatterplot(data=target0, x='trestbps', y='chol', ax=ax[0])
sns.scatterplot(data=target1, x='trestbps', y='chol', ax=ax[1])
ax[0].set_title('Cholesterol Vs. Resting Blood Pressure in Women')
ax[1].set_title('Cholesterol Vs. Resting Blood Pressure in Men');</code>
</details>
## Let's find a .csv file online and experiment with it.
I'm going to head to [dataportals.org](https://dataportals.org) to find a .csv file.
|
github_jupyter
|
# KCWI_calcs.ipynb
functions from Busola Alabi, Apr 2018
```
from __future__ import division
import glob
import re
import os, sys
from astropy.io.fits import getheader, getdata
from astropy.wcs import WCS
import astropy.units as u
import numpy as np
from scipy import interpolate
import logging
from time import time
import matplotlib.pyplot as plt
from pylab import *
import matplotlib as mpl
import matplotlib.ticker as mtick
from scipy.special import gamma
def make_obj(flux, grat_wave, f_lam_index):
'''
'''
w = np.arange(3000)+3000.
p_A = flux/(2.e-8/w)*(w/grat_wave)**f_lam_index
return w, p_A
def inst_throughput(wave, grat):
'''
'''
eff_bl = np.asarray([0.1825,0.38,0.40,0.46,0.47,0.44])
eff_bm = np.asarray([0.1575, 0.33, 0.36, 0.42, 0.48, 0.45])
eff_bh1 = np.asarray([0., 0.0, 0.0, 0.0, 0.0, 0.])
eff_bh2 = np.asarray([0., 0.18, 0.3, 0.4, 0.28, 0.])
eff_bh3 = np.asarray([0., 0., 0., 0.2, 0.29, 0.31])
wave_0 = np.asarray([355.,380.,405.,450.,486.,530.])*10.
wave_bl = np.asarray([355., 530.])*10.
wave_bm = np.asarray([355., 530.])*10.
wave_bh1 = np.asarray([350., 450.])*10.
wave_bh2 = np.asarray([405., 486.])*10.
wave_bh3 = np.asarray([405., 530.])*10.
trans_atmtel = np.asarray([0.54, 0.55, 0.56, 0.56, 0.56, 0.55])
if grat=='BL':
eff = eff_bl*trans_atmtel
wave_range = wave_bl
if grat=='BM':
eff = eff_bm*trans_atmtel
wave_range = wave_bm
if grat=='BH1':
eff = eff_bh1*trans_atmtel
wave_range = wave_bh1
if grat=='BH2':
eff = eff_bh2*trans_atmtel
wave_range = wave_bh2
if grat=='BH3':
eff = eff_bh3*trans_atmtel
wave_range = wave_bh3
wave1 = wave
interpfunc = interpolate.interp1d(wave_0, eff, fill_value="extrapolate") #this is the only way I've gotten this interpolation to work
eff_int = interpfunc(wave1)
idx = np.where((wave1 <= wave_range[0]) | (wave1 > wave_range[1]))
eff_int[idx] = 0.
return eff_int
def obj_cts(w, f0, grat, exposure_time):
'''
'''
A_geo = np.pi/4.*(10.e2)**2
eff = inst_throughput(w, grat)
cts = eff*A_geo*exposure_time*f0
return cts
def sky(wave):
'''
'''
with open('mk_sky.dat') as f:
lines = (line for line in f if not line.startswith('#'))
skydata = np.loadtxt(lines, skiprows=2)
ws = skydata[:,0]
fs = skydata[:,1]
f_nu_data = getdata('lris_esi_skyspec_fnu_uJy.fits')
f_nu_hdr = getheader('lris_esi_skyspec_fnu_uJy.fits')
dw = f_nu_hdr["CDELT1"]
w0 = f_nu_hdr["CRVAL1"]
ns = len(fs)
ws = np.arange(ns)*dw + w0
f_lam = f_nu_data[:len(ws)]*1e-29*3.*1e18/ws/ws
interpfunc = interpolate.interp1d(ws,f_lam, fill_value="extrapolate")
fs_int = interpfunc(wave)
return fs_int
def sky_mk(wave):
'''
'''
with open('mk_sky.dat') as f:
lines = (line for line in f if not line.startswith('#'))
skydata = np.loadtxt(lines, skiprows=2)
ws = skydata[:,0]
fs = skydata[:,1]
f_nu_data = getdata('lris_esi_skyspec_fnu_uJy.fits')
f_nu_hdr = getheader('lris_esi_skyspec_fnu_uJy.fits')
dw = f_nu_hdr["CDELT1"]
w0 = f_nu_hdr["CRVAL1"]
ns = len(fs)
ws = np.arange(ns)*dw + w0
f_lam = f_nu_data[:len(ws)]*1e-29*3.*1e18/ws/ws
p_lam = f_lam/(2.e-8/ws)
interpfunc = interpolate.interp1d(ws,p_lam, fill_value="extrapolate") #using linear since argument not set in idl
ps_int = interpfunc(wave)
return ps_int
def sky_cts(w, grat, exposure_time, airmass=1.2, area=1.0):
'''
'''
A_geo = np.pi/4.*(10.e2)**2
eff = inst_throughput(w, grat)
cts = eff*A_geo*exposure_time*sky_mk(w)*airmass*area
return cts
def ETC(slicer, grating, grat_wave, f_lam_index, seeing, exposure_time, ccd_bin, spatial_bin=[],
spectral_bin=None, nas=True, sb=True, mag_AB=None, flux=None, Nframes=1, emline_width=None):
"""
Parameters
==========
slicer: str
L/M/S (Large, Medium or Small)
grating: str
BH1, BH2, BH3, BM, BL
    grat_wave: float or int
        grating wavelength; 3400. < grat_wave < 6000.
f_lam_index: float
source f_lam ~ lam^f_lam_index, default = 0
seeing: float
arcsec
exposure_time: float
seconds for source image (total) for all frames
ccd_bin: str
        '1x1' or '2x2'
    spatial_bin: list
        [dx,dy] bin in arcsec x arcsec for binning extended emission flux. If sb=True then the default is 1 x 1 arcsec^2
spectral_bin: float or int
Ang to bin for S/N calculation, default=None
nas: boolean
nod and shuffle
sb: boolean
        surface brightness m_AB in mag arcsec^-2; flux = cgs arcsec^-2
    mag_AB: float or int
        continuum AB magnitude at the reference wavelength (grat_wave)
    flux: float
        erg cm^-2 s^-1 Ang^-1 (continuum source [total]); erg cm^-2 s^-1 (point line source [total]) [emline = width in Ang]
        EXTENDED: erg cm^-2 s^-1 Ang^-1 arcsec^-2 (continuum source [total]); erg cm^-2 s^-1 arcsec^-2 (point line source [total]) [emline = width in Ang]
Nframes: int
number of frames (default is 1)
emline_width: float
flux is for an emission line, not continuum flux (only works for flux), and emission line width is emline_width Ang
"""
# logger = logging.getLogger(__name__)
logger.info('Running KECK/ETC')
t0 = time()
slicer_OPTIONS = ('L', 'M','S')
grating_OPTIONS = ('BH1', 'BH2', 'BH3', 'BM', 'BL')
if slicer not in slicer_OPTIONS:
raise ValueError("slicer must be L, M, or S, wrongly entered {}".format(slicer))
logger.info('Using SLICER=%s', slicer)
if grating not in grating_OPTIONS:
raise ValueError("grating must be L, M, or S, wrongly entered {}".format(grating))
logger.info('Using GRATING=%s', grating)
if grat_wave < 3400. or grat_wave > 6000:
raise ValueError('wrong value for grating wavelength')
logger.info('Using reference wavelength=%.2f', grat_wave)
if len(spatial_bin) != 2 and len(spatial_bin) !=0:
raise ValueError('wrong spatial binning!!')
logger.info('Using spatial binning, spatial_bin=%s', str(spatial_bin[0])+'x'+str(spatial_bin[1]))
bin_factor = 1.
if ccd_bin == '2x2':
bin_factor = 0.25
if ccd_bin == '2x2' and slicer == 'S':
        print('******** WARNING: DO NOT USE 2x2 BINNING WITH SMALL SLICER')
read_noise = 2.7 # electrons
Nf = Nframes
chsz = 3 #what is this????
nas_overhead = 10. #seconds per half cycle
seeing1 = seeing
seeing2 = seeing
pixels_per_arcsec = 1./0.147
if slicer == 'L':
seeing2 = 1.38
snr_spatial_bin = seeing1*seeing2
pixels_spectral = 8
arcsec_per_slice = 1.35
if slicer == 'M':
seeing2 = max(0.69,seeing)
snr_spatial_bin = seeing1*seeing2
pixels_spectral = 4
arcsec_per_slice = 0.69
if slicer == 'S':
seeing2 = seeing
snr_spatial_bin = seeing1*seeing2
pixels_spectral = 2
arcsec_per_slice = 0.35
N_slices = seeing/arcsec_per_slice
if len(spatial_bin) == 2:
N_slices = spatial_bin[1]/arcsec_per_slice
snr_spatial_bin = spatial_bin[0]*spatial_bin[1]
pixels_spatial_bin = pixels_per_arcsec * N_slices
    print("GRATING :", grating)
if grating == 'BL':
A_per_pixel = 0.625
if grating == 'BM':
A_per_pixel = 0.28
if grating == 'BH2' or grating == 'BH3':
A_per_pixel = 0.125
    print('A_per_pixel', A_per_pixel)
logger.info('f_lam ~ lam = %.2f',f_lam_index)
logger.info('SEEING: %.2f, %s', seeing, ' arcsec')
logger.info('Ang/pixel: %.2f', A_per_pixel)
logger.info('spectral pixels in 1 spectral resolution element: %.2f',pixels_spectral)
A_per_spectral_bin = pixels_spectral*A_per_pixel
logger.info('Ang/resolution element: =%.2f',A_per_spectral_bin)
if spectral_bin is not None:
snr_spectral_bin = spectral_bin
else:
snr_spectral_bin = A_per_spectral_bin
logger.info('Ang/SNR bin: %.2f', snr_spectral_bin)
pixels_per_snr_spec_bin = snr_spectral_bin/A_per_pixel
logger.info('Pixels/Spectral SNR bin: %.2f', pixels_per_snr_spec_bin)
logger.info('SNR Spatial Bin [arcsec^2]: %.2f', snr_spatial_bin)
logger.info('SNR Spatial Bin [pixels^2]: %.2f', pixels_spatial_bin)
flux1 = 0
if flux is not None:
flux1 = flux
if flux is not None and emline_width is not None:
flux1 = flux/emline_width
if flux1 == 0 and emline_width is not None:
raise ValueError('Dont use mag_AB for emission line')
if mag_AB is not None:
flux1 = (10**(-0.4*(mag_AB+48.6)))*(3.e18/grat_wave)/grat_wave
w, p_A = make_obj(flux1,grat_wave, f_lam_index)
if sb==False and mag_AB is not None:
flux_input = ' mag_AB'
logger.info('OBJECT mag: %.2f, %s', mag_AB,flux_input)
if sb==True and mag_AB is not None:
flux_input = ' mag_AB / arcsec^2'
logger.info('OBJECT mag: %.2f, %s',mag_AB,flux_input)
if flux is not None and sb==False and emline_width is None:
flux_input = 'erg cm^-2 s^-1 Ang^-1'
if flux is not None and sb==False and emline_width is not None:
flux_input = 'erg cm^-2 s^-1 in '+ str(emline_width) +' Ang'
if flux is not None and sb and emline_width is None:
flux_input = 'erg cm^-2 s^-1 Ang^-1 arcsec^-2'
if flux is not None and sb and emline_width is not None:
flux_input = 'erg cm^-2 s^-1 arcsec^-2 in '+ str(emline_width) +' Ang'
if flux is not None:
logger.info('OBJECT Flux %.2f, %s',flux,flux_input)
if emline_width is not None:
logger.info('EMISSION LINE OBJECT --> flux is not per unit Ang')
t_exp = exposure_time
if nas==False:
c_o = obj_cts(w,p_A,grating,t_exp)*snr_spatial_bin*snr_spectral_bin
c_s = sky_cts(w,grating,exposure_time,airmass=1.2,area=1.0)*snr_spatial_bin*snr_spectral_bin
c_r = Nf*read_noise**2*pixels_per_snr_spec_bin*pixels_spatial_bin*bin_factor
snr = c_o/np.sqrt(c_s+c_o+c_r)
if nas==True:
n_cyc = np.floor((exposure_time-nas_overhead)/2./(nas+nas_overhead)+0.5)
total_exposure = (2*n_cyc*(nas+nas_overhead))+nas_overhead
        logger.info('NAS: Rounding up to %d cycles of NAS for a total exposure of %.1f s', n_cyc, total_exposure)
t_exp = n_cyc*nas
c_o = obj_cts(w,p_A,grating,t_exp)*snr_spatial_bin*snr_spectral_bin
c_s = sky_cts(w,grating,t_exp,airmass=1.2,area=1.0)*snr_spatial_bin*snr_spectral_bin
c_r = 2.*Nf*read_noise**2*pixels_per_snr_spec_bin*pixels_spatial_bin*bin_factor
snr = c_o/np.sqrt(2.*c_s+c_o+c_r)
fig=figure(num=1, figsize=(12, 16), dpi=80, facecolor='w', edgecolor='k')
subplots_adjust(hspace=0.001)
ax0 = fig.add_subplot(611)
ax0.plot(w, snr, 'k-')
ax0.minorticks_on()
ax0.tick_params(axis='both',which='minor',direction='in', length=5,width=2)
ax0.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=8)
ylabel('SNR / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12)
ax1 = fig.add_subplot(612)
ax1.plot(w,c_o, 'k--')
ax1.minorticks_on()
ax1.tick_params(axis='both',which='minor',direction='in',length=5,width=2)
ax1.tick_params(axis='both',which='major',direction='in',length=8,width=2,labelsize=12)
ylabel('Obj cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12)
ax2 = fig.add_subplot(613)
ax2.plot(w,c_s, 'k--')
ax2.minorticks_on()
ax2.tick_params(axis='both',which='minor',direction='in', length=5,width=2)
ax2.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=12)
ylabel('Sky cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12)
ax3 = fig.add_subplot(614)
ax3.plot(w,c_r*np.ones(len(w)), 'k--')
ax3.minorticks_on()
ax3.tick_params(axis='both',which='minor', direction='in', length=5,width=2)
ax3.tick_params(axis='both',which='major', direction='in', length=8,width=2,labelsize=12)
ylabel('Rd. Noise cts / %.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12)
ax4 = fig.add_subplot(615)
yval = w[c_s > 0]
num = c_o[c_s > 0]
den = c_s[c_s > 0]
ax4.plot(yval, num/den, 'k--') #some c_s are zeros
ax4.minorticks_on()
    xlim(min(w), max(w)) # only show valid data!
ax4.tick_params(axis='both',which='minor', direction='in', length=5,width=2)
ax4.tick_params(axis='both',which='major', direction='in', length=8,width=2,labelsize=12)
ylabel('Obj/Sky cts /%.1f'%snr_spectral_bin+r'$\rm \ \AA$', fontsize=12)
ax5 = fig.add_subplot(616)
ax5.plot(w,p_A, 'k--')
ax5.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
ax5.minorticks_on()
ax5.tick_params(axis='both',which='minor',direction='in', length=5,width=2)
ax5.tick_params(axis='both',which='major',direction='in', length=8,width=2,labelsize=12)
ylabel('Flux ['r'$\rm ph\ cm^{-2}\ s^{-1}\ \AA^{-1}$]', fontsize=12)
xlabel('Wavelength ['r'$\rm \AA$]', fontsize=12)
show()
fig.savefig('{}.pdf'.format('KCWI_ETC_calc'), format='pdf', transparent=True, bbox_inches='tight')
logger.info('KCWI/ETC run successful!')
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s', stream=sys.stdout)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
print("KCWI/ETC...python version")
```
Simulate the DF44 observation; begin by figuring out the Sérsic model conversions.
See toy_jeans4.ipynb for more detailed Sérsic calculations.
```
# n, R_e, M_g = 0.85, 7.1, 19.05 # van Dokkum+16 (van Dokkum+17 is slightly different)
n, mu_0, a_e, R_e = 0.94, 24.2, 9.7, 7.9 # van Dokkum+17; some guesses
b_n = 1.9992*n - 0.3271
mu_m_e = mu_0 - 2.5*log10(n*exp(b_n)/b_n**(2.0*n)*gamma(2.0*n)) + 2.5*b_n / log(10.0) # mean SB, using Graham & Driver eqns 6 and ?
print('<mu>_e =', mu_m_e)
ETC('M','BM', 5110., 0., 0.75, 3600., '2x2', spatial_bin=[14.0,14.0], spectral_bin=None, nas=False, sb=True, mag_AB=25.2, flux=None, Nframes=1, emline_width=None)
# S/N ~ 20/Ang, binned over ~1 R_e aperture
```
Simulate VCC 1287 observation:
```
n, a_e, q, m_i = 0.6231, 46.34, 0.809, 15.1081 # Viraj Pandya sci_gf_i.fits header GALFIT results, sent 19 Mar 2018
R_e = a_e * sqrt(q)
g_i = 0.72 # note this is a CFHT g-i not SDSSS g-i
mu_m_e = m_i + 2.5*log10(2.0) + 2.5*log10(pi*R_e**2)
print('<mu>_e (i-band) =', mu_m_e)
mu_m_e += g_i
print('<mu>_e (g-band) =', mu_m_e)
b_n = 1.9992*n - 0.3271
mu_0 = mu_m_e + 2.5*log10(n*exp(b_n)/b_n**(2.0*n)*gamma(2.0*n)) - 2.5*b_n / log(10.0) # mean SB, using Graham & Driver eqns 6 and ?
print('mu_0 (g-band) =', mu_0)
ETC('M','BM', 5092., 0., 0.75, 3600., '2x2', spatial_bin=[16.5,20.4], spectral_bin=None, nas=False, sb=True, mag_AB=25.5, flux=None, Nframes=1, emline_width=None)
# S/N ~ 20/Ang, binned over full FOV
```
Simulate Hubble VII observation:
```
R_e = 0.9
m_V = 15.8
mue = 15.8 + 2.5*log10(2) + 2.5*log10(pi*R_e**2)
print('<mu_V>_e = ', mue)
side = sqrt(pi * R_e**2)
print('box size = %f arcsec' % (side))
ETC('S','BM', 4500., 0., 0.75, 900., '1x1', spatial_bin=[side,side], spectral_bin=None, nas=False, sb=True, mag_AB=mue, flux=None, Nframes=3, emline_width=None)
```
|
github_jupyter
|
## MIC Demo 1 - Basic steps for measurement
This simple demonstration of the MIC toolbox uses two simulated bivariate VAR(2) models from the ["Macroeconomic simulation comparison with a multivariate extension of the Markov Information Criterion"](https://www.kent.ac.uk/economics/documents/research/papers/2019/1908.pdf) paper. These are the first two settings from the VAR validation exercises. The two simulated datasets are located in `data/model_1.txt` and `data/model_2.txt`. In addition to this, one of these two models has been used to generate an 'empirical' dataset `data/emp_data.txt`. The purpose of the demonstration is to show how to run the MIC and see if we can figure out which of models 1 or 2 is the true model.
The purpose of this first part is to outline the individual steps required to obtain a MIC measurement on a single variable in a multivariate system. Because a full multivariate measurement requires several runs of the algorithm, it is best done in parallel, which will be covered in the second demonstration notebook.
We start with the setup, including the toolbox import:
```
import time
import numpy as np
from scipy.stats import pearsonr
import mic.toolbox as mt
```
### Stage 0 - Discretising the data
The first task that needs to be done is to discretise the two variables in the system (denoted $x^1$ and $x^2$). In order to do so, we need to provide the following information:
- `lb` and `ub` : Bounds to the range of variation.
- `r_vec` : Binary discretisation resolution of the variables
- `hp_bit_vec` : High priority bits - Number of bits to prioritise in the permutation
We can then call the binary quantisation function in the toolbox, `mt.bin_quant()`, and look at the result of the discretisation diagnostics to ensure that settings above are chosen so that the discretisation error is i.i.d uniformly distributed.
```
lb = [-10,-10]
ub = [ 10, 10]
r_vec = [7,7]
hp_bit_vec = [3,3]
# Load 'empirical' data
path = 'data/emp_data.txt'
emp_data = np.loadtxt(path, delimiter="\t")
# Pick first replication (columns 1 and 2) - just as an example
dat = emp_data[:,0:2]
# Run the discretisation tests (displays are active by passing any string other than 'notests' or 'nodisplay')
data_struct_emp = mt.bin_quant(dat,lb,ub,r_vec,'')
# Check the correlation of the high-priority bits (example of 'notests' here)
data_struct_hp = mt.bin_quant(dat,lb,ub,hp_bit_vec,'notests')
dat_bin = data_struct_hp['binary_data']
hp_dat = dat - np.asarray(dat_bin.errors)
print('\n Correlation of high priority bits with raw data:')
for n in range(2):
corr = pearsonr(dat[:,n],hp_dat[:,n])
print(' Variable {:1d}: {:7.4f}'.format(n+1,corr[0]))
```
We can see here that under the settings picked above, the quantisation errors for both series are indeed uniformly distributed (KS test not rejected), independent (LB test is not rejected) and the errors are not correlated with the discretisation levels. Furthermore, a discretisation using only the first three bits of each variable (the 'high priority bits') already has a 98% correlation with the raw data. This suggests that the settings are appropriate for the analysis.
### Stage 1 - learning model probabilities from the data
The important parameters to choose at this stage relate to the size of the tree that we want to build with the simulated data. The parameters of interest are:
- `mem` : the maximum number of nodes we can initialise. As trees tend to be sparse, this does not have to match the theoretical size of the tree, $2^D$.
- `d`: maximum depth of the context trees (in bits). Combined with `mem`, these place a cap on the amount of memory that can be used.
- `lags`: The number of lags in the Markov process being used to learn the probabilities.
```
mem = 200000
d = 24
lags = 2
```
Choosing 2 Markov lags and 14 bits per observation means that the full context will be 28 bits long (not counting contemporaneous correlations). Given a maximum tree depth $D=24$, it is clear that some of the context will be truncated. We therefore need to permute the bits in the context to prioritise the most important ones and ensure only the least informative ones get truncated. In order to do so, the next step is to generate a permutation of the context bits.
As stated above, we are only demonstrating a single run of the MIC algorithm, and we choose to predict the first variable conditional on the context and the current value of the second variable. This is declared via the `var_vec` list below. To clarify the syntax, the first entry in the `var_vec` list identifies the variable to predict (1 in this case), and any subsequent entries in the list identify contemporaneous conditioning variables (2 in our case).
This will allow us to determine the value of $\lambda^1 (x_t^1 \mid x_t^2, \Omega_t)$. It is important to note that to get the full MIC measurement, we will need to run the algorithm again. The steps are essentially the same, and this will be covered in the second part of the demonstration.
```
num_runs = 2
var_vec = [1,2] # This is the critical input, it governs the conditioning order.
perm = mt.corr_perm(dat, r_vec, hp_bit_vec, var_vec, lags, d)
```
We now have all the elements required to train the tree. For the purpose of the demonstration the two simulated data files contain two training sets of 125,000 observations for each variable $x^1$ and $x^2$. The first set is located in the first two columns of the training data, while the second set is located in columns 3 and 4. This division into two training sets is done in order to illustrate:
- How to initialise a new tree on the first series
- How to update an existing tree with further data
As stated above, we are only carrying out a single run here, so we choose to learn the probabilities for the 1st model only. Once again, to get a measurement for the second model will require further runs which we do in the second part of the demonstration.
```
# Load model data
path = 'data/model_1.txt'
sim_data = np.loadtxt(path, delimiter="\t")
# Pick a tag for the tree (useful for identifying the tree later on)
tag = 'Model 1'
# Discretise the training data.
sim_dat1 = sim_data[:,0:2]
data_struct = mt.bin_quant(sim_dat1,lb,ub,r_vec,'notests') # Note the 'notests' option
data_bin = data_struct['binary_data']
# Initialise a tree and train it, trying to predict the 1st variable
var = var_vec[0]
output = mt.train(None, data_bin, mem, lags, d, var, tag, perm)
```
Let's now update the tree with the second run of training data to see the difference in syntax and output
```
# Discretise the second run of training data
sim_dat1 = sim_data[:,2:4]
data_struct = mt.bin_quant(sim_dat1,lb,ub,r_vec,'notests') # Note, we are not running discretisation tests
data_bin = data_struct['binary_data']
# Extract the tree from the previous output and train it again. Only the 1st argument changes
T = output['T']
output = mt.train(T, data_bin, mem, lags, d, var, tag, perm)
```
Notice how the header of the output has changed, using the tag to flag that we are updating an existing tree. We are done training the tree, let's get some descriptive statistics.
```
# Use the built-in descriptive statistic method to get some diagnostics
T = output['T']
T.desc()
```
We can see that the tree has used about 1/3 of the initial node allocation, so we have plenty of margin on memory. This will typically change if more variables are included. There is an element of trial and error to figuring out how much memory to allocate, which is why this diagnostic is useful.
It is important to note that the algorithm can cope with failed node allocations (situations where the algorithm attempts to allocate a node to memory but fails), as it has a heuristic that allows it to 'skip' failed nodes, at the cost of introducing an error in the probabilities. Furthermore, because the tree implements a pruning and rollout mechanism, nodes are only allocated when they are not on a single branch path. This means that node allocation failures due to lack of memory will typically occur only for very rare events.
Both of these mechanisms mean that memory allocation failure is graceful and the odd failure will not impair the measurement. It is nevertheless a good idea to check `T.desc()` to ensure that failed allocations are not too numerous.
### Stage 2 - Scoring the empirical data with the model probabilities
We have already loaded the empirical data when running the discretisation tests. For the purposes of this demonstration, we have 10 replications of 1000 observations each, in order to show the consistency of the measurement. We will therefore loop the steps required to score these series over the 10 replications. In a normal application with only one empirical dataset, this loop is of course not needed!
- Discretise the empirical data into `data_stuct_emp`
- Extract the binary data from the dictionary
- Pass the binary data alongside the tree `T` to the score function. The function knows which variable to score as this is given in the tree.
- Correct the score by removing the estimate of the bias (measured using the Rissanen bound correction).
```
scores = np.zeros([998,10])
for j in range(10):
loop_t = time.time()
# Discretise the data
k = 2*j
dat = emp_data[:,k:k+2]
data_struct_emp = mt.bin_quant(dat,lb,ub,T.r_vec,'notests')
data_bin_emp = data_struct_emp['binary_data']
# Score the data using the tree
score_struct = mt.score(T, data_bin_emp)
# Correct the measurement
scores[:,j] = score_struct['score'] - score_struct['bound_corr']
print('Replication {:2d}: {:10.4f} secs.'.format(j,time.time() - loop_t))
flt_str = ' {:7.2f}'*10
print('\n Scores obtained ')
print(flt_str.format(*np.sum(scores,0)))
```
This provides the measurement for $\lambda^1 (x_t^1 \mid x_t^2, \Omega_t)$. To complete the measurement, one also requires $\lambda^1 (x_t^2 \mid \Omega_t)$. This will enable us to calculate:
$$ \lambda^1 (X) = \sum _{t=L}^T \left[ \lambda^1 (x_t^1 \mid x_t^2, \Omega_t) + \lambda^1 (x_t^2 \mid \Omega_t) \right]$$
To do this, the analysis can be re-run from `In [4]` onwards, setting `var_vec = [2]`. The resulting score for variable 2 can be added to the score above, which measures the score for variable 1 conditional on variable 2, thus providing the MIC for the entire system.
Finally, the accuracy of the measurement can be improved by re-doing the analysis using a different conditioning order in the cross-entropy measurement. In this case this can be done by carrying out the same analysis with `var_vec = [2,1]` and `var_vec = [1]` and adding the result. This provides the following measurement:
$$ \lambda^1 (X) = \sum _{t=L}^T \left[ \lambda^1 (x_t^2 \mid x_t^1, \Omega_t) + \lambda^1 (x_t^1 \mid \Omega_t) \right]$$
In theory, the two $\lambda^1 (X)$ measurements should be identical; in practice they will differ by a measurement error. Averaging the two will therefore improve precision.
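Putting these pieces together, the full MIC is just the sum of the conditional scores from the separate runs, and the two conditioning orders can be averaged (a minimal sketch; `scores_1_given_2`, `scores_2`, `scores_2_given_1` and `scores_1` are hypothetical arrays, each shaped like the `scores` array built earlier, holding the bias-corrected scores from the four runs described above):
```
# Hypothetical per-observation score arrays from the four runs described above,
# each of shape [n_obs, n_replications] like the `scores` array built earlier.
lambda_a = np.sum(scores_1_given_2 + scores_2, axis=0)  # lambda^1(x1 | x2, Omega) + lambda^1(x2 | Omega)
lambda_b = np.sum(scores_2_given_1 + scores_1, axis=0)  # lambda^1(x2 | x1, Omega) + lambda^1(x1 | Omega)
mic_estimate = 0.5 * (lambda_a + lambda_b)              # average the two orderings to reduce measurement error
print(mic_estimate)
```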
|
github_jupyter
|
# Predicting Remaining Useful Life (advanced)
<p style="margin:30px">
<img style="display:inline; margin-right:50px" width=50% src="https://www.featuretools.com/wp-content/uploads/2017/12/FeatureLabs-Logo-Tangerine-800.png" alt="Featuretools" />
<img style="display:inline" width=15% src="https://upload.wikimedia.org/wikipedia/commons/e/e5/NASA_logo.svg" alt="NASA" />
</p>
This notebook has a more advanced workflow than [the other notebook](Simple%20Featuretools%20RUL%20Demo.ipynb) for predicting Remaining Useful Life (RUL). If you are new to either this dataset or Featuretools, I would recommend reading the other notebook first.
## Highlights
* Demonstrate how novel entityset structures improve predictive accuracy
* Use TSFresh Primitives from a featuretools [addon](https://docs.featuretools.com/getting_started/install.html#add-ons)
* Improve Mean Absolute Error by tuning hyper parameters with [BTB](https://github.com/HDI-Project/BTB)
Here is a collection of mean absolute errors from both notebooks. Though we've used averages where possible (denoted by \*), the randomness in the Random Forest Regressor and how we choose labels from the train data changes the score.
| | Train/Validation MAE| Test MAE|
|---------------------------------|---------------------|----------|
| Median Baseline | 72.06* | 50.66* |
| Simple Featuretools | 40.92* | 39.56 |
| Advanced: Custom Primitives | 35.90* | 28.84 |
| Advanced: Hyperparameter Tuning | 34.80* | 27.85 |
# Step 1: Load Data
We load in the train data using the same function we used in the previous notebook:
```
import composeml as cp
import numpy as np
import pandas as pd
import featuretools as ft
import utils
import os
from tqdm import tqdm
from sklearn.cluster import KMeans
data_path = 'data/train_FD004.txt'
data = utils.load_data(data_path)
data.head()
```
We also make cutoff times by using [Compose](https://compose.featurelabs.com) for generating labels on engines that reach at least 100 cycles. For each engine, we generate 10 labels that are spaced 10 cycles apart.
```
def remaining_useful_life(df):
return len(df) - 1
lm = cp.LabelMaker(
target_entity='engine_no',
time_index='time',
labeling_function=remaining_useful_life,
)
label_times = lm.search(
data.sort_values('time'),
num_examples_per_instance=10,
minimum_data=100,
gap=10,
verbose=True,
)
label_times.head()
```
We're going to make 5 sets of cutoff times to use for cross validation by randomly sampling the label times we created previously.
```
splits = 5
cutoff_time_list = []
for i in range(splits):
sample = label_times.sample(n=249, random_state=i)
sample.sort_index(inplace=True)
cutoff_time_list.append(sample)
cutoff_time_list[0].head()
```
We're going to do something fancy for our entityset. The values for `operational_setting` 1-3 are continuous but create an implicit relation between different engines. If two engines have a similar `operational_setting`, it could indicate that we should expect the sensor measurements to mean similar things. We make clusters of those settings using [KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html) from scikit-learn and make a new entity from the clusters.
```
nclusters = 50
def make_entityset(data, nclusters, kmeans=None):
X = data[[
'operational_setting_1',
'operational_setting_2',
'operational_setting_3',
]]
if kmeans is None:
kmeans = KMeans(n_clusters=nclusters).fit(X)
data['settings_clusters'] = kmeans.predict(X)
es = ft.EntitySet('Dataset')
es.entity_from_dataframe(
dataframe=data,
entity_id='recordings',
index='index',
time_index='time',
)
es.normalize_entity(
base_entity_id='recordings',
new_entity_id='engines',
index='engine_no',
)
es.normalize_entity(
base_entity_id='recordings',
new_entity_id='settings_clusters',
index='settings_clusters',
)
return es, kmeans
es, kmeans = make_entityset(data, nclusters)
es
```
## Visualize EntitySet
```
es.plot()
```
# Step 2: DFS and Creating a Model
In addition to changing our `EntitySet` structure, we're also going to use the [Complexity](http://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.cid_ce) time series primitive from the featuretools [addon](https://docs.featuretools.com/getting_started/install.html#add-ons) of ready-to-use TSFresh Primitives.
```
from featuretools.tsfresh import CidCe
fm, features = ft.dfs(
entityset=es,
target_entity='engines',
agg_primitives=['last', 'max', CidCe(normalize=False)],
trans_primitives=[],
chunk_size=.26,
cutoff_time=cutoff_time_list[0],
max_depth=3,
verbose=True,
)
fm.to_csv('advanced_fm.csv')
fm.head()
```
We build 4 more feature matrices with the same feature set but different cutoff times. That lets us test the pipeline multiple times before using it on test data.
```
fm_list = [fm]
for i in tqdm(range(1, splits)):
es = make_entityset(data, nclusters, kmeans=kmeans)[0]
fm = ft.calculate_feature_matrix(
entityset=es,
features=features,
chunk_size=.26,
cutoff_time=cutoff_time_list[i],
)
fm_list.append(fm)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.feature_selection import RFE
def pipeline_for_test(fm_list, hyperparams=None, do_selection=False):
scores = []
regs = []
selectors = []
hyperparams = hyperparams or {
'n_estimators': 100,
'max_feats': 50,
'nfeats': 50,
}
for fm in fm_list:
X = fm.copy().fillna(0)
y = X.pop('remaining_useful_life')
n_estimators = int(hyperparams['n_estimators'])
max_features = int(hyperparams['max_feats'])
max_features = min(max_features, int(hyperparams['nfeats']))
reg = RandomForestRegressor(n_estimators=n_estimators, max_features=max_features)
X_train, X_test, y_train, y_test = train_test_split(X, y)
if do_selection:
reg2 = RandomForestRegressor(n_estimators=10, n_jobs=3)
selector = RFE(reg2, int(hyperparams['nfeats']), step=25)
selector.fit(X_train, y_train)
X_train = selector.transform(X_train)
X_test = selector.transform(X_test)
selectors.append(selector)
reg.fit(X_train, y_train)
regs.append(reg)
preds = reg.predict(X_test)
mae = mean_absolute_error(preds, y_test)
scores.append(mae)
return scores, regs, selectors
scores, regs, selectors = pipeline_for_test(fm_list)
print([float('{:.1f}'.format(score)) for score in scores])
mean, std = np.mean(scores), np.std(scores)
info = 'Average MAE: {:.1f}, Std: {:.2f}\n'
print(info.format(mean, std))
most_imp_feats = utils.feature_importances(fm_list[0], regs[0])
data_test = utils.load_data('data/test_FD004.txt')
es_test, _ = make_entityset(
data_test,
nclusters,
kmeans=kmeans,
)
fm_test = ft.calculate_feature_matrix(
entityset=es_test,
features=features,
verbose=True,
chunk_size=.26,
)
X = fm_test.copy().fillna(0)
y = pd.read_csv(
'data/RUL_FD004.txt',
sep=' ',
header=None,
names=['remaining_useful_life'],
index_col=False,
)
preds = regs[0].predict(X)
mae = mean_absolute_error(preds, y)
print('Mean Abs Error: {:.2f}'.format(mae))
```
# Step 3: Feature Selection and Scoring
Here, we'll use [Recursive Feature Elimination](http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html). In order to set ourselves up for later optimization, we're going to write a generic `pipeline` function which takes in a set of hyperparameters and returns a score. Our pipeline will first run `RFE` and then split the remaining data for scoring by a `RandomForestRegressor`. We're going to pass in a list of hyperparameters, which we will tune later.
Lastly, we can use that selector and regressor to score the test values.
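For example, the `pipeline_for_test` function defined in the previous code cell can be run with feature selection switched on (a minimal sketch; the hyperparameter values below are illustrative placeholders rather than tuned settings):
```
# Run the generic pipeline with RFE enabled; hyperparameter values are placeholders.
sel_scores, sel_regs, sel_selectors = pipeline_for_test(
    fm_list,
    hyperparams={'n_estimators': 100, 'max_feats': 50, 'nfeats': 50},
    do_selection=True,
)
print('Average MAE with RFE: {:.1f}'.format(np.mean(sel_scores)))
```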
# Step 4: Hyperparameter Tuning
Because of the way we set up our pipeline, we can use a Gaussian Process to tune the hyperparameters. We will use [BTB](https://github.com/HDI-Project/BTB) from the [HDI Project](https://github.com/HDI-Project). This will search through the hyperparameters `n_estimators` and `max_feats` for RandomForest, and the number of features for RFE to find the hyperparameter set that has the best average score.
```
from btb import HyperParameter, ParamTypes
from btb.tuning import GP
def run_btb(fm_list, n=30, best=45):
hyperparam_ranges = [
('n_estimators', HyperParameter(ParamTypes.INT, [10, 200])),
('max_feats', HyperParameter(ParamTypes.INT, [5, 50])),
('nfeats', HyperParameter(ParamTypes.INT, [10, 70])),
]
tuner = GP(hyperparam_ranges)
shape = (n, len(hyperparam_ranges))
tested_parameters = np.zeros(shape, dtype=object)
scores = []
print('[n_est, max_feats, nfeats]')
best_hyperparams = None
best_sel = None
best_reg = None
for i in tqdm(range(n)):
hyperparams = tuner.propose()
cvscores, regs, selectors = pipeline_for_test(
fm_list,
hyperparams=hyperparams,
do_selection=True,
)
bound = np.mean(cvscores)
tested_parameters[i, :] = hyperparams
tuner.add(hyperparams, -np.mean(cvscores))
if np.mean(cvscores) + np.std(cvscores) < best:
best = np.mean(cvscores)
best_hyperparams = hyperparams
best_reg = regs[0]
best_sel = selectors[0]
info = '{}. {} -- Average MAE: {:.1f}, Std: {:.2f}'
mean, std = np.mean(cvscores), np.std(cvscores)
print(info.format(i, best_hyperparams, mean, std))
print('Raw: {}'.format([float('{:.1f}'.format(s)) for s in cvscores]))
return best_hyperparams, (best_sel, best_reg)
best_hyperparams, best_pipeline = run_btb(fm_list, n=30)
X = fm_test.copy().fillna(0)
y = pd.read_csv(
'data/RUL_FD004.txt',
sep=' ',
header=None,
names=['remaining_useful_life'],
index_col=False,
)
preds = best_pipeline[1].predict(best_pipeline[0].transform(X))
score = mean_absolute_error(preds, y)
print('Mean Abs Error on Test: {:.2f}'.format(score))
most_imp_feats = utils.feature_importances(
X.iloc[:, best_pipeline[0].support_],
best_pipeline[1],
)
```
# Appendix: Averaging old scores
To make a fair comparison between the previous notebook and this one, we should average scores where possible. The work in this section is exactly the work in the previous notebook plus some code for taking the average in the validation step.
```
from featuretools.primitives import Min
old_fm, features = ft.dfs(
entityset=es,
target_entity='engines',
agg_primitives=['last', 'max', 'min'],
trans_primitives=[],
cutoff_time=cutoff_time_list[0],
max_depth=3,
verbose=True,
)
old_fm_list = [old_fm]
for i in tqdm(range(1, splits)):
es = make_entityset(data, nclusters, kmeans=kmeans)[0]
old_fm = ft.calculate_feature_matrix(
entityset=es,
features=features,
cutoff_time=cutoff_time_list[i],
)
old_fm_list.append(old_fm)
old_scores = []
median_scores = []
for fm in old_fm_list:
X = fm.copy().fillna(0)
y = X.pop('remaining_useful_life')
X_train, X_test, y_train, y_test = train_test_split(X, y)
reg = RandomForestRegressor(n_estimators=10)
reg.fit(X_train, y_train)
preds = reg.predict(X_test)
mae = mean_absolute_error(preds, y_test)
old_scores.append(mae)
medianpredict = [np.median(y_train) for _ in y_test]
mae = mean_absolute_error(medianpredict, y_test)
median_scores.append(mae)
print([float('{:.1f}'.format(score)) for score in old_scores])
mean, std = np.mean(old_scores), np.std(old_scores)
info = 'Average MAE: {:.2f}, Std: {:.2f}\n'
print(info.format(mean, std))
print([float('{:.1f}'.format(score)) for score in median_scores])
mean, std = np.mean(median_scores), np.std(median_scores)
info = 'Baseline by Median MAE: {:.2f}, Std: {:.2f}\n'
print(info.format(mean, std))
y = pd.read_csv(
'data/RUL_FD004.txt',
sep=' ',
header=None,
names=['remaining_useful_life'],
index_col=False,
)
median_scores_2 = []
for ct in cutoff_time_list:
medianpredict2 = [np.median(ct['remaining_useful_life'].values) for _ in y.values]
mae = mean_absolute_error(medianpredict2, y)
median_scores_2.append(mae)
print([float('{:.1f}'.format(score)) for score in median_scores_2])
mean, std = np.mean(median_scores_2), np.std(median_scores_2)
info = 'Baseline by Median MAE: {:.2f}, Std: {:.2f}\n'
print(info.format(mean, std))
# Save output files
os.makedirs("output", exist_ok=True)
fm.to_csv('output/advanced_train_feature_matrix.csv')
cutoff_time_list[0].to_csv('output/advanced_train_label_times.csv')
fm_test.to_csv('output/advanced_test_feature_matrix.csv')
```
<p>
<img src="https://www.featurelabs.com/wp-content/uploads/2017/12/logo.png" alt="Featuretools" />
</p>
Featuretools was created by the developers at [Feature Labs](https://www.featurelabs.com/). If building impactful data science pipelines is important to you or your business, please [get in touch](https://www.featurelabs.com/contact).
|
github_jupyter
|
```
#export
from local.torch_basics import *
from local.test import *
from local.layers import *
from local.data.all import *
from local.notebook.showdoc import show_doc
from local.optimizer import *
from local.learner import *
#default_exp callback.hook
```
# Model hooks
> Callback and helper function to add hooks in models
```
from local.utils.test import *
```
## What are hooks?
Hooks are functions you can attach to a particular layer in your model that will be executed in the forward pass (for forward hooks) or backward pass (for backward hooks). Here we begin with an introduction to hooks, but you should jump to `HookCallback` if you quickly want to implement one (and read the following example, `ActivationStats`).
Forward hooks are functions that take three arguments: the layer they are applied to, the input of that layer and the output of that layer.
```
tst_model = nn.Linear(5,3)
def example_forward_hook(m,i,o): print(m,i,o)
x = torch.randn(4,5)
hook = tst_model.register_forward_hook(example_forward_hook)
y = tst_model(x)
hook.remove()
```
Backward hooks are functions that take three arguments: the layer it's applied to, the gradients of the loss with respect to the input, and the gradients with respect to the output.
```
def example_backward_hook(m,gi,go): print(m,gi,go)
hook = tst_model.register_backward_hook(example_backward_hook)
x = torch.randn(4,5)
y = tst_model(x)
loss = y.pow(2).mean()
loss.backward()
hook.remove()
```
Hooks can change the input/output of a layer, or the gradients, print values or shapes. If you want to store something related to these inputs/outputs, it's best to have your hook associated with a class so that it can store the result in the state of an instance of that class.
## Hook -
```
#export
@docs
class Hook():
"Create a hook on `m` with `hook_func`."
def __init__(self, m, hook_func, is_forward=True, detach=True, cpu=False):
self.hook_func,self.detach,self.cpu,self.stored = hook_func,detach,cpu,None
f = m.register_forward_hook if is_forward else m.register_backward_hook
self.hook = f(self.hook_fn)
self.removed = False
def hook_fn(self, module, input, output):
"Applies `hook_func` to `module`, `input`, `output`."
if self.detach: input,output = to_detach(input, cpu=self.cpu),to_detach(output, cpu=self.cpu)
self.stored = self.hook_func(module, input, output)
def remove(self):
"Remove the hook from the model."
if not self.removed:
self.hook.remove()
self.removed=True
def __enter__(self, *args): return self
def __exit__(self, *args): self.remove()
_docs = dict(__enter__="Register the hook",
__exit__="Remove the hook")
```
This will be called during the forward pass if `is_forward=True`, the backward pass otherwise, and will optionally `detach` and put on the `cpu` the (gradient of the) input/output of the model before passing them to `hook_func`. The result of `hook_func` will be stored in the `stored` attribute of the `Hook`.
```
tst_model = nn.Linear(5,3)
hook = Hook(tst_model, lambda m,i,o: o)
y = tst_model(x)
test_eq(hook.stored, y)
show_doc(Hook.hook_fn)
show_doc(Hook.remove)
```
> Note: It's important to properly remove your hooks from your model when you're done, to avoid them being called again next time your model is applied to some inputs, and to free the memory that goes with their state.
```
tst_model = nn.Linear(5,10)
x = torch.randn(4,5)
y = tst_model(x)
hook = Hook(tst_model, example_forward_hook)
test_stdout(lambda: tst_model(x), f"{tst_model} ({x},) {y.detach()}")
hook.remove()
test_stdout(lambda: tst_model(x), "")
```
### Context Manager
Since it's very important to remove your `Hook` even if your code is interrupted by some bug, a `Hook` can be used as a context manager.
```
show_doc(Hook.__enter__)
show_doc(Hook.__exit__)
tst_model = nn.Linear(5,10)
x = torch.randn(4,5)
y = tst_model(x)
with Hook(tst_model, example_forward_hook) as h:
test_stdout(lambda: tst_model(x), f"{tst_model} ({x},) {y.detach()}")
test_stdout(lambda: tst_model(x), "")
#export
def _hook_inner(m,i,o): return o if isinstance(o,Tensor) or is_listy(o) else list(o)
def hook_output(module, detach=True, cpu=False, grad=False):
"Return a `Hook` that stores activations of `module` in `self.stored`"
return Hook(module, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
```
The activations stored are the gradients if `grad=True`, otherwise the output of `module`. If `detach=True` they are detached from their history, and if `cpu=True`, they're put on the CPU.
```
tst_model = nn.Linear(5,10)
x = torch.randn(4,5)
with hook_output(tst_model) as h:
y = tst_model(x)
test_eq(y, h.stored)
assert not h.stored.requires_grad
with hook_output(tst_model, grad=True) as h:
y = tst_model(x)
loss = y.pow(2).mean()
loss.backward()
test_close(2*y / y.numel(), h.stored[0])
#cuda
with hook_output(tst_model, cpu=True) as h:
y = tst_model.cuda()(x.cuda())
test_eq(h.stored.device, torch.device('cpu'))
```
## Hooks -
```
#export
@docs
class Hooks():
"Create several hooks on the modules in `ms` with `hook_func`."
def __init__(self, ms, hook_func, is_forward=True, detach=True, cpu=False):
self.hooks = [Hook(m, hook_func, is_forward, detach, cpu) for m in ms]
def __getitem__(self,i): return self.hooks[i]
def __len__(self): return len(self.hooks)
def __iter__(self): return iter(self.hooks)
@property
def stored(self): return [o.stored for o in self]
def remove(self):
"Remove the hooks from the model."
for h in self.hooks: h.remove()
def __enter__(self, *args): return self
def __exit__ (self, *args): self.remove()
_docs = dict(stored = "The states saved in each hook.",
__enter__="Register the hooks",
__exit__="Remove the hooks")
layers = [nn.Linear(5,10), nn.ReLU(), nn.Linear(10,3)]
tst_model = nn.Sequential(*layers)
hooks = Hooks(tst_model, lambda m,i,o: o)
y = tst_model(x)
test_eq(hooks.stored[0], layers[0](x))
test_eq(hooks.stored[1], F.relu(layers[0](x)))
test_eq(hooks.stored[2], y)
hooks.remove()
show_doc(Hooks.stored, name='Hooks.stored')
show_doc(Hooks.remove)
```
### Context Manager
Like `Hook`, you can use `Hooks` as a context manager.
```
show_doc(Hooks.__enter__)
show_doc(Hooks.__exit__)
layers = [nn.Linear(5,10), nn.ReLU(), nn.Linear(10,3)]
tst_model = nn.Sequential(*layers)
with Hooks(layers, lambda m,i,o: o) as h:
y = tst_model(x)
test_eq(h.stored[0], layers[0](x))
test_eq(h.stored[1], F.relu(layers[0](x)))
test_eq(h.stored[2], y)
#export
def hook_outputs(modules, detach=True, cpu=False, grad=False):
"Return `Hooks` that store activations of all `modules` in `self.stored`"
return Hooks(modules, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
```
The activations stored are the gradients if `grad=True`, otherwise the output of `modules`. If `detach=True` they are detached from their history, and if `cpu=True`, they're put on the CPU.
```
layers = [nn.Linear(5,10), nn.ReLU(), nn.Linear(10,3)]
tst_model = nn.Sequential(*layers)
x = torch.randn(4,5)
with hook_outputs(layers) as h:
y = tst_model(x)
test_eq(h.stored[0], layers[0](x))
test_eq(h.stored[1], F.relu(layers[0](x)))
test_eq(h.stored[2], y)
for s in h.stored: assert not s.requires_grad
with hook_outputs(layers, grad=True) as h:
y = tst_model(x)
loss = y.pow(2).mean()
loss.backward()
g = 2*y / y.numel()
test_close(g, h.stored[2][0])
g = g @ layers[2].weight.data
test_close(g, h.stored[1][0])
g = g * (layers[0](x) > 0).float()
test_close(g, h.stored[0][0])
#cuda
with hook_outputs(tst_model, cpu=True) as h:
y = tst_model.cuda()(x.cuda())
for s in h.stored: test_eq(s.device, torch.device('cpu'))
```
## HookCallback -
To make hooks easy to use, we wrapped a version in a Callback where you just have to implement a `hook` function (plus any element you might need).
```
#export
def has_params(m):
"Check if `m` has at least one parameter"
return len(list(m.parameters())) > 0
assert has_params(nn.Linear(3,4))
assert has_params(nn.LSTM(4,5,2))
assert not has_params(nn.ReLU())
#export
class HookCallback(Callback):
"`Callback` that can be used to register hooks on `modules`"
def __init__(self, hook=None, modules=None, do_remove=True, is_forward=True, detach=True, cpu=False):
self.modules,self.do_remove = modules,do_remove
self.is_forward,self.detach,self.cpu = is_forward,detach,cpu
if hook is not None: setattr(self, 'hook', hook)
def begin_fit(self):
"Register the `Hooks` on `self.modules`."
if not self.modules:
self.modules = [m for m in flatten_model(self.model) if has_params(m)]
self.hooks = Hooks(self.modules, self.hook, self.is_forward, self.detach, self.cpu)
def after_fit(self):
"Remove the `Hooks`."
if self.do_remove: self._remove()
def _remove(self):
if getattr(self, 'hooks', None): self.hooks.remove()
def __del__(self): self._remove()
```
You can either subclass and implement a `hook` function (along with any event you want) or pass a `hook` function when initializing. Such a function needs to take three arguments: a layer, its input and its output (for a backward hook, the input is the gradient with respect to the inputs and the output is the gradient with respect to the outputs), and it can either modify them or update some state according to them.
If not provided, `modules` will default to the layers of `self.model` that have parameters. Depending on `do_remove`, the hooks will be properly removed at the end of training (or in case of error). `is_forward`, `detach` and `cpu` are passed to `Hooks`.
The function called at each forward (or backward) pass is `self.hook` and must be implemented when subclassing this callback.
```
class TstCallback(HookCallback):
def hook(self, m, i, o): return o
def after_batch(self): test_eq(self.hooks.stored[0], self.pred)
learn = synth_learner(n_trn=5, cbs = TstCallback())
learn.fit(1)
class TstCallback(HookCallback):
def __init__(self, modules=None, do_remove=True, detach=True, cpu=False):
super().__init__(None, modules, do_remove, False, detach, cpu)
def hook(self, m, i, o): return o
def after_batch(self):
if self.training:
test_eq(self.hooks.stored[0][0], 2*(self.pred-self.yb)/self.pred.shape[0])
learn = synth_learner(n_trn=5, cbs = TstCallback())
learn.fit(1)
show_doc(HookCallback.begin_fit)
show_doc(HookCallback.after_fit)
```
An example of such a `HookCallback` is the following, that stores the mean and stds of activations that go through the network.
```
#exports
@docs
class ActivationStats(HookCallback):
"Callback that record the mean and std of activations."
def begin_fit(self):
"Initialize stats."
super().begin_fit()
self.stats = []
def hook(self, m, i, o): return o.mean().item(),o.std().item()
def after_batch(self):
"Take the stored results and puts it in `self.stats`"
if self.training: self.stats.append(self.hooks.stored)
def after_fit(self):
"Polish the final result."
self.stats = tensor(self.stats).permute(2,1,0)
super().after_fit()
_docs = dict(hook="Take the mean and std of the output")
learn = synth_learner(n_trn=5, cbs = ActivationStats())
learn.fit(1)
learn.activation_stats.stats
```
The first line contains the means of the outputs of the model for each batch in the training set, the second line their standard deviations.
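These statistics can also be plotted over batches (a minimal sketch, assuming `matplotlib` is available in the environment and `learn` was fitted as above):
```
import matplotlib.pyplot as plt

stats = learn.activation_stats.stats       # shape: [2, n_modules, n_batches]
for i, means in enumerate(stats[0]):       # first row holds the per-batch means
    plt.plot(means.numpy(), label=f'module {i}')
plt.xlabel('batch')
plt.ylabel('activation mean')
plt.legend()
plt.show()
```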
```
#hide
class TstCallback(HookCallback):
def hook(self, m, i, o): return o
def begin_fit(self):
super().begin_fit()
self.means,self.stds = [],[]
def after_batch(self):
if self.training:
self.means.append(self.hooks.stored[0].mean().item())
self.stds.append (self.hooks.stored[0].std() .item())
learn = synth_learner(n_trn=5, cbs = [TstCallback(), ActivationStats()])
learn.fit(1)
test_eq(learn.activation_stats.stats[0].squeeze(), tensor(learn.tst.means))
test_eq(learn.activation_stats.stats[1].squeeze(), tensor(learn.tst.stds))
```
## Model summary
```
#export
def total_params(m):
"Give the number of parameters of a module and if it's trainable or not"
params = sum([p.numel() for p in m.parameters()])
trains = [p.requires_grad for p in m.parameters()]
return params, (False if len(trains)==0 else trains[0])
test_eq(total_params(nn.Linear(10,32)), (32*10+32,True))
test_eq(total_params(nn.Linear(10,32, bias=False)), (32*10,True))
test_eq(total_params(nn.BatchNorm2d(20)), (20*2, True))
test_eq(total_params(nn.BatchNorm2d(20, affine=False)), (0,False))
test_eq(total_params(nn.Conv2d(16, 32, 3)), (16*32*3*3 + 32, True))
test_eq(total_params(nn.Conv2d(16, 32, 3, bias=False)), (16*32*3*3, True))
#First ih layer 20--10, all else 10--10. *4 for the four gates
test_eq(total_params(nn.LSTM(20, 10, 2)), (4 * (20*10 + 10) + 3 * 4 * (10*10 + 10), True))
#export
def layer_info(learn):
def _track(m, i, o):
return (m.__class__.__name__,)+total_params(m)+(apply(lambda x:x.shape, o),)
layers = [m for m in flatten_model(learn.model)]
xb,_ = learn.data.train_dl.one_batch()
with Hooks(layers, _track) as h:
_ = learn.model.eval()(apply(lambda o:o[:1], xb))
return h.stored
m = nn.Sequential(nn.Linear(1,50), nn.ReLU(), nn.BatchNorm1d(50), nn.Linear(50, 1))
learn = synth_learner()
learn.model=m
test_eq(layer_info(learn), [
('Linear', 100, True, [1, 50]),
('ReLU', 0, False, [1, 50]),
('BatchNorm1d', 100, True, [1, 50]),
('Linear', 51, True, [1, 1])
])
#export core
class PrettyString(str):
"Little hack to get strings to show properly in Jupyter."
def __repr__(self): return self
#export
def _print_shapes(o, bs):
if isinstance(o, torch.Size): return ' x '.join([str(bs)] + [str(t) for t in o[1:]])
else: return [_print_shapes(x, bs) for x in o]
#export
@patch
def summary(self:Learner):
"Print a summary of the model, optimizer and loss function."
infos = layer_info(self)
xb,_ = self.data.train_dl.one_batch()
n,bs = 64,find_bs(xb)
inp_sz = _print_shapes(apply(lambda x:x.shape, xb), bs)
res = f"{self.model.__class__.__name__} (Input shape: {inp_sz})\n"
res += "=" * n + "\n"
res += f"{'Layer (type)':<20} {'Output Shape':<20} {'Param #':<10} {'Trainable':<10}\n"
res += "=" * n + "\n"
ps,trn_ps = 0,0
for typ,np,trn,sz in infos:
if sz is None: continue
ps += np
if trn: trn_ps += np
res += f"{typ:<20} {_print_shapes(sz, bs):<20} {np:<10,} {str(trn):<10}\n"
res += "_" * n + "\n"
res += f"\nTotal params: {ps:,}\n"
res += f"Total trainable params: {trn_ps:,}\n"
res += f"Total non-trainable params: {ps - trn_ps:,}\n\n"
res += f"Optimizer used: {self.opt_func}\nLoss function: {self.loss_func}\n\nCallbacks:\n"
res += '\n'.join(f" - {cb}" for cb in sort_by_run(self.cbs))
return PrettyString(res)
m = nn.Sequential(nn.Linear(1,50), nn.ReLU(), nn.BatchNorm1d(50), nn.Linear(50, 1))
for p in m[0].parameters(): p.requires_grad_(False)
learn = synth_learner()
learn.model=m
learn.summary()
```
## Export -
```
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
```
|
github_jupyter
|
## Demo of 1D regression with an Attentive Neural Process with Recurrent Neural Network (ANP-RNN) model
This notebook will provide a simple and straightforward demonstration on how to utilize an Attentive Neural Process with a Recurrent Neural Network (ANP-RNN) to regress context and target points to a sine curve.
First, we need to import all necessary packages and modules for our task:
```
import os
import sys
import torch
from matplotlib import pyplot as plt  # needed for the plots in the training loop below
# Provide access to modules in repo.
sys.path.insert(0, os.path.abspath('neural_process_models'))
sys.path.insert(0, os.path.abspath('misc'))
from neural_process_models.anp_rnn import ANP_RNN_Model
from misc.test_sin_regression.Sin_Wave_Data import sin_wave_data, plot_functions
```
The `sin_wave_data` class, defined in `misc/test_sin_regression/Sin_Wave_Data.py`, represents the curve that we will try to regress to. From instances of this class, we are able to sample context and target points from the curve to serve as inputs for our neural process.
The default parameters of this class will produce a "ground truth" curve defined as the sum of the following:
1. A sine curve with amplitude 1, frequency 1, and phase 1.
2. A sine curve with amplitude 2, frequency 2, and phase 1.
3. A measured amount of noise (0.1).
Let us create an instance of this class:
```
data = sin_wave_data()
```
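For intuition, the ground truth described above can be approximated directly (a minimal sketch; the exact amplitude/frequency/phase parameterisation and noise model inside `sin_wave_data` are assumptions here, so this is only an illustration, not the class's implementation):
```
import numpy as np
import matplotlib.pyplot as plt

# Illustrative ground-truth curve: the sum of two sine waves plus noise,
# mirroring the description above (parameterisation assumed, not taken from the class).
x = np.linspace(-6, 6, 200)
y = 1.0 * np.sin(1.0 * x + 1.0) + 2.0 * np.sin(2.0 * x + 1.0)
y_noisy = y + 0.1 * np.random.randn(x.size)

plt.plot(x, y, label='noise-free curve')
plt.scatter(x, y_noisy, s=5, label='noisy samples')
plt.legend()
plt.show()
```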
Next, we need to instantiate our model. The ANP-RNN model is implemented in the `ANP_RNN_Model` class in the file `neural_process_models/anp_rnn.py`.
We will use the following parameters for our example model:
* 1 for x-dimension and y-dimension (since this is 1D regression)
* 4 hidden layers of dimension 256 for encoders and decoder
* 256 as the latent dimension for encoders and decoder
* We will utilize a self-attention process.
* We will utilize a deterministic path for the encoder.
Let us create an instance of this class, as well as set some hyperparameters for our training:
```
np_model = ANP_RNN_Model(x_dim=1,
y_dim=1,
mlp_hidden_size_list=[256, 256, 256, 256],
latent_dim=256,
use_rnn=True,
use_self_attention=True,
le_self_attention_type="laplace",
de_self_attention_type="laplace",
de_cross_attention_type="laplace",
use_deter_path=True)
optim = torch.optim.Adam(np_model.parameters(), lr=1e-4)
num_epochs = 1000
batch_size = 16
```
Now, let us train our model. For each epoch, we will print the loss at that epoch.
Additionally, every 50 epochs, an image will be generated and displayed, using `pyplot`. This will give you an opportunity to more closely analyze and/or save the images, if you would like.
```
for epoch in range(1, num_epochs + 1):
print("step = " + str(epoch))
np_model.train()
plt.clf()
optim.zero_grad()
ctt_x, ctt_y, tgt_x, tgt_y = data.query(batch_size=batch_size,
context_x_start=-6,
context_x_end=6,
context_x_num=200,
target_x_start=-6,
target_x_end=6,
target_x_num=200)
mu, sigma, log_p, kl, loss = np_model(ctt_x, ctt_y, tgt_x, tgt_y)
print('loss = ', loss)
loss.backward()
optim.step()
np_model.eval()
if epoch % 50 == 0:
plt.ion()
plot_functions(tgt_x.numpy(),
tgt_y.numpy(),
ctt_x.numpy(),
ctt_y.numpy(),
mu.detach().numpy(),
sigma.detach().numpy())
title_str = 'Training at epoch ' + str(epoch)
plt.title(title_str)
plt.pause(0.1)
plt.ioff()
plt.show()
```
|
github_jupyter
|
<img src="images/usm.jpg" width="480" height="240" align="left"/>
# MAT281 - Laboratory N°02
## Lab objectives
* Reinforce basic concepts of classification.
## Contents
* [Problem 01](#p1)
<a id='p1'></a>
## I.- Problem 01
<img src="https://www.xenonstack.com/wp-content/uploads/xenonstack-credit-card-fraud-detection.png" width="360" height="360" align="center"/>
The dataset is called `creditcard.csv` and consists of several columns with information about credit card fraud, where the **Class** column is 0 if the transaction is not a fraud and 1 if it is a fraud.
In this exercise we will work on the problem of imbalanced classes. Let's look at the first five rows of the dataset:
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,accuracy_score,recall_score,precision_score,f1_score
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
%matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize':(11.7,8.27)})
# load data
df = pd.read_csv(os.path.join("data","creditcard.csv"), sep=";")
df.head()
```
Let's analyse the total number of fraud cases relative to the non-fraud cases:
```
# compute proportions
df_count = pd.DataFrame()
df_count["fraude"] =["no","si"]
df_count["total"] = df["Class"].value_counts()
df_count["porcentaje"] = 100*df_count["total"] /df_count["total"] .sum()
df_count
```
We can see that less than 1% of the records are fraudulent. The questions that arise are:
* What should the training and test sets look like?
* Which models should we use?
* Which metrics should we use?
For example, let's analyse the logistic regression model and apply the standard procedure:
```
# data
y = df.Class
X = df.drop('Class', axis=1)
# split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=27)
# Create the model
lr = LogisticRegression(solver='liblinear').fit(X_train, y_train)
# predict
lr_pred = lr.predict(X_test)
# compute accuracy
accuracy_score(y_test, lr_pred)
```
In general the model has an **accuracy** of 99.9%; that is, one might assume that the model predicts almost perfectly, but that is far from being the case. To see why, it is necessary to follow these steps:
### 1. Change the performance metric
The first step is to compare different metrics; for that, let's use the 4 classic metrics covered in the course:
* accuracy
* precision
* recall
* f-score
At this point you should compute the corresponding metrics and comment on your results.
```
# metrics
y_true = list(y_test)
y_pred = list(lr.predict(X_test))
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
print('\nMetricas:\n ')
print('accuracy: ',accuracy_score(y_test, lr_pred))
print('recall: ',recall_score(y_test, lr_pred))
print('precision: ',precision_score(y_test, lr_pred))
print('f-score: ',f1_score(y_test, lr_pred))
print("")
```
##### accuracy: the model shows a very high overall rate of correct predictions of fraudulent and non-fraudulent cards relative to the whole sample.
##### recall: for the predictions made on fraudulent cards there is a clear drop in performance, probably because the model is mostly trained to predict non-fraud cases, given how many examples belong to that category. This metric emphasises the cases that were predicted as non-fraud when they actually were fraud.
##### precision: this metric indicates the proportion of cases predicted as fraud that really are fraud, relative to everything flagged as fraud, including the incorrect flags. It is higher than recall here, implying that the error of predicting fraud when there is none is less serious. In this case the recall metric, which penalises saying a card is not fraudulent when it actually is, is the more important one; this follows from the denominators of the two metrics.
##### f-score: this is the balance between the other metrics, weighting precision and recall. In this case it lies between recall and precision.
##### Final comments: using accuracy on its own to analyse the results of this problem would be incomplete. Looking at the other metrics, I would propose improving the problem by balancing the example data.
### 2. Change the algorithm
The second step is to compare different models. Keep in mind that the model used must solve the supervised classification problem.
At this point you should fit a **random forest** model, apply the metrics and compare with the logistic regression model.
```
# train model
rfc = RandomForestClassifier(n_estimators=5).fit(X_train, y_train) # random forest algorithm
# metrics
y_true = list(y_test)
y_pred = list(rfc.predict(X_test)) # predictions with random forest
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
print('\nMetricas:\n ')
print('accuracy: ',accuracy_score(y_true,y_pred))
print('recall: ',recall_score(y_true,y_pred))
print('precision: ',precision_score(y_true,y_pred))
print('f-score: ',f1_score(y_true,y_pred))
print("")
```
###### In this case we can see that the recall, precision and f-score metrics improve compared to the logistic regression model. However, the ordering of the metrics stays the same: accuracy, precision, f-score and recall (respectively).
##### Changing the prediction model can probably help improve the metrics to some extent; however, it does not solve the class imbalance problem, and a certain level of bias when classifying the classes may remain.
###### Note: in this case the number of estimators for the random forest algorithm was arbitrarily set to 5.
### 3. Resampling techniques: oversampling the minority class
The third step is to use resampling techniques, in this case on the minority class. This means that, through resampling, we will try to bring the number of elements of the minority class up to that of the majority class.
```
from sklearn.utils import resample
# concatenate the training set
X = pd.concat([X_train, y_train], axis=1)
# separate the classes
not_fraud = X[X.Class==0]
fraud = X[X.Class==1]
# resample the minority class
fraud_upsampled = resample(fraud,
replace=True, # sample with replacement
n_samples=len(not_fraud), # match number in majority class
random_state=27) # reproducible results
# recombine the results
upsampled = pd.concat([not_fraud, fraud_upsampled])
# check the number of elements per class
upsampled.Class.value_counts()
# upsampled training data
y_train = upsampled.Class
X_train = upsampled.drop('Class', axis=1)
```
Using these new training sets, apply the logistic regression model again and compute the corresponding metrics. In addition, justify the advantages and disadvantages of this procedure.
```
upsampled = LogisticRegression(solver='liblinear').fit(X_train, y_train) # logistic regression algorithm
# metrics
y_true = list(y_test)
y_pred = list(upsampled.predict(X_test))
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
print('\nMetricas:\n ')
print('accuracy: ',accuracy_score(y_true,y_pred))
print('recall: ',recall_score(y_true,y_pred))
print('precision: ',precision_score(y_true,y_pred))
print('f-score: ',f1_score(y_true,y_pred))
print("")
```
##### In this case, the metrics generally dropped compared to the imbalanced setting. Note that the accuracy metric is far higher than the other metrics. The classifier probably suffers from a kind of overfitting, making it of little use for extrapolating to other situations. Oversampling the minority class is probably more effective when the imbalance is not as extreme as in this example, and when the minority class itself shows more variability, allowing better extrapolations.
### 4. Resampling techniques: undersampling the majority class
The fourth step is to use resampling techniques, this time on the majority class. This means that, through resampling, we will try to bring the number of elements of the majority class down to that of the minority class.
```
# resample the majority class
not_fraud_downsampled = resample(not_fraud,
replace = False, # sample without replacement
n_samples = len(fraud), # match minority n
random_state = 27) # reproducible results
# recombine the results
downsampled = pd.concat([not_fraud_downsampled, fraud])
# check the number of elements per class
downsampled.Class.value_counts()
# downsampled training data
y_train = downsampled.Class
X_train = downsampled.drop('Class', axis=1)
```
Using these new training sets, apply the logistic regression model again and compute the corresponding metrics. In addition, justify the advantages and disadvantages of this procedure.
```
undersampled = LogisticRegression(solver='liblinear').fit(X_train, y_train) # logistic regression model
# metrics
y_true = list(y_test)
y_pred = list(undersampled.predict(X_test))
print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true,y_pred))
print('\nMetricas:\n ')
print('accuracy: ',accuracy_score(y_true,y_pred))
print('recall: ',recall_score(y_true,y_pred))
print('precision: ',precision_score(y_true,y_pred))
print('f-score: ',f1_score(y_true,y_pred))
print("")
```
##### The metrics drop compared to the analysis on the imbalanced classes. Compared to the previous methodology, the metrics give slightly better results for precision, f-score and accuracy. However, this is not enough to say that this methodology is better than the previous one.
##### The disadvantage I would mention for this methodology is that, by trying to shrink the majority class down to the minority class, important information for classifying one of the classes can be lost.
### 5. Conclusions
To finish the lab, you must carry out a comparative analysis of the different results obtained in steps 1-4. Draw your own conclusions about the case.
##### When the examples in a classification problem are disproportionate, the classification algorithm can favour the majority class. To identify this it is necessary to analyse the class proportions and not rely only on the accuracy metric, also reviewing others such as precision, recall and f-score. This is highly advisable when the cost of a particular kind of mistake matters more than simply classifying correctly overall, which is what some of these other metrics take into account.
##### To assess the reliability of the models analysed, beyond changing the algorithm (which can help improve the confidence of the predictions to some extent), it is important to add some class-balancing methodology when analysing these problems. When the minority class is oversampled up to the majority class, the classifier can overfit, since the minority-class examples may not contribute new information and the biased tendency in the predictions persists. On the other hand, reducing the majority class to the size of the minority class can lose important information from the majority class for classification. It is advisable to analyse each case individually.
|
github_jupyter
|
# T1548.001 - Abuse Elevation Control Mechanism: Setuid and Setgid
An adversary may perform shell escapes or exploit vulnerabilities in an application with the setuid or setgid bits to get code running in a different user's context. On Linux or macOS, when the setuid or setgid bits are set for an application, the application will run with the privileges of the owning user or group respectively. (Citation: setuid man page). Normally an application is run in the current user's context, regardless of which user or group owns the application. However, there are instances where programs need to be executed in an elevated context to function properly, but the user running them doesn't need the elevated privileges.
Instead of creating an entry in the sudoers file, which must be done by root, any user can specify the setuid or setgid flag to be set for their own applications. These bits are indicated with an "s" instead of an "x" when viewing a file's attributes via <code>ls -l</code>. The <code>chmod</code> program can set these bits with via bitmasking, <code>chmod 4777 [file]</code> or via shorthand naming, <code>chmod u+s [file]</code>.
Adversaries can use this mechanism on their own malware to make sure they're able to execute in elevated contexts in the future.(Citation: OSX Keydnap malware).
## Atomic Tests
```
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
```
### Atomic Test #1 - Make and modify binary from C source
Make, change owner, and change file attributes on a C source code file
**Supported Platforms:** macos, linux
Elevation Required (e.g. root or admin)
#### Attack Commands: Run with `sh`
```sh
cp PathToAtomicsFolder/T1548.001/src/hello.c /tmp/hello.c
sudo chown root /tmp/hello.c
sudo make /tmp/hello
sudo chown root /tmp/hello
sudo chmod u+s /tmp/hello
/tmp/hello
```
```
Invoke-AtomicTest T1548.001 -TestNumbers 1
```
### Atomic Test #2 - Set a SetUID flag on file
This test sets the SetUID flag on a file in Linux and macOS.
**Supported Platforms:** macos, linux
Elevation Required (e.g. root or admin)
#### Attack Commands: Run with `sh`
```sh
sudo touch /tmp/evilBinary
sudo chown root /tmp/evilBinary
sudo chmod u+s /tmp/evilBinary
```
```
Invoke-AtomicTest T1548.001 -TestNumbers 2
```
### Atomic Test #3 - Set a SetGID flag on file
This test sets the SetGID flag on a file in Linux and macOS.
**Supported Platforms:** macos, linux
Elevation Required (e.g. root or admin)
#### Attack Commands: Run with `sh`
```sh
sudo touch /tmp/evilBinary
sudo chown root /tmp/evilBinary
sudo chmod g+s /tmp/evilBinary
```
```
Invoke-AtomicTest T1548.001 -TestNumbers 3
```
## Detection
Monitor the file system for files that have the setuid or setgid bits set. Monitor for execution of utilities, like chmod, and their command-line arguments to look for setuid or setgid bits being set.
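As a complement to the monitoring guidance above, the setuid/setgid bits can be enumerated programmatically (a minimal sketch in Python, not part of the Atomic Red Team tooling; the `/tmp` root is just an example):
```
import os
import stat

# Walk a directory tree and report files with the setuid or setgid bit set.
def find_setuid_setgid(root='/tmp'):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                mode = os.stat(path).st_mode
            except OSError:
                continue  # unreadable or vanished file
            if mode & (stat.S_ISUID | stat.S_ISGID):
                print(oct(mode), path)

find_setuid_setgid('/tmp')
```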
|
github_jupyter
|
```
import matplotlib.pyplot as plt
x = [1, 2.1, 0.4, 8.9, 7.1, 0.1, 3, 5.1, 6.1, 3.4, 2.9, 9]
y = [1, 3.4, 0.7, 1.3, 9, 0.4, 4, 1.9, 9, 0.3, 4.0, 2.9]
plt.scatter(x,y, color='red')
w = [0.1, 0.2, 0.4, 0.8, 1.6, 2.1, 2.5, 4, 6.5, 8, 10]
z = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
plt.plot(z, w, color='lightblue', linewidth=2)
c = [0,1,2,3,4, 5, 6, 7, 8, 9, 10]
plt.plot(c)
plt.ylabel('some numbers')
plt.xlabel('some more numbers')
plt.savefig('plot.png')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
x = np.random.rand(10)
y = np.random.rand(10)
plt.plot(x,y,'--', x**2, y**2,'-.')
plt.savefig('lines.png')
plt.axis('equal')
plt.show()
"""
Demo of custom tick-labels with user-defined rotation.
"""
import matplotlib.pyplot as plt
x = [1, 2, 3, 4]
y = [1, 4, 9, 6]
labels = ['Frogs', 'Hogs', 'Bogs', 'Slogs']
plt.plot(x, y, 'ro')
# You can specify a rotation for the tick labels in degrees or with keywords.
plt.xticks(x, labels, rotation='vertical')
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
plt.savefig('ticks.png')
plt.show()
import matplotlib.pyplot as plt
x = [0.5, 0.6, 0.8, 1.2, 2.0, 3.0]
y = [10, 15, 20, 25, 30, 35]
z = [1, 2, 3, 4]
w = [10, 20, 30, 40]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, color='lightblue', linewidth=3)
ax.scatter([2,3.4,4, 5.5],
[5,10,12, 15],
color='black',
marker='^')
ax.set_xlim(0, 6.5)
ax2 = fig.add_subplot(222)
ax2.plot(z, w, color='lightgreen', linewidth=3)
ax2.scatter([3,5,7],
[5,15,25],
color='red',
marker='*')
ax2.set_xlim(1, 7.5)
plt.savefig('mediumplot.png')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
# First way: the top plot, all in one #
x = np.random.rand(10)
y = np.random.rand(10)
figure1 = plt.plot(x,y)
# Second way: the lower 4 plots#
x1 = np.random.rand(10)
x2 = np.random.rand(10)
x3 = np.random.rand(10)
x4 = np.random.rand(10)
y1 = np.random.rand(10)
y2 = np.random.rand(10)
y3 = np.random.rand(10)
y4 = np.random.rand(10)
figure2, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.plot(x1,y1)
ax2.plot(x2,y2)
ax3.plot(x3,y3)
ax4.plot(x4,y4)
plt.savefig('axes.png')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 1, 500)
y = np.sin(4 * np.pi * x) * np.exp(-5 * x)
fig, ax = plt.subplots()
ax.fill(x, y, color='lightblue')
#ax.grid(True, zorder=5)
plt.savefig('fill.png')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
x, y = np.random.randn(2, 100)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.xcorr(x, y, usevlines=True, maxlags=50, normed=True, lw=2)
#ax1.grid(True)
ax1.axhline(0, color='black', lw=2)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.acorr(x, usevlines=True, normed=True, maxlags=50, lw=2)
#ax2.grid(True)
ax2.axhline(0, color='black', lw=2)
plt.savefig('advanced.png')
plt.show()
```
|
github_jupyter
|
```
import pandas as pd
import sklearn as sk
import json
import ast
import pickle
import math
import matplotlib.pyplot as plt
df = pd.read_json('/data/accessible_POIs/great-britain-latest.json')
df.loc[:,'id'] = df['Node'].apply(lambda x: dict(x)['id'])
df.loc[:,'access'] = df['Node'].apply(lambda x: dict(x)['tags'].get('access') if 'access' in dict(x)['tags'] else 'NONE')
df.loc[:,'barrier'] = df['Node'].apply(lambda x: dict(x)['tags'].get('barrier'))
df.loc[:,'bicycle'] = df['Node'].apply(lambda x: dict(x)['tags'].get('bicycle'))
df.loc[:,'motor_vehicle'] = df['Node'].apply(lambda x: dict(x)['tags'].get('motor_vehicle'))
df.loc[:,'opening_hours'] = df['Node'].apply(lambda x: dict(x)['tags'].get('opening_hours'))
df.loc[:,'wheelchair'] = df['Node'].apply(lambda x: dict(x)['tags'].get('wheelchair'))
df.loc[:,'amenity'] = df['Node'].apply(lambda x: dict(x)['tags'].get('amenity'))
df.loc[:,'lon'] = df['Node'].apply(lambda x: dict(x)['lonlat'][0])
df.loc[:,'lat'] = df['Node'].apply(lambda x: dict(x)['lonlat'][1])
df.drop(['Node','Way','Relation'], axis=1, inplace=True)
df
df.to_pickle('/shared/accessible_pois.pkl')
from zipfile import ZipFile
with ZipFile('/data/All_POIs_by_country/pois_by_countries.zip', 'r') as z:
z.extract('geojson/great-britain-latest.json', '/shared/great-britain-latest.json')
#z.extract('geojson/great-britain-latest.geojson', '/shared/great-britain-latest.geojson')
with open('/shared/great-britain-latest.json','r') as j:
data = json.load(j)
df = pd.json_normalize(data)
df
with open('/shared/great-britain-latest.json','r') as j:
data = json.load(j)
df = pd.json_normalize(data['features'], max_level=3)
df
def extract_key(x,key):
if type(x) == float:
return None
x_ = x.split(',')
x_ = [y.replace('\'','').replace('"','') for y in x_]
for k in x_:
if key in k:
return k[k.find('>')+1:]
return None
df.loc[:,'shop'] = df['properties.other_tags'].apply(extract_key, args=('shop',))
df.loc[:,'amenity'] = df['properties.other_tags'].apply(extract_key, args=('amenity',))
df.loc[:,'wheelchair'] = df['properties.other_tags'].apply(extract_key, args=('wheelchair',))
df.loc[:,'barrier'] = df['properties.other_tags'].apply(extract_key, args=('barrier',))
df.loc[:,'access'] = df['properties.other_tags'].apply(extract_key, args=('access',))
df.loc[:,'lon'] = df['geometry.coordinates'].apply(lambda x: list(x)[0])
df.loc[:,'lat'] = df['geometry.coordinates'].apply(lambda x: list(x)[1])
df.to_csv('all_pois_wordcloud.csv')
len(df)
df = df[df['wheelchair'].isin(['yes','no','limited','designated'])]
#df.drop(['geometry.coordinates','type','geometry.type'], axis=1, inplace=True)
df.to_csv('accessible_pois_wordcloud.csv')
set(df['wheelchair'])
plt.figure(figsize=(30,10))
df['wheelchair'].value_counts().plot(kind='bar')
len(df)
_ = df
_['wheelchair'].value_counts().plot(kind='bar')
_
df['properties.other_tags'].to_csv('wordcloud.csv')
df
```
|
github_jupyter
|
### Image Captioning
To perform image captioning we are going to apply an approach similar to the work described in references [1], [2], and [3]. The approach applied here trains a recurrent neural network (RNN) to generate image captions. The input to the RNN is composed of a high-level representation of an image and a caption describing it. The Microsoft Common Objects in Context (MSCOCO) dataset is used because it has many images and, in most cases, five captions for each one. In the previous section, we learned how to create and train a simple RNN. For this part, we will learn how to concatenate a feature vector that represents the image with its corresponding sentence and feed this into an RNN.
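To make the idea of combining the two inputs concrete before diving in, here is a shape-only sketch (the sizes are made up for illustration; one common scheme is to pair the image feature with each step of the embedded caption, and the actual wiring is built later in this notebook):
```
import numpy as np

# Toy shapes: a 4096-d image feature vector (like vgg_16/fc7) and a caption of 8 embedded words.
image_feature = np.random.rand(4096)
caption_embeddings = np.random.rand(8, 256)   # 8 time steps, 256-d word embeddings

# Tile the image feature and concatenate it with every word embedding,
# giving one RNN input per time step of size 4096 + 256.
rnn_inputs = np.concatenate(
    [np.tile(image_feature, (caption_embeddings.shape[0], 1)), caption_embeddings],
    axis=1)
print(rnn_inputs.shape)   # (8, 4352)
```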
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
#import reader
import collections
import os
import re
import json
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy import misc
import sys
sys.path.insert(0, '/data/models/slim')
slim=tf.contrib.slim
from nets import vgg
from preprocessing import vgg_preprocessing
%matplotlib inline
!nvidia-smi
```
### MSCOCO Captions
We are going to build on our RNN example. First, we will look at the data and evaluate a single image, its captions, and feature vector.
```
TRAIN_IMAGE_PATH='/data/mscoco/train2014/'
## Read Training files
with open("/data/mscoco/captions_train2014.json") as data_file:
data=json.load(data_file)
image_feature_vectors={}
tf.reset_default_graph()
one_image=ndimage.imread(TRAIN_IMAGE_PATH+data["images"][0]['file_name'])
#resize for vgg network
resize_img=misc.imresize(one_image,[224,224])
if len(one_image.shape)!= 3: #Check to see if the image is grayscale if True mirror colorband
resize_img=np.asarray(np.dstack((resize_img, resize_img, resize_img)), dtype=np.uint8)
processed_image = vgg_preprocessing.preprocess_image(resize_img, 224, 224, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
network,endpts= vgg.vgg_16(processed_images, is_training=False)
init_fn = slim.assign_from_checkpoint_fn(os.path.join('/data/mscoco/vgg_16.ckpt'),slim.get_model_variables('vgg_16'))
sess = tf.Session()
init_fn(sess)
NETWORK,ENDPTS=sess.run([network,endpts])
sess.close()
print('fc7 array for a single image')
print(ENDPTS['vgg_16/fc7'][0][0][0])
plt.plot(ENDPTS['vgg_16/fc7'][0][0][0])
plt.xlabel('feature vector index')
plt.ylabel('amplitude')
plt.title('fc7 feature vector')
data["images"][0]['file_name']
```
How can you look at feature maps from the first convolutional layer? Look here if you need a [hint](#answer1 "The output from the convolutional layer is in the form of height, width, and number of feature maps. FEATUREMAPID can be any value between 0 and the number of feature maps minus 1.").
```
print(ENDPTS['vgg_16/conv1/conv1_1'][0].shape)
FEATUREMAPID=0
print('input image and feature map from conv1')
plt.subplot(1,2,1)
plt.imshow(resize_img)
plt.subplot(1,2,2)
plt.imshow(ENDPTS['vgg_16/conv1/conv1_1'][0][:,:,FEATUREMAPID])
```
How can you look at the response of different layers in your network?
Next, we are going to combine the feature maps with their respective captions. Many of the images have five captions. Run the code below to view the captions for one image.
```
CaptionsForOneImage=[]
for k in range(len(data['annotations'])):
if data['annotations'][k]['image_id']==data["images"][0]['id']:
CaptionsForOneImage.append([data['annotations'][k]['caption'].lower()])
plt.imshow(resize_img)
print('MSCOCO captions for a single image')
CaptionsForOneImage
```
A file with feature vectors from 2000 of the MSCOCO images has been created. Next, you will load these and train. Please note this step can take more than 5 minutes to run.
```
example_load=np.load('/data/mscoco/train_vgg_16_fc7_2000.npy').tolist()
image_ids=example_load.keys()
#Create 3 lists image_id, feature maps, and captions.
image_id_key=[]
feature_maps_to_id=[]
caption_to_id=[]
for observed_image in image_ids:
for k in range(len(data['annotations'])):
if data['annotations'][k]['image_id']==observed_image:
image_id_key.append([observed_image])
feature_maps_to_id.append(example_load[observed_image])
caption_to_id.append(re.sub('[^A-Za-z0-9]+',' ',data['annotations'][k]['caption']).lower()) #remove punctuation
print('number of images ',len(image_ids))
print('number of captions ',len(caption_to_id))
```
In the cell above we created three lists: one for the image_id, one for the feature map, and one for the caption. To verify that the indices of each list are aligned, display the image id and captions for one image.
```
STRING='%012d' % image_id_key[0][0]
exp_image=ndimage.imread(TRAIN_IMAGE_PATH+'COCO_train2014_'+STRING+'.jpg')
plt.imshow(exp_image)
print('image_id ',image_id_key[:5])
print('the captions for this image ')
print(caption_to_id[:5])
num_steps=20
######################################################################
##Create a list of all of the sentences.
DatasetWordList=[]
for dataset_caption in caption_to_id:
DatasetWordList+=str(dataset_caption).split()
#Determine the number of distinct words
distintwords=collections.Counter(DatasetWordList)
#Order words by frequency
count_pairs = sorted(distintwords.items(), key=lambda x: (-x[1], x[0])) #most frequent words first
words, occurence = list(zip(*count_pairs))
#DictionaryLength=occurence.index(4) #index for words that occur 4 times or less
words=['PAD','UNK','EOS']+list(words)#[:DictionaryLength])
word_to_id=dict(zip(words, range(len(words))))
##################### Tokenize Sentence #######################
Tokenized=[]
for full_words in caption_to_id:
EmbeddedSentence=[word_to_id[word] for word in full_words.split() if word in word_to_id]+[word_to_id['EOS']]
#Pad sentences that are shorter than the number of steps
if len(EmbeddedSentence)<num_steps:
b=[word_to_id['PAD']]*num_steps
b[:len(EmbeddedSentence)]=EmbeddedSentence
if len(EmbeddedSentence)>num_steps:
b=EmbeddedSentence[:num_steps]
    if len(EmbeddedSentence)==num_steps:
        b=EmbeddedSentence
#b=[word_to_id['UNK'] if x>=DictionaryLength else x for x in b] #turn all words used 4 times or less to 'UNK'
#print(b)
Tokenized+=[b]
print("Number of words in this dictionary ", len(words))
#Tokenized Sentences
Tokenized[::2000]
```
The next cell contains functions for queuing our data and the RNN model. What should the output for each function be? If you need a hint look [here](#answer2 "The data_queue function batches the data for us, this needs to return tokenized_caption, input_feature_map. The RNN model should return prediction before the softmax is applied and is defined as pred.").
```
def data_queue(caption_input,feature_vector,batch_size,):
train_input_queue = tf.train.slice_input_producer(
[caption_input, np.asarray(feature_vector)],num_epochs=10000,
shuffle=True) #False before
##Set our train data and label input shape for the queue
TrainingInputs=train_input_queue[0]
FeatureVectors=train_input_queue[1]
TrainingInputs.set_shape([num_steps])
FeatureVectors.set_shape([len(feature_vector[0])]) #fc7 is 4096
min_after_dequeue=1000000
capacity = min_after_dequeue + 3 * batch_size
#input_x, target_y
tokenized_caption, input_feature_map = tf.train.batch([TrainingInputs, FeatureVectors],
batch_size=batch_size,
capacity=capacity,
num_threads=6)
return tokenized_caption,input_feature_map
def rnn_model(Xconcat,input_keep_prob,output_keep_prob,num_layers,num_hidden):
#Create a multilayer RNN
#reuse=False for training but reuse=True for sharing
layer_cell=[]
for _ in range(num_layers):
lstm_cell = tf.contrib.rnn.LSTMCell(num_units=num_hidden, state_is_tuple=True)
lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
input_keep_prob=input_keep_prob,
output_keep_prob=output_keep_prob)
layer_cell.append(lstm_cell)
cell = tf.contrib.rnn.MultiRNNCell(layer_cell, state_is_tuple=True)
outputs, last_states = tf.contrib.rnn.static_rnn(
cell=cell,
dtype=tf.float32,
inputs=tf.unstack(Xconcat))
output_reshape=tf.reshape(outputs, [batch_size*(num_steps),num_hidden]) #[12==batch_size*num_steps,num_hidden==12]
pred=tf.matmul(output_reshape, variables_dict["weights_mscoco"]) +variables_dict["biases_mscoco"]
return pred
tf.reset_default_graph()
#######################################################################################################
# Parameters
num_hidden=2048
num_steps=num_steps
dict_length=len(words)
batch_size=4
num_layers=2
train_lr=0.00001
#######################################################################################################
TrainingInputs=Tokenized
FeatureVectors=feature_maps_to_id
## Variables ##
# Learning rate placeholder
lr = tf.placeholder(tf.float32, shape=[])
#tf.get_variable_scope().reuse_variables()
variables_dict = {
"weights_mscoco":tf.Variable(tf.truncated_normal([num_hidden,dict_length],
stddev=1.0,dtype=tf.float32),name="weights_mscoco"),
"biases_mscoco": tf.Variable(tf.truncated_normal([dict_length],
stddev=1.0,dtype=tf.float32), name="biases_mscoco")}
tokenized_caption, input_feature_map=data_queue(TrainingInputs,FeatureVectors,batch_size)
mscoco_dict=words
TrainInput=tf.constant(word_to_id['PAD'],shape=[batch_size,1],dtype=tf.int32)
#Pad the beginning of our caption so the first step only has the image feature vector, and drop the last time step
#to keep the number of time steps at 20
TrainInput=tf.concat([tf.constant(word_to_id['PAD'],shape=[batch_size,1],dtype=tf.int32),
tokenized_caption],1)[:,:-1]
X_one_hot=tf.nn.embedding_lookup(np.identity(dict_length), TrainInput) #[batch,num_steps,dictionary_length][2,6,7]
#ImageFeatureTensor=input_feature_map
Xconcat=tf.concat([input_feature_map+tf.zeros([num_steps,batch_size,4096]),
tf.unstack(tf.to_float(X_one_hot),num_steps,1)],2)#[:num_steps,:,:]
pred=rnn_model(Xconcat,1.0,1.0,num_layers,num_hidden)
#the full caption is the target sentence
y_one_hot=tf.unstack(tf.nn.embedding_lookup(np.identity(dict_length), tokenized_caption),num_steps,1) #[batch,num_steps,dictionary_length][2,6,7]
y_target_reshape=tf.reshape(y_one_hot,[batch_size*num_steps,dict_length])
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_target_reshape))
optimizer = tf.train.MomentumOptimizer(lr,0.9)
gvs = optimizer.compute_gradients(cost,aggregation_method = tf.AggregationMethod.EXPERIMENTAL_TREE)
capped_gvs = [(tf.clip_by_value(grad, -10., 10.), var) for grad, var in gvs]
train_op=optimizer.apply_gradients(capped_gvs)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
#Load a pretrained network
saver.restore(sess, '/data/mscoco/rnn_layermodel_iter40000')
print('Model restored from file')
for i in range(100):
loss,y_pred,target_caption,_=sess.run([cost,pred,tokenized_caption,train_op],feed_dict={lr:train_lr})
if i% 10==0:
print("iteration: ",i, "loss: ",loss)
MODEL_NAME='rnn_model_iter'+str(i)
saver.save(sess, MODEL_NAME)
print('saved trained network ',MODEL_NAME)
print("Done Training")
coord.request_stop()
coord.join(threads)
sess.close()
```
We can use the function below to estimate how well the network is able to predict the next word in the caption. You can evaluate a single image and its caption from the last batch using the index of the batch. If you need a hint look [here](#answer3 "if the batch_size is 4, batch_id may be any value between 0 and 3.").
##### Please note that depending on the status of the neural network at the time it was saved, incomplete, incoherent, and sometimes inappropriate captions could be generated.
```
def show_next_predicted_word(batch_id,batch_size,id_of_image,target_caption,predicted_caption,words,PATH):
Target=[words[ind] for ind in target_caption[batch_id]]
Prediction_Tokenized=np.argmax(predicted_caption[batch_id::batch_size],1)
Prediction=[words[ind] for ind in Prediction_Tokenized]
STRING2='%012d' % id_of_image
img=ndimage.imread(PATH+STRING2+'.jpg')
return Target,Prediction,img,STRING2
#You can change the batch id to a number between [0 , batch_size-1]
batch_id=0
image_id_for_predicted_caption=[x for x in range(len(Tokenized)) if target_caption[batch_id].tolist()== Tokenized[x]][0]
t,p,input_img,string_out=show_next_predicted_word(batch_id,batch_size,image_id_key[image_id_for_predicted_caption][0]
,target_caption,y_pred,words,TRAIN_IMAGE_PATH+'COCO_train2014_')
print('Caption')
print(t)
print('Predicted Words')
print(p)
plt.imshow(input_img)
```
##### Questions
[1] Can the show_next_predicted_word function be used for deployment?
Probably not. Can you think of any reason why? Each predicted word is based on the previous ground truth word. In a deployment scenario, we will only have the feature map from our input image.
[2] Can you load your saved network and use it to generate a caption from a validation image?
The validation images are stored in /data/mscoco/val2014. An npy file of the feature vectors is stored at /data/mscoco/val_vgg_16_fc7_100.npy. For a hint on how to add this look [here](#answer4 "You can change this parameter to val_load=np.load('/data/mscoco/val_vgg_16_fc7_100.npy').tolist()").
[3] Do you need to calculate the loss or cost when only performing inference?
[4] Do you use dropout when performing inference?
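To make question [1] concrete, a deployment-time caption loop has to feed the model's own predictions back in instead of the ground-truth words. The sketch below uses toy stand-ins (a random `predict_step` and a tiny vocabulary) purely to show the shape of that loop; in this notebook the probabilities would come from the softmax over `pred`:
```
import numpy as np

# Toy stand-ins so the sketch runs on its own; in the notebook these come from
# the trained graph and the fc7 feature vector of the input image.
num_steps, vocab = 20, 50
words = ['PAD', 'UNK', 'EOS'] + ['w%d' % i for i in range(vocab - 3)]
word_to_id = {w: i for i, w in enumerate(words)}

def predict_step(feature_vector, caption):        # hypothetical wrapper around the RNN
    return np.random.rand(num_steps, vocab)       # one probability row per time step

feature_vector = np.random.rand(4096)
caption = [word_to_id['PAD']] * num_steps
for t in range(num_steps - 1):
    probs = predict_step(feature_vector, caption)
    caption[t + 1] = int(np.argmax(probs[t]))     # feed the prediction back in, not the ground truth
print([words[idx] for idx in caption[1:]])
```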
```
##Load and test our test set
val_load=np.load('/data/mscoco/val_vgg_16_fc7_100.npy').tolist()
val_ids=val_load.keys()
#Create 3 lists image_id, feature maps, and captions.
val_id_key=[]
val_map_to_id=[]
val_caption_to_id=[]
for observed_image in val_ids:
val_id_key.append([observed_image])
val_map_to_id.append(val_load[observed_image])
print('number of images ',len(val_ids))
print('number of captions ',len(val_map_to_id))
```
The cell below will load a feature vector from one of the images in the validation data set and use it with our pretrained network to generate a caption. Use the VALDATA variable to propagate an image through our RNN and generate a caption. You also need to load the network you just created during training. Look here if you need a [hint](#answer5 "Any of the data points in our validation set can be used here. There are 501 captions, so any number between 0 and 500 can be used for the VALDATA parameter, such as VALDATA=430. The pretrained network file that you just saved is rnn_model_iter99; insert this string into saver.restore(sess,FILENAME)").
##### Please note that depending on the status of the neural network at the time it was saved, incomplete, incoherent, and sometimes inappropriate captions could be generated.
```
tf.reset_default_graph()
batch_size=1
num_steps=20
print_topn=0 #0 = do not display
printnum0f=3
#Choose a image to caption
VALDATA=54 #ValImage fc7 feature vector
variables_dict = {
"weights_mscoco":tf.Variable(tf.truncated_normal([num_hidden,dict_length],
stddev=1.0,dtype=tf.float32),name="weights_mscoco"),
"biases_mscoco": tf.Variable(tf.truncated_normal([dict_length],
stddev=1.0,dtype=tf.float32), name="biases_mscoco")}
StartCaption=np.zeros([batch_size,num_steps],dtype=np.int32).tolist()
CaptionPlaceHolder = tf.placeholder(dtype=tf.int32, shape=(batch_size , num_steps))
ValFeatureMap=val_map_to_id[VALDATA]
X_one_hot=tf.nn.embedding_lookup(np.identity(dict_length), CaptionPlaceHolder) #[batch,num_steps,dictionary_length][2,6,7]
#ImageFeatureTensor=input_feature_map
Xconcat=tf.concat([ValFeatureMap+tf.zeros([num_steps,batch_size,4096]),
tf.unstack(tf.to_float(X_one_hot),num_steps,1)],2)#[:num_steps,:,:]
pred=rnn_model(Xconcat,1.0,1.0,num_layers,num_hidden)
pred=tf.nn.softmax(pred)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
#Load a pretrained network
saver.restore(sess, 'rnn_model_iter99')
print('Model restored from file')
for i in range(num_steps-1):
predict_next_word=sess.run([pred],feed_dict={CaptionPlaceHolder:StartCaption})
INDEX=np.argmax(predict_next_word[0][i])
StartCaption[0][i+1]=INDEX
##Post N most probable next words at each step
if print_topn !=0:
print("Top ",str(printnum0f), "predictions for the", str(i+1), "word in the predicted caption" )
result_args = np.argsort(predict_next_word[0][i])[-printnum0f:][::-1]
NextWord=[words[x] for x in result_args]
print(NextWord)
coord.request_stop()
coord.join(threads)
sess.close()
STRING2='%012d' % val_id_key[VALDATA][0]
img=ndimage.imread('/data/mscoco/val2014/COCO_val2014_'+STRING2+'.jpg')
plt.imshow(img)
plt.title('COCO_val2014_'+STRING2+'.jpg')
PredictedCaption=[words[x] for x in StartCaption[0]]
print("predicted sentence: ",PredictedCaption[1:])
#Free our GPU memory before proceeding to the next part of the lab
import os
os._exit(00)
```
## References
[1] Donahue, J., et al. "Long-term recurrent convolutional networks for visual recognition and description." Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2015.
[2] Vinyals, Oriol, et al. "Show and tell: Lessons learned from the 2015 MSCOCO image captioning challenge." IEEE Transactions on Pattern Analysis and Machine Intelligence 39.4 (2017): 652-663.
[3] TensorFlow Show and Tell: A Neural Image Caption Generator [example](https://github.com/tensorflow/models/tree/master/im2txt)
[4] Karpathy, A. [NeuralTalk2](https://github.com/karpathy/neuraltalk2)
[5] Lin, Tsung-Yi, et al. "Microsoft COCO: Common objects in context." European Conference on Computer Vision. Springer International Publishing, 2014.
|
github_jupyter
|
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Using a Single Slider to Set the Range
```
import plotly.plotly as py
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed
from IPython.core.display import HTML
from IPython.display import display, clear_output
from plotly.widgets import GraphWidget
styles = '''<style>.widget-hslider { width: 100%; }
.widget-hbox { width: 100% !important; }
.widget-slider { width: 100% !important; }</style>'''
HTML(styles)
#this widget will display our plotly chart
graph = GraphWidget("https://plotly.com/~jordanpeterson/889")
fig = py.get_figure("https://plotly.com/~jordanpeterson/889")
#find the range of the slider.
xmin, xmax = fig['layout']['xaxis']['range']
# use the interact decorator to tie a widget to the listener function
@interact(y=widgets.FloatRangeSlider(min=xmin, max=xmax, step=(xmax-xmin)/1000.0, continuous_update=False))
def update_plot(y):
graph.relayout({'xaxis.range[0]': y[0], 'xaxis.range[1]': y[1]})
#display the app
graph
%%html
<img src='https://cloud.githubusercontent.com/assets/12302455/16469485/42791e90-3e1f-11e6-8db4-2364bd610ce4.gif'>
```
#### Using Two Sliders to Set Range
```
import plotly.plotly as py
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed
from IPython.core.display import HTML
from IPython.display import display, clear_output
from plotly.widgets import GraphWidget
from traitlets import link
styles = '''<style>.widget-hslider { width: 100%; }
.widget-hbox { width: 100% !important; }
.widget-slider { width: 100% !important; }</style>'''
HTML(styles)
#this widget will display our plotly chart
graph = GraphWidget("https://plotly.com/~jordanpeterson/889")
fig = py.get_figure("https://plotly.com/~jordanpeterson/889")
#find the range of the slider.
xmin, xmax = fig['layout']['xaxis']['range']
# let's define our listener functions that will respond to changes in the sliders
def on_value_change_left(change):
graph.relayout({'xaxis.range[0]': change['new']})
def on_value_change_right(change):
graph.relayout({'xaxis.range[1]': change['new']})
# define the sliders
left_slider = widgets.FloatSlider(min=xmin, max=xmax, value=xmin, description="Left Slider")
right_slider = widgets.FloatSlider(min=xmin, max=xmax, value=xmax, description="Right Slider")
# put listeners on slider activity
left_slider.observe(on_value_change_left, names='value')
right_slider.observe(on_value_change_right, names='value')
# set a relationship between the left and right slider
link((left_slider, 'max'), (right_slider, 'value'))
link((left_slider, 'value'), (right_slider, 'min'))
# display our app
display(left_slider)
display(right_slider)
display(graph)
%%html
<img src='https://cloud.githubusercontent.com/assets/12302455/16469486/42891d0e-3e1f-11e6-9576-02c5f6c3d3c9.gif'>
```
#### Sliders with 3d Plots
```
import plotly.plotly as py
import ipywidgets as widgets
import numpy as np
from ipywidgets import interact, interactive, fixed
from IPython.core.display import HTML
from IPython.display import display, clear_output
from plotly.widgets import GraphWidget
g = GraphWidget('https://plotly.com/~DemoAccount/10147/')
x = y = np.arange(-5,5,0.1)
yt = x[:,np.newaxis]
# define our listener class
class z_data:
def __init__(self):
self.z = np.cos(x*yt)+np.sin(x*yt)*2
def on_z_change(self, name):
new_value = name['new']
self.z = np.cos(x*yt*(new_value+1)/100)+np.sin(x*yt*(new_value+1/100))
self.replot()
def replot(self):
g.restyle({ 'z': [self.z], 'colorscale': 'Viridis'})
# create sliders
z_slider = widgets.FloatSlider(min=0,max=30,value=1,step=0.05, continuous_update=False)
z_slider.description = 'Frequency'
z_slider.value = 1
# initialize listener class
z_state = z_data()
# activate listener on our slider
z_slider.observe(z_state.on_z_change, 'value')
# display our app
display(z_slider)
display(g)
%%html
<img src="https://cloud.githubusercontent.com/assets/12302455/16569550/bd02e030-4205-11e6-8087-d41c9b5d3681.gif">
```
#### Reference
```
help(GraphWidget)
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'slider_example.ipynb', 'python/slider-widget/', 'IPython Widgets | plotly',
'Interacting with Plotly charts using Sliders',
title = 'Slider Widget with Plotly',
name = 'Slider Widget with Plotly',
has_thumbnail='true', thumbnail='thumbnail/ipython_widgets.jpg',
language='python', page_type='example_index',
display_as='chart_events', order=20,
ipynb= '~notebook_demo/91')
```
|
github_jupyter
|
# Process an interferogram with ASF HyP3
https://hyp3-docs.asf.alaska.edu/using/sdk/
## Search for scenes
Scenes over Grand Mesa, Colorado, found via the ASF search API (https://asf.alaska.edu/api/)
```
import requests
import shapely.geometry
roi = shapely.geometry.box(-108.3,39.2,-107.8,38.8)
polygonWKT = roi.wkt
baseurl = "https://api.daac.asf.alaska.edu/services/search/param"
data = dict(
intersectsWith=polygonWKT,
platform='Sentinel-1',
processingLevel="SLC",
beamMode='IW',
output='json',
start='2020-10-30T11:59:59Z',
end='2020-11-30T11:59:59Z',
#relativeOrbit=None,
#flightDirection=None,
)
r = requests.get(baseurl, params=data, timeout=100)
print(r.url)
# load results into pandas dataframe
import pandas as pd
df = pd.DataFrame(r.json()[0])
df.head()
# Easier to explore the inventory in plots
import hvplot.pandas
from bokeh.models.formatters import DatetimeTickFormatter
formatter = DatetimeTickFormatter(years='%m-%d')
timeseries = df.hvplot.scatter(x='startTime', y='relativeOrbit', c='relativeOrbit',
xformatter=formatter,
title='Acquisition times (UTC)')
import geopandas as gpd
import geoviews as gv
import panel as pn
gf_aoi = gpd.GeoDataFrame(geometry=[roi])
import shapely.wkt
polygons = df.stringFootprint.apply(shapely.wkt.loads)
gf_footprints = gpd.GeoDataFrame(df, crs="EPSG:4326", geometry=polygons)
tiles = gv.tile_sources.StamenTerrainRetina.options(width=600, height=400)
aoi = gf_aoi.hvplot(geo=True, fill_color=None, line_color='m', hover=False)
footprints = gf_footprints.hvplot.polygons(geo=True, legend=False, alpha=0.2, c='relativeOrbit', title='Sentinel-1 Tracks')
mapview = tiles * footprints * aoi
pn.Column(mapview,timeseries)
```
```
df.relativeOrbit.unique()
orbit = '129'
reference = '2020-11-11'
secondary = '2020-10-30'
dfS = df[df.relativeOrbit == orbit]
granule1 = dfS.loc[dfS.sceneDate.str.startswith(reference), 'granuleName'].values[0]
granule2 = dfS.loc[dfS.sceneDate.str.startswith(secondary), 'granuleName'].values[0]
print(f'granule1: {granule1}')
print(f'granule2: {granule2}')
for ref in [reference, secondary]:
print(dfS.loc[dfS.sceneDate.str.startswith(ref), 'downloadUrl'].values[0])
```
## Process an InSAR pair (interferogram)
examples:
- https://nbviewer.jupyter.org/github/ASFHyP3/hyp3-sdk/blob/main/docs/sdk_example.ipynb
- https://hyp3-docs.asf.alaska.edu/using/sdk/
```
import hyp3_sdk
# ~/.netrc file used for credentials
hyp3 = hyp3_sdk.HyP3()
# Processing quota
hyp3.check_quota() #199 (200 scenes per month?)
job = hyp3.submit_insar_job(granule1,
granule2,
name='gm_20201111_20201030',
include_los_displacement=True,
include_inc_map=True)
# All jobs you've submitted
# NOTE: processing w/ defaults uses INSAR_GAMMA
# NOTE: re-run this cell to update results of batch job
batch = hyp3.find_jobs()
job = batch.jobs[0] # most recent job
job
# If you have lists of dictionaries, visualizing with a pandas dataframe is convenient
df = pd.DataFrame([job.to_dict() for job in batch])
df.head()
# Actually no, expiration time is not available for download...
#pd.to_datetime(df.expiration_time[0]) - pd.to_datetime(df.request_time[0])
# ImportError: IProgress not found. Please update jupyter and ipywidgets.
# but I think this still succeeds
job.download_files()
!ls -ltrh
# requires ipywidgets
#hyp3.watch(job)
```
## Process multiple pairs in batch mode
```
# with progress bar
#from tqdm.auto import tqdm
#insar_jobs = sdk.Batch()
#for reference in tqdm(granules):
# neighbors_metadata = asf_search.get_nearest_neighbors(reference, max_neighbors=2)
# for secondary_metadata in neighbors_metadata:
# insar_jobs += hyp3.submit_insar_job(reference, secondary_metadata['granuleName'], name='insar-example')
#print(insar_jobs)
# Can also submit jobs via web interface # Can also visit https://hyp3.asf.alaska.edu/pending_products
# Which then shows logs that can be sorted into 'submitted, failed, etc...'
```
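For reference, a filled-in version of that loop might look roughly like the sketch below. It reuses `granule1`/`granule2` from the earlier cells as a placeholder pair list and mirrors the single-job calls shown above (`submit_insar_job`, `.jobs`, `download_files`); adjust the pair list and job name for a real batch:
```
import hyp3_sdk

hyp3 = hyp3_sdk.HyP3()                      # ~/.netrc used for credentials, as above
pairs = [(granule1, granule2)]              # placeholder: list of (reference, secondary) granules

insar_jobs = hyp3_sdk.Batch()
for ref, sec in pairs:
    insar_jobs += hyp3.submit_insar_job(ref, sec, name='insar-batch-example')

# hyp3.watch(insar_jobs)                    # optionally block until the batch finishes (needs ipywidgets)
for job in insar_jobs.jobs:                 # once complete, download each product
    job.download_files()
```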
|
github_jupyter
|
# Activity #1: MarketMap
* another way to visualize mappable data
## 1.a: explore the dataset
```
# our usual stuff
%matplotlib inline
import pandas as pd
import numpy as np
#!pip install xlrd # JPN, might have to run this
# note: this is querying from the web! How neat is that??
df = pd.read_excel('https://query.data.world/s/ivl45pdpubos6jpsii3djsjwm2pcjv', skiprows=5)
# the above might take a while to load all the data
# what is in this dataframe? lets take a look at the top
df.head()
# this dataset is called: "Surgery Charges Across the U.S."
# and its just showing us how much different procedures
# cost from different hospitals
# what kinds of data are we working with?
df.dtypes
# lets look at some summary data
# recall: this is like R's "summary" function
df.describe()
# so, things like the mean zipcode aren't
# meaningful, same thing with provider ID
# But certainly looking at the average
# total payments, discharges, might
# be useful
# lets look at how many separate types of surgery are
# represented in this dataset:
df["DRG Definition"].unique().size
# what about how many provider (hospital) names?
df["Provider Name"].unique().size
# how many states are represented
df["Provider State"].unique().size
# what are the state codes?
df["Provider State"].unique()
# lets figure out what the most common surgeries are via how
# many folks are discharged after each type of surgery
# (1)
most_common = df.groupby("DRG Definition")["Total Discharges"].sum()
most_common
# (2) but lets sort by the largest on top
most_common = df.groupby("DRG Definition")["Total Discharges"].sum().sort_values(ascending=False)
most_common
# (3) lets look at only the top 5, for fun
most_common[:5]
# (4) or we can only look at the names of the top 5:
most_common[:5].index.values
```
## 1.b: formatting data for MarketMap
* here we are going to practice doing some fancy things to clean this data
* this will be good practice for when you run into other datasets "in the wild"
```
# (1) lets create a little table of total discharges for
# each type of surgery & state
total_discharges = df.groupby(["DRG Definition", "Provider State"])["Total Discharges"].sum()
total_discharges
# (2) the above is not intuitive, lets prettify it
total_discharges = df.groupby(["DRG Definition", "Provider State"])["Total Discharges"].sum().unstack()
total_discharges
```
### Aside: lets quick check out what are the most frequent surgeries
```
# for our map, we are going to want to
# normalize the discharges for each surgery
# for each
# state by the total discharges across all
# states for a particular type of surgery
# lets add this to our total_discharges DF
total_discharges["Total"] = total_discharges.sum(axis = 1)
total_discharges["Total"].head() # just look at the first few
# finally, lets check out the most often
# performed surgery across all states
# we can do this by sorting our DF by this total we just
# calculated:
total_discharges.sort_values(by = "Total",
ascending=False,
inplace = True)
# now lets just look at the first few of our
# sorted array
total_discharges.head()
# so, from this we see that joint replacement
# or reattachment of a lower extremity is
# the most likely surgery (in number of discharges)
# followed by surgeries for sepsis and then heart failure
# neat. We won't need these for plotting, so we can remove our
# total column we just calculated
del total_discharges["Total"]
total_discharges.head()
# now we see that we are back to just states & surgeries
# *but* our sorting is still by the total that we
# previously calculated.
# spiffy!
```
## 1.c: plot data with bqplot
```
import bqplot
# by default bqplot does not import
# all packages, we have to
# explicitely import market_map
import bqplot.market_map # for access to market_map
# lets do our usual thing, but with a market map
# instead of a heat map
# scales:
x_sc, y_sc = bqplot.OrdinalScale(), bqplot.OrdinalScale() # note, just a different way to call things
c_sc = bqplot.ColorScale(scheme="Blues")
# just a color axes for now:
c_ax = bqplot.ColorAxis(scale = c_sc, orientation = 'vertical')
# lets make the market map:
# (1) what should we plot for our color? lets take a look:
total_discharges.iloc[0].values, total_discharges.columns.values
# this is the total discharges for the most
# popular surgical procedure
# the columns will be states
# (2) lets put this into a map
mmap = bqplot.market_map.MarketMap(color = total_discharges.iloc[0].values,
names = total_discharges.columns.values,
scales={'color':c_sc},
axes=[c_ax])
# (3) ok, but just clicking on things doesn't tell us too much
# lets add a little label to print out the total of the selected
import ipywidgets
label = ipywidgets.Label()
# link to market map
def get_data(change):
# (3.1)
#print(change['owner'].selected)
# (3.2) loop
v = 0.0 # to store total value
for s in change['owner'].selected:
v += total_discharges.iloc[0][total_discharges.iloc[0].index == s].values
if v > 0: # in case nothing is selected
# what are we printing?
l = 'Total discharges of ' + \
total_discharges.iloc[0].name + \
' = ' + str(v[0]) # note: v is by default an array
label.value = l
mmap.observe(get_data,'selected')
#mmap
# (3)
ipywidgets.VBox([label,mmap])
```
## Discussion:
* think back to the map we had last week: we can certainly plot this information with a more geo-realistic map
* what are the pros & cons of each style of map? What do each highlight? How are each biased?
## IF we have time: Re-do with other mapping system:
```
from us_state_abbrev import us_state_abbrev
sc_geo = bqplot.AlbersUSA()
state_data = bqplot.topo_load('map_data/USStatesMap.json')
#(1)
#states_map = bqplot.Map(map_data=state_data, scales={'projection':sc_geo})
#(2)
# library from last time
from states_utils import get_ids_and_names
ids, state_names = get_ids_and_names(states_map)
# color maps
import matplotlib.cm as cm
cmap = cm.Blues
# most popular surgery
popSurg = total_discharges.iloc[0]
# here, we will go through the process of getting colors to plot
# each state with its similar color to the marketmap above:
#!pip install webcolors
from webcolors import rgb_to_hex
d = {} # empty dict to store colors
for s in states_map.map_data['objects']['subunits']['geometries']:
if s['properties'] is not None:
#print(s['properties']['name'], s['id'])
# match states to abbreviations
state_abbrev = us_state_abbrev[s['properties']['name']]
#print(state_abbrev)
v = popSurg[popSurg.index == state_abbrev].values[0]
# renorm v to colors and then number of states
v = (v - popSurg.values.min())/(popSurg.values.max()-popSurg.values.min())
#print(v, int(cmap(v)[0]), int(cmap(v)[1]), int(cmap(v)[2]))
# convert to from 0-1 to 0-255 rgbs
c = [int(cmap(v)[i]*255) for i in range(3)]
#d[s['id']] = rgb_to_hex([int(cmap(v)[0]*255), int(cmap(v)[1]*255), int(cmap(v)[2]*255)])
d[s['id']] = rgb_to_hex(c)
def_tt = bqplot.Tooltip(fields=['name'])
states_map = bqplot.Map(map_data=state_data, scales={'projection':sc_geo}, colors = d, tooltip=def_tt)
# add interactions
states_map.interactions = {'click': 'select', 'hover': 'tooltip'}
# (3)
label = ipywidgets.Label()
# link to heat map
def get_data(change):
v = 0.0 # to store total value
if change['owner'].selected is not None:
for s in change['owner'].selected:
#print(s)
sn = state_names[s == ids][0]
state_abbrev = us_state_abbrev[sn]
v += popSurg[popSurg.index == state_abbrev].values[0]
if v > 0: # in case nothing is selected
# what are we printing?
l = 'Total discharges of ' + \
popSurg.name + \
' = ' + str(v) # note: v is by default an array
label.value = l
states_map.observe(get_data,'selected')
fig=bqplot.Figure(marks=[states_map],
title='US States Map Example',
fig_margin={'top': 0, 'bottom': 0, 'left': 0, 'right': 0}) # try w/o first and see
#fig
# (3)
ipywidgets.VBox([label,fig])
```
# Activity #2: Real quick ipyleaflet
* since cartopy wasn't working for folks, we'll quickly look at another option: ipyleaflet
```
#!pip install ipyleaflet
from ipyleaflet import *
# note: you might have to close and reopen your notebook
# to see the map
m = Map(center=(52, 10), zoom=8, basemap=basemaps.Hydda.Full)
#(2) street maps
strata_all = basemap_to_tiles(basemaps.Strava.All)
m.add_layer(strata_all)
m
```
### Note: more examples available here - https://github.com/jupyter-widgets/ipyleaflet/tree/master/examples
# Activity #3: Networked data - Simple example
```
# lets start with some very basic node data
# **copy paste into chat **
node_data = [
{"label": "Luke Skywalker", "media": "Star Wars", "shape": "rect"},
{"label": "Jean-Luc Picard", "media": "Star Trek", "shape": "rect"},
{"label": "Doctor Who", "media": "Doctor Who", "shape": "rect"},
{"label": "Pikachu", "media": "Detective Pikachu", "shape": "circle"},
]
# we'll use bqplot.Graph to plot these
graph = bqplot.Graph(node_data=node_data,
colors = ["red", "red", "red", "red"])
fig = bqplot.Figure(marks = [graph])
fig
# you'll note I can pick them up and move them around, but they aren't connected in any way
# lets make some connections
node_data = [
{"label": "Luke Skywalker", "media": "Star Wars", "shape": "rect"},
{"label": "Jean-Luc Picard", "media": "Star Trek", "shape": "rect"},
{"label": "Doctor Who", "media": "Doctor Who", "shape": "rect"},
{"label": "Pikachu", "media": "Detective Pikachu", "shape": "circle"},
]
# lets link the 0th entry (luke skywalker) to both
# jean-luc picard (1th entry) and pikachu (3rd entry)
link_data = [{'source': 0, 'target': 1}, {'source': 0, 'target': 3}]
graph = bqplot.Graph(node_data=node_data, link_data=link_data,
colors = ["red", "red", "red", "red"])
#(2) we can also play with the springiness of our links:
graph.charge = -300 # setting it to positive makes them want to overlap and is, in general, a lot of fun
# -300 is default
# (3) we can also change the link type:
graph.link_type = 'line' # arc = default, line, slant_line
# (4) highlight link direction, or not
graph.directed = False
fig = bqplot.Figure(marks = [graph])
fig
# we can do all the same things we've done with
# our previous map plots:
# for example, we can add a tooltip:
#(1)
tooltip = bqplot.Tooltip(fields=["media"])
graph = bqplot.Graph(node_data=node_data, link_data=link_data,
colors = ["red", "red", "red", "red"],
tooltip=tooltip)
# we can also do interactive things with labels
label = ipywidgets.Label()
# note here that the calling sequence
# is a little different - instead
# of "change" we have "obj" and
# "element"
def printstuff(obj, element):
# (1.1)
#print(obj)
#print(element)
label.value = 'Media = ' + element['data']['media']
graph.on_element_click(printstuff)
fig = bqplot.Figure(marks = [graph])
ipywidgets.VBox([label,fig])
```
# Activity #4: Network data - subset of facebook friends dataset
* from: https://snap.stanford.edu/data/egonets-Facebook.html
* dataset of friends lists
#### Info about this dataset:
* the original file you can read in has about 80,000 different connections
* it is ordered by the most connected person (person 0) at the top
* because this network would be computationally slow and just a hairball - we're going to be working with downsampled data
* for example, a file tagged "000090_000010" starts with the 10th most connected person, and only includes connections up to the 90th most connected person
* Its worth noting that this dataset (linked here and on the webpage) also includes feature data like gender, last name, school, etc - however it is too sparse to be of visualization use to us
Check out the other social network links at the SNAP data webpage!
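As a quick sanity check of that naming convention (a sketch; it assumes the downsampled file sits next to the notebook), the node ids in `facebook_combined_sm000090_000010.txt` should all fall between 10 and 90:
```
import pandas as pd

# every id in the downsampled edge list should lie in [10, 90]
edges = pd.read_csv('facebook_combined_sm000090_000010.txt', sep=' ', names=['ind1', 'ind2'])
print(edges.values.min(), edges.values.max())
```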
```
# from 10 to 150 connections, a few large nodes
#filename = 'facebook_combined_sm000150_000010.txt'
# this might be too large: one large node, up to 100 connections
#filename='facebook_combined_sm000100.txt'
# start here
filename = 'facebook_combined_sm000090_000010.txt'
# then this one
#filename = 'facebook_combined_sm000030_000000.txt'
# note how different the topologies are
network = pd.read_csv('/Users/jillnaiman1/Downloads/'+filename,
sep=' ', names=['ind1', 'ind2'])
network
# build the network
node_data = []
link_data = []
color_data = [] # all same color
# add nodes
maxNet = max([network['ind1'].max(),network['ind2'].max()])
for i in range(maxNet+1):
node_data.append({"label": str(i), 'shape_attrs': {'r': 8} }) # small circles
# now, make links
for i in range(len(network)):
# we are linking the ith object to another jth object, but we
# gotta figure out which jth object it is
source_id = network.iloc[i]['ind1']
target_id = network.iloc[i]['ind2']
link_data.append({'source': source_id, 'target': target_id})
color_data.append('blue')
#link_data,node_data
#color_data
# plot
graph = bqplot.Graph(node_data=node_data,
link_data = link_data,
colors=color_data)
# play with these for different graphs
graph.charge = -100
graph.link_type = 'line'
graph.link_distance=50
# there is no direction to links
graph.directed = False
fig = bqplot.Figure(marks = [graph])
fig.layout.min_width='1000px'
fig.layout.min_height='900px'
# note: I think this has to be the layout for this to look right
fig
# in theory, we could color this network by what school folks are in, or some such
# but while the dataset does contain some of these features, the
# answer rate is too sparse for our subset here
```
# Note: the below is just prep if you want to make your own subset datasets
```
# prep fb data by downsampling
minCon = 0
maxCon = 30
G = pd.read_csv('/Users/jillnaiman1/Downloads/facebook_combined.txt',sep=' ', names=['ind1', 'ind2'])
Gnew = np.zeros([2],dtype='int')
# loop and append
Gnew = G.loc[G['ind1']==minCon].values[0]
for i in range(G.loc[G['ind1']==minCon].index[0],len(G)):
gl = G.loc[i].values
if (gl[0] <= maxCon) and (gl[1] <= maxCon) and (gl[0] >= minCon) and (gl[1] >= minCon):
Gnew = np.vstack((Gnew,gl))
np.savetxt('/Users/jillnaiman1/spring2019online/week09/data/facebook_combined_sm' + \
str(maxCon).zfill(6) + '_' + str(minCon).zfill(6) + '.txt', Gnew,fmt='%i')
graph.link_distance
```
|
github_jupyter
|
```
import sys
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0" #for training on gpu
from scipy import signal
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
from random import shuffle
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Conv1D, MaxPooling1D, Dropout, GlobalAveragePooling1D, Reshape
#path to data files
path = "./nine_movs_six_sub_split/"
#path where you want to save trained model and some other files
sec_path = "./"
def create_dataset(file_path, persons):
path = file_path + "{}_{}.txt"
sgn = []
lbl = []
for i in persons:
for j in range(9):
with open(path.format(i, j + 1), "rb") as fp: # Unpickling
data = pickle.load(fp)
for k in range(np.shape(data)[0]):
sgn.append(data[k])
lbl.append(j)
sgn = np.asarray(sgn, dtype=np.float32)
lbl = np.asarray(lbl, dtype=np.int32)
c = list(zip(sgn, lbl))
shuffle(c)
sgn, lbl = zip(*c)
sgn = np.asarray(sgn, dtype=np.float64)
lbl = np.asarray(lbl, dtype=np.int64)
print(sgn.shape)
train_signals = sgn[0:int(0.8 * len(sgn))]
train_labels = lbl[0:int(0.8 * len(lbl))]
val_signals = sgn[int(0.8*len(sgn)):]
val_labels = lbl[int(0.8*len(lbl)):]
#test_signals = sgn[int(0.8*len(sgn)):]
#test_labels = lbl[int(0.8*len(lbl)):]
train_labels = to_categorical(train_labels)
val_labels = to_categorical(val_labels)
#test_labels = to_categorical(test_labels)
return train_signals, train_labels, val_signals, val_labels
def create_dataset2(file_path, persons):
path = file_path + "{}_{}.txt"
sgn = []
lbl = []
i = persons
for j in range(9):
with open(path.format(i, j + 1), "rb") as fp: # Unpickling
data = pickle.load(fp)
for k in range(np.shape(data)[0]):
sgn.append(data[k])
lbl.append(j)
sgn = np.asarray(sgn, dtype=np.float32)
lbl = np.asarray(lbl, dtype=np.int32)
c = list(zip(sgn, lbl))
shuffle(c)
sgn, lbl = zip(*c)
sgn = np.asarray(sgn, dtype=np.float64)
lbl = np.asarray(lbl, dtype=np.int64)
print(sgn.shape)
train_signals = sgn[0:int(0.6 * len(sgn))]
train_labels = lbl[0:int(0.6 * len(lbl))]
val_signals = sgn[int(0.6*len(sgn)):int(0.8*len(sgn))]
val_labels = lbl[int(0.6*len(lbl)):int(0.8*len(lbl))]
test_signals = sgn[int(0.8*len(sgn)):]
test_labels = lbl[int(0.8*len(lbl)):]
train_labels = to_categorical(train_labels)
val_labels = to_categorical(val_labels)
test_labels = to_categorical(test_labels)
return train_signals, train_labels, val_signals, val_labels, test_signals, test_labels
def evaluate_model(model, expected_person_index = 2):
print("evaluate_model, expected_person_index:", expected_person_index)
persons = [1, 2, 3, 4, 5, 6]
persons.remove(expected_person_index)
train_signals, train_labels, val_signals, val_labels = create_dataset(path, persons)
model.evaluate(train_signals, train_labels)
train_signals, train_labels, val_signals, val_labels, test_signals, test_labels = create_dataset2(path, expected_person_index)
model.evaluate(train_signals, train_labels)
def plot_history(history):
plt.figure(figsize=(10,6))
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Correct recognition rate')
#plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.grid(True)
print(history.history['val_accuracy'])
# training model on 5 form 6 persons
a = [1, 3, 4, 5, 6]
train_signals, train_labels, val_signals, val_labels = create_dataset(path, a)
num_classes = 9
num_sensors = 1
input_size = train_signals.shape[1]
model = Sequential()
model.add(Reshape((input_size, num_sensors), input_shape=(input_size, )))
model.add(Conv1D(50, 10, activation='relu', input_shape=(input_size, num_sensors)))
model.add(Conv1D(25, 10, activation='relu'))
model.add(MaxPooling1D(4))
model.add(Conv1D(100, 10, activation='relu'))
model.add(Conv1D(50, 10, activation='relu'))
model.add(MaxPooling1D(4))
model.add(Dropout(0.5))
#next layers will be retrained
model.add(Conv1D(100, 10, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
#elapsed_time = time.time() - start_time # training time
#loss, accuracy = model.evaluate(val_signals, val_labels) # evaluating model on test data
#loss = float("{0:.3f}".format(loss))
#accuracy = float("{0:.3f}".format(accuracy))
#elapsed_time = float("{0:.3f}".format(elapsed_time))
#saving some data
#f = open(sec_path + "info.txt", 'w')
#f.writelines(["loss: ", str(loss), '\n', "accuracy: ", str(accuracy), '\n', "elapsed_time: ", str(elapsed_time), '\n'])
#saving model
#model.save(sec_path + "pretrained_model.h5")
#saving test data just in case
#cc = list(zip(test_signals, test_labels))
#with open(sec_path + "pretrained_model_test_data.txt", "wb") as fp:
# pickle.dump(cc, fp)
#saving history
#with open(sec_path + "pretrained_model_history.h5", "wb") as fp:
# pickle.dump(history.history, fp)
start_time = time.time()
history = model.fit(train_signals, train_labels,
steps_per_epoch=25,
epochs=100,
batch_size=None,
validation_data=(val_signals, val_labels),
#validation_steps=25
)
train_signals, train_labels, val_signals, val_labels, test_signals, test_labels = create_dataset2(path, 2)
plot_history(history)
evaluate_model(model, 2)
checkpoin_weights = []
for l in model.layers:
checkpoin_weights.append(l.get_weights())
model2 = Sequential()
model2.add(Reshape((input_size, num_sensors), input_shape=(input_size, )))
# New layer (FIR filter)
# a single convolution with 11 coefficients
length_of_conv_filter = 11
model2.add(Conv1D(1, length_of_conv_filter, activation='linear', input_shape=(input_size, num_sensors), padding='same', name="Filter"))
model2.add(Dropout(0.5))
model2.add(Conv1D(50, 10, activation='relu', input_shape=(input_size, num_sensors), trainable=False))  # trainable must be a boolean, not the string 'False'
model2.add(Conv1D(25, 10, activation='relu', trainable=False))
model2.add(MaxPooling1D(4))
model2.add(Conv1D(100, 10, activation='relu', trainable=False))
model2.add(Conv1D(50, 10, activation='relu', trainable=False))
model2.add(MaxPooling1D(4))
model2.add(Dropout(0.5))
#next layers will be retrained
model2.add(Conv1D(100, 10, activation='relu', trainable=False))
model2.add(GlobalAveragePooling1D())
model2.add(Dense(num_classes, activation='softmax', trainable=False))
#for i in range(1, 11):
# model2.layers[i+1].set_weights(checkpoin_weights[i])
w = model2.layers[1].get_weights()
print(w[0].shape)
# Set the weights for the first layer (the FIR filter)
w[0] = w[0] * 0
w[0][int(length_of_conv_filter/2),0,0] = 1
w[1] = w[1]*0
plt.plot(w[0].flatten())
w = model2.layers[1].set_weights(w)
# Set the weights of the already-trained layers
n_layers = 11
for i in range(1, n_layers):
model2.layers[i+2].set_weights(checkpoin_weights[i])
# model2.layers[i+1].trainable = False
model2.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
model2.evaluate(train_signals, train_labels)
# !tensorboard dev upload --logdir ./ \
# --name "Simple experiment" \
# --description "Training results from https://colab.sandbox.google.com/github/tensorflow/tensorboard/blob/master/docs/tbdev_getting_started.ipynb" \
# --one_shot
# !tensorboard dev list
#keras.utils.plot_model(model2, 'dense_image_classifier.png', show_shapes=True)
keras.utils.plot_model(model2, 'dense_image_classifier2.png', show_shapes=True)
#keras.utils.plot_model(model2, 'dense_image_classifier.png', show_shapes=True)
history2 = model2.fit(train_signals, train_labels, epochs=25,
validation_data=(test_signals, test_labels))
plot_history(history2)
#function to print the coefficients of a convolutional layer
def check_coef_conv_layer(model_name, num_layer, num_filter):
    #store the coefficients of the inspected layer
    layer = model_name.layers[num_layer].get_weights()
    #the 'a' coefficients (weights) of the inspected layer
    weights = layer[0]
    #the 'b' coefficients (biases) of the inspected layer
    biases = layer[1]
    #print the values
    for i in range(10):
        print("k{} = {:7.4f}".format(i, weights[i][0][num_filter]))
    print("\nb = {:7.4f}".format(biases[num_filter]))
#function to print the coefficients of a dense (fully connected) layer
def check_coef_dense_layer(model_name, num_layer, num_filter):
    #store the weights of the inspected layer
    layer = model_name.layers[num_layer].get_weights()
    #the 'a' coefficients (weights) of the inspected layer
    weights = layer[0]
    #the 'b' coefficients (biases) of the inspected layer
    biases = layer[1]
    #print the values
    for i in range(10):
        print("k{} = {:7.4f}".format(i, weights[i][num_filter]))
    print("\nb = {:7.4f}".format(biases[num_filter]))
l = model.layers[10].get_weights()
plot_history(history2)
evaluate_model(model2, 2)
# keras.utils.plot_model(model, 'dense_image_classifier.png', show_shapes=True)
b = model2.layers[1].get_weights()
# b[0] - neurons weights
w, h = signal.freqz(b[0].flatten())
plt.figure(figsize=(7, 5))
plt.plot(w, 20 * np.log10(abs(h)), 'b', label='amplitude-frequency response, subject 1')
plt.grid(True)
plt.xlabel('Normalized frequency')
plt.ylabel('Amplitude, dB')
plt.legend(loc='lower right')
#print(b[0])
#plt.set_xlabel('Frequency [rad/sample]')
#plt.set_ylabel('Amplitude [dB]', color='b')
plt.figure(figsize=(8,5))
print(b[0].flatten())
print(abs(b[0].flatten().min()))
# plt.plot(np.log10(b[0].flatten()+0.1), label='impulse response')
plt.plot(b[0].flatten(), label='impulse response')
plt.grid(True)
plt.xlabel('coefficient index')
plt.ylabel('value')
plt.legend(loc='upper right')
plt.title('Impulse response')
plt.plot(b[0].flatten())
print(b[0].flatten())
# Now retrain on the third subject
train_signals, train_labels, val_signals, val_labels, test_signals, test_labels = create_dataset2(path, 3)
model.evaluate(train_signals, train_labels)
history2 = model2.fit(train_signals, train_labels, epochs=25,
validation_data=(test_signals, test_labels))
evaluate_model(model2, 3)
```
|
github_jupyter
|
# Transposed Convolution
:label:`sec_transposed_conv`

The CNN layers we have seen so far, such as convolutional layers (:numref:`sec_conv_layer`) and pooling layers (:numref:`sec_pooling`), typically reduce (downsample) the spatial dimensions (height and width) of the input.
However, in pixel-level tasks such as semantic segmentation it is convenient for the input and output to have the same spatial dimensions.
For example, the channel dimension at an output pixel can then hold the classification result for the input pixel at the same spatial position.
To achieve this, especially after the spatial dimensions have been reduced by CNN layers, we can use another type of CNN layer that increases (upsamples) the spatial dimensions of the intermediate feature maps.
In this section, we introduce
*transposed convolution* :cite:`Dumoulin.Visin.2016`,
which is used to reverse the reduction in spatial size caused by downsampling.
```
import torch
from torch import nn
from d2l import torch as d2l
```
## Basic Operation

Ignoring channels for now, let's begin with the basic transposed convolution operation with a stride of 1 and no padding.
Suppose we have an $n_h \times n_w$ input tensor and a $k_h \times k_w$ kernel.
Sliding the kernel window with a stride of 1, $n_w$ times in each row and $n_h$ times in each column, yields a total of $n_h n_w$ intermediate results.
Each intermediate result is a $(n_h + k_h - 1) \times (n_w + k_w - 1)$ tensor, initialized to zeros.
To compute each intermediate tensor, each element of the input tensor is multiplied by the kernel, and the resulting $k_h \times k_w$ tensor replaces a portion of that intermediate tensor.
Note that the position of the replaced portion in each intermediate tensor corresponds to the position of the element in the input tensor used for the computation.
Finally, all the intermediate results are summed to produce the output.

As an example, :numref:`fig_trans_conv` explains how a transposed convolution with a $2\times 2$ kernel is computed for a $2\times 2$ input tensor.

![Transposed convolution with a $2\times 2$ kernel.](../img/trans_conv.svg)
:label:`fig_trans_conv`

We can (**implement this basic transposed convolution operation**) `trans_conv` for an input matrix `X` and a kernel matrix `K`.
```
def trans_conv(X, K):
h, w = K.shape
Y = torch.zeros((X.shape[0] + h - 1, X.shape[1] + w - 1))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
Y[i: i + h, j: j + w] += X[i, j] * K
return Y
```
In contrast to the regular convolution (in :numref:`sec_conv_layer`) that *reduces* input elements via the kernel, the transposed convolution *broadcasts* input elements via the kernel, thereby producing an output that is larger than the input.
We can construct the input tensor `X` and the kernel tensor `K` from :numref:`fig_trans_conv` to [**validate the output of the above implementation**].
This implementation is the basic two-dimensional transposed convolution operation.
```
X = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
trans_conv(X, K)
```
Alternatively, when the input `X` and the kernel `K` are both four-dimensional tensors, we can [**use high-level APIs to obtain the same results**].
```
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, bias=False)
tconv.weight.data = K
tconv(X)
```
## [**Padding, Strides, and Multiple Channels**]

Different from the regular convolution, in the transposed convolution padding is applied to the output (the regular convolution applies padding to the input).
For example, when the padding on either side of the height and width is specified as 1, the first and last rows and columns are removed from the transposed convolution output.
```
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, padding=1, bias=False)
tconv.weight.data = K
tconv(X)
```
In the transposed convolution, strides are specified for the intermediate results (and thus the output), not for the input.
Using the same input and kernel tensors as in :numref:`fig_trans_conv`, changing the stride from 1 to 2 increases both the height and width of the intermediate tensors, hence the output tensor in :numref:`fig_trans_conv_stride2`.

![Transposed convolution with a $2\times 2$ kernel and a stride of 2.](../img/trans_conv_stride2.svg)
:label:`fig_trans_conv_stride2`

The following code can validate the transposed convolution output for a stride of 2 in :numref:`fig_trans_conv_stride2`.
```
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)
tconv.weight.data = K
tconv(X)
```
For multiple input and output channels, the transposed convolution works in the same way as the regular convolution.
Suppose the input has $c_i$ channels, and the transposed convolution assigns a $k_h\times k_w$ kernel tensor to each input channel.
When multiple output channels are specified, each output channel will have a $c_i\times k_h\times k_w$ kernel.

Likewise, if we feed $\mathsf{X}$ into a convolutional layer $f$ to output $\mathsf{Y}=f(\mathsf{X})$ and create a transposed convolutional layer $g$ with the same hyperparameters as $f$ except that the number of output channels equals the number of channels in $\mathsf{X}$, then $g(Y)$ will have the same shape as $\mathsf{X}$.
The following example illustrates this.
```
X = torch.rand(size=(1, 10, 16, 16))
conv = nn.Conv2d(10, 20, kernel_size=5, padding=2, stride=3)
tconv = nn.ConvTranspose2d(20, 10, kernel_size=5, padding=2, stride=3)
tconv(conv(X)).shape == X.shape
```
## [**Connection to Matrix Transposition**]
:label:`subsec-connection-to-mat-transposition`

Why is the transposed convolution named after matrix transposition?
Let's first see how to implement a convolution using matrix multiplication.
In the example below, we define a $3\times 3$ input `X` and a $2\times 2$ convolution kernel `K`, and then use the `corr2d` function to compute the convolution output `Y`.
```
X = torch.arange(9.0).reshape(3, 3)
K = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
Y = d2l.corr2d(X, K)
Y
```
Next, we rewrite the convolution kernel `K` as a sparse weight matrix `W` containing many zeros.
The shape of the weight matrix is ($4$, $9$), where the non-zero elements come from the convolution kernel `K`.
```
def kernel2matrix(K):
k, W = torch.zeros(5), torch.zeros((4, 9))
k[:2], k[3:5] = K[0, :], K[1, :]
W[0, :5], W[1, 1:6], W[2, 3:8], W[3, 4:] = k, k, k, k
return W
W = kernel2matrix(K)
W
```
Concatenating the input `X` row by row gives a vector of length 9.
Then the matrix multiplication of `W` and the vectorized `X` gives a vector of length 4.
After reshaping it, we obtain the same result `Y` as the original convolution operation above: we have just implemented a convolution using matrix multiplication.
```
Y == torch.matmul(W, X.reshape(-1)).reshape(2, 2)
```
Likewise, we can implement the transposed convolution using matrix multiplication.
In the following example, we take the $2 \times 2$ output `Y` of the regular convolution above as the input to the transposed convolution.
To implement this with matrix multiplication, we only need to transpose the weight matrix `W` to the shape $(9, 4)$.
```
Z = trans_conv(Y, K)
Z == torch.matmul(W.T, Y.reshape(-1)).reshape(3, 3)
```
More abstractly, given an input vector $\mathbf{x}$ and a weight matrix $\mathbf{W}$, the forward propagation function of the convolution can be implemented by multiplying its input with the weight matrix and outputting a vector $\mathbf{y}=\mathbf{W}\mathbf{x}$.
Since backpropagation follows the chain rule and $\nabla_{\mathbf{x}}\mathbf{y}=\mathbf{W}^\top$, the backpropagation function of the convolution can be implemented by multiplying its input with the transposed weight matrix $\mathbf{W}^\top$.
Therefore, the transposed convolutional layer simply exchanges the forward propagation function and the backpropagation function of the convolutional layer: its forward propagation and backpropagation functions multiply their input vector with $\mathbf{W}^\top$ and $\mathbf{W}$, respectively.
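As a small check of this argument (a sketch reusing the weight matrix `W` built above), autograd confirms that backpropagating through $\mathbf{y}=\mathbf{W}\mathbf{x}$ multiplies the upstream gradient by $\mathbf{W}^\top$:
```
x = torch.arange(9.0, requires_grad=True)      # flattened 3x3 input
y = torch.matmul(W, x)                         # convolution written as y = Wx
y.sum().backward()                             # upstream gradient dL/dy = 1
x.grad == torch.matmul(W.T, torch.ones(4))     # dL/dx = W^T dL/dy
```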
## Summary

* In contrast to the regular convolution that reduces input elements via the kernel, the transposed convolution broadcasts input elements via the kernel, thereby producing an output whose shape is larger than the input.
* If we feed $\mathsf{X}$ into a convolutional layer $f$ to obtain the output $\mathsf{Y}=f(\mathsf{X})$ and create a transposed convolutional layer $g$ with the same hyperparameters as $f$ except that the number of output channels equals the number of channels in $\mathsf{X}$, then $g(Y)$ will have the same shape as $\mathsf{X}$.
* We can implement convolutions using matrix multiplications. The transposed convolutional layer simply exchanges the forward propagation function and the backpropagation function of the convolutional layer.
## Exercises

1. In :numref:`subsec-connection-to-mat-transposition`, the convolution input `X` and the transposed convolution output `Z` have the same shape. Do they also have the same values? Why or why not?
1. Is it efficient to use matrix multiplications to implement convolutions? Why or why not?
[Discussions](https://discuss.d2l.ai/t/3302)
|
github_jupyter
|
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Better performance with tf.function
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/function"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/function.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/function.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/function.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
In TensorFlow 2, [eager execution](eager.ipynb) is turned on by default. The user interface is intuitive and flexible (running one-off operations is much easier and faster), but this can come at the expense of performance and deployability.
You can use `tf.function` to make graphs out of your programs. It is a transformation tool that creates Python-independent dataflow graphs out of your Python code. This will help you create performant and portable models, and it is required to use `SavedModel`.
This guide will help you conceptualize how `tf.function` works under the hood, so you can use it effectively.
The main takeaways and recommendations are:
- Debug in eager mode, then decorate with `@tf.function`.
- Don't rely on Python side effects like object mutation or list appends.
- `tf.function` works best with TensorFlow ops; NumPy and Python calls are converted to constants.
## Setup
```
import tensorflow as tf
```
Define a helper function to demonstrate the kinds of errors you might encounter:
```
import traceback
import contextlib
# Some helper code to demonstrate the kinds of errors you might encounter.
@contextlib.contextmanager
def assert_raises(error_class):
try:
yield
except error_class as e:
print('Caught expected exception \n {}:'.format(error_class))
traceback.print_exc(limit=2)
except Exception as e:
raise e
else:
raise Exception('Expected {} to be raised but no error was raised!'.format(
error_class))
```
## Basics
### Usage
A `Function` you define (for example by applying the `@tf.function` decorator) is just like a core TensorFlow operation: You can execute it eagerly; you can compute gradients; and so on.
```
@tf.function # The decorator converts `add` into a `Function`.
def add(a, b):
return a + b
add(tf.ones([2, 2]), tf.ones([2, 2])) # [[2., 2.], [2., 2.]]
v = tf.Variable(1.0)
with tf.GradientTape() as tape:
result = add(v, 1.0)
tape.gradient(result, v)
```
You can use `Function`s inside other `Function`s.
```
@tf.function
def dense_layer(x, w, b):
return add(tf.matmul(x, w), b)
dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2]))
```
`Function`s can be faster than eager code, especially for graphs with many small ops. But for graphs with a few expensive ops (like convolutions), you may not see much speedup.
```
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# Warm up
conv_layer(image); conv_fn(image)
print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10))
print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note how there's not much difference in performance for convolutions")
```
### Tracing
This section exposes how `Function` works under the hood, including implementation details *which may change in the future*. However, once you understand why and when tracing happens, it's much easier to use `tf.function` effectively!
#### What is "tracing"?
A `Function` runs your program in a [TensorFlow Graph](https://www.tensorflow.org/guide/intro_to_graphs#what_are_graphs). However, a `tf.Graph` cannot represent all the things that you'd write in an eager TensorFlow program. For instance, Python supports polymorphism, but `tf.Graph` requires its inputs to have a specified data type and dimension. Or you may perform side tasks like reading command-line arguments, raising an error, or working with a more complex Python object; none of these things can run in a `tf.Graph`.
`Function` bridges this gap by separating your code in two stages:
1) In the first stage, referred to as "**tracing**", `Function` creates a new `tf.Graph`. Python code runs normally, but all TensorFlow operations (like adding two Tensors) are *deferred*: they are captured by the `tf.Graph` and not run.
2) In the second stage, a `tf.Graph` which contains everything that was deferred in the first stage is run. This stage is much faster than the tracing stage.
Depending on its inputs, `Function` will not always run the first stage when it is called. See ["Rules of tracing"](#rules_of_tracing) below to get a better sense of how it makes that determination. Skipping the first stage and only executing the second stage is what gives you TensorFlow's high performance.
When `Function` does decide to trace, the tracing stage is immediately followed by the second stage, so calling the `Function` both creates and runs the `tf.Graph`. Later you will see how you can run only the tracing stage with [`get_concrete_function`](#obtaining_concrete_functions).
When we pass arguments of different types into a `Function`, both stages are run:
```
@tf.function
def double(a):
print("Tracing with", a)
return a + a
print(double(tf.constant(1)))
print()
print(double(tf.constant(1.1)))
print()
print(double(tf.constant("a")))
print()
```
Note that if you repeatedly call a `Function` with the same argument type, TensorFlow will skip the tracing stage and reuse a previously traced graph, as the generated graph would be identical.
```
# This doesn't print 'Tracing with ...'
print(double(tf.constant("b")))
```
You can use `pretty_printed_concrete_signatures()` to see all of the available traces:
```
print(double.pretty_printed_concrete_signatures())
```
So far, you've seen that `tf.function` creates a cached, dynamic dispatch layer over TensorFlow's graph tracing logic. To be more specific about the terminology:
- A `tf.Graph` is the raw, language-agnostic, portable representation of a TensorFlow computation.
- A `ConcreteFunction` wraps a `tf.Graph`.
- A `Function` manages a cache of `ConcreteFunction`s and picks the right one for your inputs.
- `tf.function` wraps a Python function, returning a `Function` object.
- **Tracing** creates a `tf.Graph` and wraps it in a `ConcreteFunction`, also known as a **trace.**
#### Rules of tracing
A `Function` determines whether to reuse a traced `ConcreteFunction` by computing a **cache key** from an input's args and kwargs. A **cache key** is a key that identifies a `ConcreteFunction` based on the input args and kwargs of the `Function` call, according to the following rules (which may change):
- The key generated for a `tf.Tensor` is its shape and dtype.
- The key generated for a `tf.Variable` is a unique variable id.
- The key generated for a Python primitive (like `int`, `float`, `str`) is its value.
- The key generated for nested `dict`s, `list`s, `tuple`s, `namedtuple`s, and [`attr`](https://www.attrs.org/en/stable/)s is the flattened tuple of leaf-keys (see `nest.flatten`). (As a result of this flattening, calling a concrete function with a different nesting structure than the one used during tracing will result in a TypeError).
- For all other Python types the key is unique to the object. This way a function or method is traced independently for each instance it is called with.
Note: Cache keys are based on the `Function` input parameters so changes to global and [free variables](https://docs.python.org/3/reference/executionmodel.html#binding-of-names) alone will not create a new trace. See [this section](#depending_on_python_global_and_free_variables) for recommended practices when dealing with Python global and free variables.
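To make these rules concrete, here is a small added sketch (not part of the guide itself) showing which calls reuse a trace; the `Tracing with ...` line only prints when a new trace is created.
```
@tf.function
def describe(x):
  print("Tracing with", x)
  return x

describe(tf.constant([1, 2]))    # New trace: int32 tensor of shape [2]
describe(tf.constant([3, 4]))    # Same shape and dtype: reuses the trace
describe(tf.constant([1., 2.]))  # float32: new trace
describe(1)                      # Python int keyed by value: new trace
describe(2)                      # Different Python value: yet another trace
```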
#### Controlling retracing
Retracing, which is when your `Function` creates more than one trace, helps ensure that TensorFlow generates correct graphs for each set of inputs. However, tracing is an expensive operation! If your `Function` retraces a new graph for every call, you'll find that your code executes more slowly than if you didn't use `tf.function`.
To control the tracing behavior, you can use the following techniques:
- Specify `input_signature` in `tf.function` to limit tracing.
```
@tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32),))
def next_collatz(x):
print("Tracing with", x)
return tf.where(x % 2 == 0, x // 2, 3 * x + 1)
print(next_collatz(tf.constant([1, 2])))
# You specified a 1-D tensor in the input signature, so this should fail.
with assert_raises(ValueError):
next_collatz(tf.constant([[1, 2], [3, 4]]))
# You specified an int32 dtype in the input signature, so this should fail.
with assert_raises(ValueError):
next_collatz(tf.constant([1.0, 2.0]))
```
- Specify a \[None\] dimension in `tf.TensorSpec` to allow for flexibility in trace reuse.
Since TensorFlow matches tensors based on their shape, using a `None` dimension as a wildcard will allow `Function`s to reuse traces for variably-sized input. Variably-sized input can occur if you have sequences of different length, or images of different sizes for each batch (See the [Transformer](../tutorials/text/transformer.ipynb) and [Deep Dream](../tutorials/generative/deepdream.ipynb) tutorials for example).
```
@tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32),))
def g(x):
print('Tracing with', x)
return x
# No retrace!
print(g(tf.constant([1, 2, 3])))
print(g(tf.constant([1, 2, 3, 4, 5])))
```
- Cast Python arguments to Tensors to reduce retracing.
Often, Python arguments are used to control hyperparameters and graph constructions - for example, `num_layers=10` or `training=True` or `nonlinearity='relu'`. So, if the Python argument changes, it makes sense that you'd have to retrace the graph.
However, it's possible that a Python argument is not being used to control graph construction. In these cases, a change in the Python value can trigger needless retracing. Take, for example, this training loop, which AutoGraph will dynamically unroll. Despite the multiple traces, the generated graph is actually identical, so retracing is unnecessary.
```
def train_one_step():
pass
@tf.function
def train(num_steps):
print("Tracing with num_steps = ", num_steps)
tf.print("Executing with num_steps = ", num_steps)
for _ in tf.range(num_steps):
train_one_step()
print("Retracing occurs for different Python arguments.")
train(num_steps=10)
train(num_steps=20)
print()
print("Traces are reused for Tensor arguments.")
train(num_steps=tf.constant(10))
train(num_steps=tf.constant(20))
```
If you need to force retracing, create a new `Function`. Separate `Function` objects are guaranteed not to share traces.
```
def f():
print('Tracing!')
tf.print('Executing')
tf.function(f)()
tf.function(f)()
```
### Obtaining concrete functions
Every time a function is traced, a new concrete function is created. You can directly obtain a concrete function by using `get_concrete_function`.
```
print("Obtaining concrete trace")
double_strings = double.get_concrete_function(tf.constant("a"))
print("Executing traced function")
print(double_strings(tf.constant("a")))
print(double_strings(a=tf.constant("b")))
# You can also call get_concrete_function on an InputSpec
double_strings_from_inputspec = double.get_concrete_function(tf.TensorSpec(shape=[], dtype=tf.string))
print(double_strings_from_inputspec(tf.constant("c")))
```
Printing a `ConcreteFunction` displays a summary of its input arguments (with types) and its output type.
```
print(double_strings)
```
You can also directly retrieve a concrete function's signature.
```
print(double_strings.structured_input_signature)
print(double_strings.structured_outputs)
```
Using a concrete trace with incompatible types will throw an error.
```
with assert_raises(tf.errors.InvalidArgumentError):
double_strings(tf.constant(1))
```
You may notice that Python arguments are given special treatment in a concrete function's input signature. Prior to TensorFlow 2.3, Python arguments were simply removed from the concrete function's signature. Starting with TensorFlow 2.3, Python arguments remain in the signature, but are constrained to take the value set during tracing.
```
@tf.function
def pow(a, b):
return a ** b
square = pow.get_concrete_function(a=tf.TensorSpec(None, tf.float32), b=2)
print(square)
assert square(tf.constant(10.0)) == 100
with assert_raises(TypeError):
square(tf.constant(10.0), b=3)
```
### Obtaining graphs
Each concrete function is a callable wrapper around a `tf.Graph`. Although retrieving the actual `tf.Graph` object is not something you'll normally need to do, you can obtain it easily from any concrete function.
```
graph = double_strings.graph
for node in graph.as_graph_def().node:
print(f'{node.input} -> {node.name}')
```
### Debugging
In general, debugging code is easier in eager mode than inside `tf.function`. You should ensure that your code executes error-free in eager mode before decorating with `tf.function`. To assist in the debugging process, you can call `tf.config.run_functions_eagerly(True)` to globally disable and reenable `tf.function`.
When tracking down issues that only appear within `tf.function`, here are some tips:
- Plain old Python `print` calls only execute during tracing, helping you track down when your function gets (re)traced.
- `tf.print` calls will execute every time, and can help you track down intermediate values during execution.
- `tf.debugging.enable_check_numerics` is an easy way to track down where NaNs and Inf are created.
- `pdb` (the [Python debugger](https://docs.python.org/3/library/pdb.html)) can help you understand what's going on during tracing. (Caveat: `pdb` will drop you into AutoGraph-transformed source code.)
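As a brief illustration of the `tf.config.run_functions_eagerly` tip above (the function name `troublesome` is just a placeholder), you can toggle eager execution around the call you want to inspect:
```
@tf.function
def troublesome(x):
  # Imagine a bug here that you want to step through with pdb or plain print().
  return x * x + 1

tf.config.run_functions_eagerly(True)   # The Python body now runs on every call
print(troublesome(tf.constant(3)))
tf.config.run_functions_eagerly(False)  # Re-enable graph execution
```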
## AutoGraph transformations
AutoGraph is a library that is on by default in `tf.function`, and transforms a subset of Python eager code into graph-compatible TensorFlow ops. This includes control flow like `if`, `for`, `while`.
TensorFlow ops like `tf.cond` and `tf.while_loop` continue to work, but control flow is often easier to write and understand when written in Python.
```
# A simple loop
@tf.function
def f(x):
while tf.reduce_sum(x) > 1:
tf.print(x)
x = tf.tanh(x)
return x
f(tf.random.uniform([5]))
```
If you're curious, you can inspect the code AutoGraph generates.
```
print(tf.autograph.to_code(f.python_function))
```
### Conditionals
AutoGraph will convert some `if <condition>` statements into the equivalent `tf.cond` calls. This substitution is made if `<condition>` is a Tensor. Otherwise, the `if` statement is executed as a Python conditional.
A Python conditional executes during tracing, so exactly one branch of the conditional will be added to the graph. Without AutoGraph, this traced graph would be unable to take the alternate branch if there is data-dependent control flow.
`tf.cond` traces and adds both branches of the conditional to the graph, dynamically selecting a branch at execution time. Tracing can have unintended side effects; check out [AutoGraph tracing effects](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/control_flow.md#effects-of-the-tracing-process) for more information.
```
@tf.function
def fizzbuzz(n):
for i in tf.range(1, n + 1):
print('Tracing for loop')
if i % 15 == 0:
print('Tracing fizzbuzz branch')
tf.print('fizzbuzz')
elif i % 3 == 0:
print('Tracing fizz branch')
tf.print('fizz')
elif i % 5 == 0:
print('Tracing buzz branch')
tf.print('buzz')
else:
print('Tracing default branch')
tf.print(i)
fizzbuzz(tf.constant(5))
fizzbuzz(tf.constant(20))
```
See the [reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/control_flow.md#if-statements) for additional restrictions on AutoGraph-converted if statements.
### Loops
AutoGraph will convert some `for` and `while` statements into the equivalent TensorFlow looping ops, like `tf.while_loop`. If not converted, the `for` or `while` loop is executed as a Python loop.
This substitution is made in the following situations:
- `for x in y`: if `y` is a Tensor, convert to `tf.while_loop`. In the special case where `y` is a `tf.data.Dataset`, a combination of `tf.data.Dataset` ops are generated.
- `while <condition>`: if `<condition>` is a Tensor, convert to `tf.while_loop`.
A Python loop executes during tracing, adding additional ops to the `tf.Graph` for every iteration of the loop.
A TensorFlow loop traces the body of the loop, and dynamically selects how many iterations to run at execution time. The loop body only appears once in the generated `tf.Graph`.
See the [reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/control_flow.md#while-statements) for additional restrictions on AutoGraph-converted `for` and `while` statements.
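As a small added sketch of the rules above, the loop over a Python `range` below is unrolled at trace time, whereas the loop over `tf.range` is converted to a single `tf.while_loop`:
```
@tf.function
def python_loop():
  s = tf.constant(0)
  for i in range(3):      # Python loop: unrolled during tracing (three add ops)
    s += i
  return s

@tf.function
def tf_loop():
  s = tf.constant(0)
  for i in tf.range(3):   # Tensor loop: converted to a single tf.while_loop
    s += i
  return s

print(python_loop(), tf_loop())
```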
#### Looping over Python data
A common pitfall is to loop over Python/NumPy data within a `tf.function`. This loop will execute during the tracing process, adding a copy of your model to the `tf.Graph` for each iteration of the loop.
If you want to wrap the entire training loop in `tf.function`, the safest way to do this is to wrap your data as a `tf.data.Dataset` so that AutoGraph will dynamically unroll the training loop.
```
def measure_graph_size(f, *args):
g = f.get_concrete_function(*args).graph
print("{}({}) contains {} nodes in its graph".format(
f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node)))
@tf.function
def train(dataset):
loss = tf.constant(0)
for x, y in dataset:
loss += tf.abs(y - x) # Some dummy computation.
return loss
small_data = [(1, 1)] * 3
big_data = [(1, 1)] * 10
measure_graph_size(train, small_data)
measure_graph_size(train, big_data)
measure_graph_size(train, tf.data.Dataset.from_generator(
lambda: small_data, (tf.int32, tf.int32)))
measure_graph_size(train, tf.data.Dataset.from_generator(
lambda: big_data, (tf.int32, tf.int32)))
```
When wrapping Python/NumPy data in a Dataset, be mindful of `tf.data.Dataset.from_generator` versus `tf.data.Dataset.from_tensors`. The former will keep the data in Python and fetch it via `tf.py_function` which can have performance implications, whereas the latter will bundle a copy of the data as one large `tf.constant()` node in the graph, which can have memory implications.
Reading data from files via `TFRecordDataset`, `CsvDataset`, etc. is the most effective way to consume data, as then TensorFlow itself can manage the asynchronous loading and prefetching of data, without having to involve Python. To learn more, see the [`tf.data`: Build TensorFlow input pipelines](../../guide/data) guide.
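The trade-off can be sketched as follows (the `data` array here is just a placeholder):
```
import numpy as np

data = np.arange(1000, dtype=np.int32)

# Keeps the data in Python; elements are fetched through tf.py_function at run time.
ds_from_generator = tf.data.Dataset.from_generator(
    lambda: data, output_types=tf.int32, output_shapes=())

# Embeds a copy of the data in the graph as one large constant.
ds_from_tensors = tf.data.Dataset.from_tensors(data)
```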
#### Accumulating values in a loop
A common pattern is to accumulate intermediate values from a loop. Normally, this is accomplished by appending to a Python list or adding entries to a Python dictionary. However, as these are Python side effects, they will not work as expected in a dynamically unrolled loop. Use `tf.TensorArray` to accumulate results from a dynamically unrolled loop.
```
batch_size = 2
seq_len = 3
feature_size = 4
def rnn_step(inp, state):
return inp + state
@tf.function
def dynamic_rnn(rnn_step, input_data, initial_state):
# [batch, time, features] -> [time, batch, features]
input_data = tf.transpose(input_data, [1, 0, 2])
max_seq_len = input_data.shape[0]
states = tf.TensorArray(tf.float32, size=max_seq_len)
state = initial_state
for i in tf.range(max_seq_len):
state = rnn_step(input_data[i], state)
states = states.write(i, state)
return tf.transpose(states.stack(), [1, 0, 2])
dynamic_rnn(rnn_step,
tf.random.uniform([batch_size, seq_len, feature_size]),
tf.zeros([batch_size, feature_size]))
```
## Limitations
TensorFlow `Function` has a few limitations by design that you should be aware of when converting a Python function to a `Function`.
### Executing Python side effects
Side effects, like printing, appending to lists, and mutating globals, can behave unexpectedly inside a `Function`, sometimes executing twice or not at all. They only happen the first time you call a `Function` with a set of inputs. Afterwards, the traced `tf.Graph` is reexecuted, without executing the Python code.
The general rule of thumb is to avoid relying on Python side effects in your logic and only use them to debug your traces. Otherwise, TensorFlow APIs like `tf.data`, `tf.print`, `tf.summary`, `tf.Variable.assign`, and `tf.TensorArray` are the best way to ensure your code will be executed by the TensorFlow runtime with each call.
```
@tf.function
def f(x):
print("Traced with", x)
tf.print("Executed with", x)
f(1)
f(1)
f(2)
```
If you would like to execute Python code during each invocation of a `Function`, `tf.py_function` is an exit hatch. The drawback of `tf.py_function` is that it's not portable or particularly performant, cannot be saved with SavedModel, and does not work well in distributed (multi-GPU, TPU) setups. Also, since `tf.py_function` has to be wired into the graph, it casts all inputs/outputs to tensors.
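A minimal added sketch of that escape hatch (the helper name `log_every_call` is hypothetical):
```
def log_every_call(x):
  # Ordinary Python: runs on every invocation, not just during tracing.
  print("Python saw:", x.numpy())
  return x

@tf.function
def f_with_py_function(x):
  return tf.py_function(log_every_call, inp=[x], Tout=tf.int32)

f_with_py_function(tf.constant(1))
f_with_py_function(tf.constant(2))
```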
#### Changing Python global and free variables
Changing Python global and [free variables](https://docs.python.org/3/reference/executionmodel.html#binding-of-names) counts as a Python side effect, so it only happens during tracing.
```
external_list = []
@tf.function
def side_effect(x):
print('Python side effect')
external_list.append(x)
side_effect(1)
side_effect(1)
side_effect(1)
# The list append only happened once!
assert len(external_list) == 1
```
You should avoid mutating containers like lists, dicts, and other objects that live outside the `Function`. Instead, use arguments and TF objects. For example, the section ["Accumulating values in a loop"](#accumulating_values_in_a_loop) has one example of how list-like operations can be implemented.
You can, in some cases, capture and manipulate state if it is a [`tf.Variable`](https://www.tensorflow.org/guide/variable). This is how the weights of Keras models are updated with repeated calls to the same `ConcreteFunction`.
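For instance, a small added sketch of the `tf.Variable` approach (the counter below is illustrative, not from the guide):
```
counter = tf.Variable(0)

@tf.function
def increment():
  counter.assign_add(1)   # Mutating a captured tf.Variable works on every call
  return counter.read_value()

print(increment())  # 1
print(increment())  # 2
```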
#### Using Python iterators and generators
Many Python features, such as generators and iterators, rely on the Python runtime to keep track of state. In general, while these constructs work as expected in eager mode, they are examples of Python side effects and therefore only happen during tracing.
```
@tf.function
def buggy_consume_next(iterator):
tf.print("Value:", next(iterator))
iterator = iter([1, 2, 3])
buggy_consume_next(iterator)
# This reuses the first value from the iterator, rather than consuming the next value.
buggy_consume_next(iterator)
buggy_consume_next(iterator)
```
Just like how TensorFlow has a specialized `tf.TensorArray` for list constructs, it has a specialized `tf.data.Iterator` for iteration constructs. See the section on [AutoGraph transformations](#autograph_transformations) for an overview. Also, the [`tf.data`](https://www.tensorflow.org/guide/data) API can help implement generator patterns:
```
@tf.function
def good_consume_next(iterator):
# This is ok, iterator is a tf.data.Iterator
tf.print("Value:", next(iterator))
ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
iterator = iter(ds)
good_consume_next(iterator)
good_consume_next(iterator)
good_consume_next(iterator)
```
### Deleting tf.Variables between `Function` calls
Another error you may encounter is a garbage-collected variable. `ConcreteFunction`s only retain [WeakRefs](https://docs.python.org/3/library/weakref.html) to the variables they close over, so you must retain a reference to any variables.
```
external_var = tf.Variable(3)
@tf.function
def f(x):
return x * external_var
traced_f = f.get_concrete_function(4)
print("Calling concrete function...")
print(traced_f(4))
# The original variable object gets garbage collected, since there are no more
# references to it.
external_var = tf.Variable(4)
print()
print("Calling concrete function after garbage collecting its closed Variable...")
with assert_raises(tf.errors.FailedPreconditionError):
traced_f(4)
```
### All outputs of a tf.function must be return values
With the exception of `tf.Variable`s, a tf.function must return all its
outputs. Attempting to directly access any tensors from a function without
going through return values causes "leaks".
For example, the function below "leaks" the tensor `a` through the Python
global `x`:
```
x = None
@tf.function
def leaky_function(a):
global x
x = a + 1 # Bad - leaks local tensor
return a + 2
correct_a = leaky_function(tf.constant(1))
print(correct_a.numpy()) # Good - value obtained from function's returns
with assert_raises(AttributeError):
x.numpy() # Bad - tensor leaked from inside the function, cannot be used here
print(x)
```
This is true even if the leaked value is also returned:
```
@tf.function
def leaky_function(a):
global x
x = a + 1 # Bad - leaks local tensor
return x # Good - uses local tensor
correct_a = leaky_function(tf.constant(1))
print(correct_a.numpy()) # Good - value obtained from function's returns
with assert_raises(AttributeError):
x.numpy() # Bad - tensor leaked from inside the function, cannot be used here
print(x)
@tf.function
def captures_leaked_tensor(b):
b += x # Bad - `x` is leaked from `leaky_function`
return b
with assert_raises(TypeError):
captures_leaked_tensor(tf.constant(2))
```
Usually, leaks such as these occur when you use Python statements or data structures.
In addition to leaking inaccessible tensors, such statements are also likely wrong because they count as Python side effects, and are not guaranteed to execute at every function call.
Common ways to leak local tensors also include mutating an external Python collection, or an object:
```
class MyClass:
def __init__(self):
self.field = None
external_list = []
external_object = MyClass()
def leaky_function():
a = tf.constant(1)
external_list.append(a) # Bad - leaks tensor
external_object.field = a # Bad - leaks tensor
```
## Known Issues
If your `Function` is not evaluating correctly, the error may be explained by these known issues which are planned to be fixed in the future.
### Depending on Python global and free variables
`Function` creates a new `ConcreteFunction` when called with a new value of a Python argument. However, it does not do that for the Python closure, globals, or nonlocals of that `Function`. If their value changes in between calls to the `Function`, the `Function` will still use the values they had when it was traced. This is different from how regular Python functions work.
For that reason, we recommend a functional programming style that uses arguments instead of closing over outer names.
```
@tf.function
def buggy_add():
return 1 + foo
@tf.function
def recommended_add(foo):
return 1 + foo
foo = 1
print("Buggy:", buggy_add())
print("Correct:", recommended_add(foo))
print("Updating the value of `foo` to 100!")
foo = 100
print("Buggy:", buggy_add()) # Did not change!
print("Correct:", recommended_add(foo))
```
You can close over outer names, as long as you don't update their values.
#### Depending on Python objects
The recommendation to pass Python objects as arguments into `tf.function` has a number of known issues that are expected to be fixed in the future. In general, you can rely on consistent tracing if you use a Python primitive or `tf.nest`-compatible structure as an argument or pass in a *different* instance of an object into a `Function`. However, `Function` will *not* create a new trace when you pass **the same object and only change its attributes**.
```
class SimpleModel(tf.Module):
def __init__(self):
# These values are *not* tf.Variables.
self.bias = 0.
self.weight = 2.
@tf.function
def evaluate(model, x):
return model.weight * x + model.bias
simple_model = SimpleModel()
x = tf.constant(10.)
print(evaluate(simple_model, x))
print("Adding bias!")
simple_model.bias += 5.0
print(evaluate(simple_model, x)) # Didn't change :(
```
Using the same `Function` to evaluate the updated instance of the model will be buggy since the updated model has the [same cache key](#rules_of_tracing) as the original model.
For that reason, we recommend that you write your `Function` to avoid depending on mutable object attributes or create new objects.
If that is not possible, one workaround is to make new `Function`s each time you modify your object to force retracing:
```
def evaluate(model, x):
return model.weight * x + model.bias
new_model = SimpleModel()
evaluate_no_bias = tf.function(evaluate).get_concrete_function(new_model, x)
# Don't pass in `new_model`, `Function` already captured its state during tracing.
print(evaluate_no_bias(x))
print("Adding bias!")
new_model.bias += 5.0
# Create new Function and ConcreteFunction since you modified new_model.
evaluate_with_bias = tf.function(evaluate).get_concrete_function(new_model, x)
print(evaluate_with_bias(x)) # Don't pass in `new_model`.
```
As [retracing can be expensive](https://www.tensorflow.org/guide/intro_to_graphs#tracing_and_performance), you can use `tf.Variable`s as object attributes, which can be mutated (but not changed, careful!) for a similar effect without needing a retrace.
```
class BetterModel:
def __init__(self):
self.bias = tf.Variable(0.)
self.weight = tf.Variable(2.)
@tf.function
def evaluate(model, x):
return model.weight * x + model.bias
better_model = BetterModel()
print(evaluate(better_model, x))
print("Adding bias!")
better_model.bias.assign_add(5.0) # Note: instead of better_model.bias += 5
print(evaluate(better_model, x)) # This works!
```
### Creating tf.Variables
`Function` only supports singleton `tf.Variable`s created once on the first call, and reused across subsequent function calls. The code snippet below would create a new `tf.Variable` in every function call, which results in a `ValueError` exception.
Example:
```
@tf.function
def f(x):
v = tf.Variable(1.0)
return v
with assert_raises(ValueError):
f(1.0)
```
A common pattern used to work around this limitation is to start with a Python None value, then conditionally create the `tf.Variable` if the value is None:
```
class Count(tf.Module):
def __init__(self):
self.count = None
@tf.function
def __call__(self):
if self.count is None:
self.count = tf.Variable(0)
return self.count.assign_add(1)
c = Count()
print(c())
print(c())
```
#### Using with multiple Keras optimizers
You may encounter `ValueError: tf.function only supports singleton tf.Variables created on the first call.` when using more than one Keras optimizer with a `tf.function`. This error occurs because optimizers internally create `tf.Variables` when they apply gradients for the first time.
```
opt1 = tf.keras.optimizers.Adam(learning_rate = 1e-2)
opt2 = tf.keras.optimizers.Adam(learning_rate = 1e-3)
@tf.function
def train_step(w, x, y, optimizer):
with tf.GradientTape() as tape:
L = tf.reduce_sum(tf.square(w*x - y))
gradients = tape.gradient(L, [w])
optimizer.apply_gradients(zip(gradients, [w]))
w = tf.Variable(2.)
x = tf.constant([-1.])
y = tf.constant([2.])
train_step(w, x, y, opt1)
print("Calling `train_step` with different optimizer...")
with assert_raises(ValueError):
train_step(w, x, y, opt2)
```
If you need to change the optimizer during training, a workaround is to create a new `Function` for each optimizer, calling the [`ConcreteFunction`](#obtaining_concrete_functions) directly.
```
opt1 = tf.keras.optimizers.Adam(learning_rate = 1e-2)
opt2 = tf.keras.optimizers.Adam(learning_rate = 1e-3)
# Not a tf.function.
def train_step(w, x, y, optimizer):
with tf.GradientTape() as tape:
L = tf.reduce_sum(tf.square(w*x - y))
gradients = tape.gradient(L, [w])
optimizer.apply_gradients(zip(gradients, [w]))
w = tf.Variable(2.)
x = tf.constant([-1.])
y = tf.constant([2.])
# Make a new Function and ConcreteFunction for each optimizer.
train_step_1 = tf.function(train_step).get_concrete_function(w, x, y, opt1)
train_step_2 = tf.function(train_step).get_concrete_function(w, x, y, opt2)
for i in range(10):
if i % 2 == 0:
train_step_1(w, x, y) # `opt1` is not used as a parameter.
else:
train_step_2(w, x, y) # `opt2` is not used as a parameter.
```
#### Using with multiple Keras models
You may also encounter `ValueError: tf.function only supports singleton tf.Variables created on the first call.` when passing different model instances to the same `Function`.
This error occurs because Keras models (which [do not have their input shape defined](https://www.tensorflow.org/guide/keras/custom_layers_and_models#best_practice_deferring_weight_creation_until_the_shape_of_the_inputs_is_known)) and Keras layers create `tf.Variable`s when they are first called. You may be attempting to initialize those variables inside a `Function`, which has already been called. To avoid this error, try calling `model.build(input_shape)` to initialize all the weights before training the model.
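A hedged sketch of that workaround (the model and input shape below are placeholders):
```
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 4))  # Creates the tf.Variables before any tracing happens

@tf.function
def predict(x):
  return model(x)

print(predict(tf.ones([2, 4])))
```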
## Further reading
To learn about how to export and load a `Function`, see the [SavedModel guide](../../guide/saved_model). To learn more about graph optimizations that are performed after tracing, see the [Grappler guide](../../guide/graph_optimization). To learn how to optimize your data pipeline and profile your model, see the [Profiler guide](../../guide/profiler.md).
|
github_jupyter
|
### Strings - Quotation Marks
```
# Quotation marks must be matching. Both of the following work.
good_string = "Hello, how are you?"
another_good_string = 'Hello, how are you?'
# These strings will not work (uncomment them to see the SyntaxError)
# bad_string = 'Don't do that'
# another_bad_string = "Don't do that'
# Notice you enclose the whole sentence in doubles if there is
# a single within the sentence.
solution_to_bad_string = "Don't do that."
# If for some reason you need both, escape with backslash
# 'She said, "Don't do that!"'
my_escape_string = 'She said, "Don\'t do that!"'
print(my_escape_string)
# Multiple line breaks
my_super_long_string = '''This is a
string that spans
multiple lines.'''
print(my_super_long_string)
# Another way for multiple line breaks
my_long_string = ('This is a\n'
'string that spans\n'
'multiple lines.')
print(my_long_string)
```
### String Type
```
# As with numeric, can assign strings to variables
# and can check the type
my_string = 'Hello World'
type(my_string)
```
### String Operators + and *
```
# Use + to add two strings together
one = 'Hello, my name is '
two = 'Erin'
my_name = one + two
print(my_name)
# Use * to repeat a string a number of times
# Notice that I told Python to add space between the strings
repeat_this = 'I will use descriptive variable names. '
repeat_this * 3
```
### String Methods
```
# Notice the space at the end when the string prints
'Repeating string ' * 3
# Let's use a method to get rid of that space
# Use dot notation to call a method
'Repeating string Repeating string Repeating string '.strip()
# Another example
# Notice it removed white space from both start and end
' Repeating string Repeating string Repeating string '.strip()
my_str_variable = 'this IS my STRING to PLAY around WITH.'
# .capitalize()
cap_str = my_str_variable.capitalize()
# .upper()
upp_str = my_str_variable.upper()
# .lower()
low_str = my_str_variable.lower()
# .replace()
new_str = my_str_variable.replace('STR', 'fl')
# .split()
split_str = my_str_variable.split()
print(cap_str)
print(upp_str)
print(low_str)
print(new_str)
print(split_str)
# Want to know all the methods available for strings?
# type your string then dot-tab
my_str_variable
```
### String Indexing
```
# Grab a specific character
my_str_variable = 'Test String'
second_char = my_str_variable[1]
sixth_char = my_str_variable[5]
last_char = my_str_variable[-1]
third_from_last_char = my_str_variable[-3]
# Notice the zero indexing
print(second_char)
print(sixth_char)
print(last_char)
print(third_from_last_char)
# Grab characters in some subset (range)
my_str_variable = 'Test String'
# This is called 'slicing'
subset1 = my_str_variable[1:3]
subset2 = my_str_variable[5:9]
subset3 = my_str_variable[-6:-1]
subset4 = my_str_variable[1:]
subset5 = my_str_variable[:-1]
# Start at index, print everything up to end index
# Inclusive on left, exclusive on right
print(subset1)
print(subset2)
print(subset3)
print(subset4)
print(subset5)
# Grab characters in steps
my_str_variable = 'Test String'
every_second = my_str_variable[::2]
every_third_between210 = my_str_variable[2:10:3]
print(every_second)
print(every_third_between210)
```
### String Looping
```
string_to_loop = 'Denver is better than Colorado Springs'
# find out the length of your string (the number of characters)
length_of_str = len(string_to_loop)
print(length_of_str)
# Loop through string with while loop
# define your variables
string_to_loop = "What's Up?"
length_of_str = len(string_to_loop)
idx = 0
# loop until condition is met
while idx < length_of_str:
print(string_to_loop[idx])
idx += 1
# Loop through string with for loop
# define variables
string_to_loop = "What's Up?"
length_of_str = len(string_to_loop)
# loop until end of string
for index in range(length_of_str):
print(string_to_loop[index])
# Notice the range() constructor: this tells the for
# loop how long to continue. Thus our for loop will
# continue for the length of the string
# The following for loop will do the same as above,
# but it's considered cleaner code
string_to_loop = "What's Up?"
for char in string_to_loop:
print(char)
```
### Zen of Python
```
# The Zen of Python
import this
```
### String Formatting
```
my_name = 'Sean'
print('Hello, my name is {}.'.format(my_name))
# Now you can just update one variable without
# having to retype the entire sentence
my_name = 'Erin'
print('Hello, my name is {}.'.format(my_name))
# .format() told Python to format the string
# the {} are the location to format.
# Multiple values to insert?
name_one = 'Sean'
name_two = 'Erin'
print('{1} is cooler than {0}.'.format(name_two, name_one))
# If you don't tell .format() the order, it will
# assume the order.
print('{} is cooler than {}.'.format(name_two, name_one))
# .format() can also accept numbers
# numbers can be formatted
print("To be precise, that's {:.1f} times.".format(2))
# Here, the {:.1f} told Python that you would pass
# it a number which you wanted to be a float
# with only one decimal place.
'hello this is a test'.split()
```
### Lists
```
# Create a list by hard coding things into it
# Notice: lists are enclosed with []
my_first_lst = [1, 'hello', 3, 'goodbye']
# Create a list by wrapping list() around
# something you want to split apart
my_second_lst = list('hello')
print(my_first_lst)
print(my_second_lst)
# You can also create lists of lists
list_of_lists = [[1,2,3], ['erin', 'bob']]
print(list_of_lists)
# look what happens when you index into a list of lists
print(list_of_lists[0])
# what about getting into an inside list?
print(list_of_lists[1][0])
```
### List Methods
```
my_list = [1,2,3,'erin']
# .tab-complete to see methods for lists
my_list
my_lst = [1, 2, 3, 4]
# add an element to list
my_lst.append(5)
print(my_lst)
# remove last element and print it
print(my_lst.pop())
print(my_lst)
# remove element from list
my_lst.remove(4)
print(my_lst)
# reverse order of list
my_lst.reverse()
print(my_lst)
# sort the list
my_lst.sort()
print(my_lst)
```
### List Iteration
```
# define a list
list_of_nums = [1,2,3,4,5,6]
# loop through list and print
for num in list_of_nums:
print(num)
# What if you need the index in a list?
# There's a special method for that called
# enumerate()
list_of_letters = ['a','b','c','d','e']
for index, letter in enumerate(list_of_letters):
print(index, letter)
# Class challenge answer
list_of_nums = [1,2,3,456,32,75]
for idx, num in enumerate(list_of_nums):
if (num % 3 == 0) and (num % 5 == 0):
print(idx, num, 'FizzBuzz')
elif num % 3 == 0:
print(idx, num, 'Fizz')
elif num % 5 == 0:
print(idx, num, 'Buzz')
else:
print(idx, num)
```
### List Comprehensions
```
# Let's transform this for loop into a comprehension loop
my_list = [1,2,3,4,5,6]
# create an empty list that you will populate
my_squares = []
# loop through your number list and append
# the square of each number to the new list
for num in my_list:
my_squares.append(num**2)
print(my_squares)
# Now let's do the same thing with a list comprehension
my_squares_comp = [num**2 for num in my_list]
# Walk through this on white board
print(my_squares)
print(my_squares_comp)
# What about building a list with a conditional?
my_num_list = [1,2,3,4,89,1234]
even_numbers = []
for num in my_num_list:
if num % 2 == 0:
even_numbers.append(num)
# Now with list comprehension
even_numbers_comp = [num for num in my_num_list if num % 2 == 0]
print(even_numbers)
print(even_numbers_comp)
# Class challenge question and answer
class_names = ['bob', 'sally', 'fred']
short_names = [ ]
for name in class_names:
if len(name) <= 3:
short_names.append(name)
short_names_comp = [name for name in class_names if len(name) <= 3]
print(short_names)
print(short_names_comp)
```
|
github_jupyter
|
```
import copy
if __name__ == '__main__':
%run Tests.ipynb
%run MoleculeGenerator2.ipynb
%run Discrim.ipynb
%run Rewards.ipynb
%run PPO_WITH_TRICKS.ipynb
%run ChemEnv.ipynb
%run SupervisedPreTraining.ipynb
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# wants: a single class for pretraining and rl training
# also want a singler logger for everything
# also should put in cross validation for the supervised portion
# means a logger instance in the init method
class SupervisedToReinforcement():
def __init__(self,run_title, rewards_list, chem_env_kwargs, PPO_kwargs, svw_kwargs):
self.run_title = run_title
self.writer = SummaryWriter(f'./tb_logs/{run_title}/{run_title}_logs')
self.reward_module = FinalRewardModule(self.writer,rewards_list)
chem_env_kwargs['num_chunks'] = train_kwargs['num_chunks']
chem_env_kwargs['RewardModule'] = self.reward_module
chem_env_kwargs['writer'] = self.writer
self.ChemEnv = ChemEnv(**chem_env_kwargs)
input_dim = chem_env_kwargs['num_node_feats']
#self.policy = Spin2(input_dim,300,chem_env_kwargs['num_atom_types']).cuda()
self.policy = BaseLine(input_dim,800,chem_env_kwargs['num_atom_types']+1).cuda()
self.policy.apply(init_weights_recursive)
svw_kwargs['writer'] = self.writer
svw_kwargs['input_dim'] = input_dim
svw_kwargs['num_atom_types'] = chem_env_kwargs['num_atom_types']
print(svw_kwargs)
self.svw = Supervised_Trainer(self.policy, **svw_kwargs)
PPO_kwargs['env'] = self.ChemEnv
PPO_kwargs['actor'] = self.policy
PPO_kwargs['writer'] = self.writer
self.PPO = PPO_MAIN(**PPO_kwargs)
self.PPO.to_device(device)
def Train(self,total_epochs, batch_size, epochs_per_chunk, num_chunks, PPO_steps, cv_path):
self.svw.TrainModel(total_epochs)
# torch.save({
# 'model_state_dict': self.policy.state_dict(),
# 'optimizer_state_dict': self.svw.optim.state_dict()
# }, f'./{self.run_title}/SavedModel')
print("fra")
# self.PPO.learn(PPO_steps)
%run SupervisedPreTraining.ipynb
# rewards_list = [SizeSynth_norm()]
# rewards_list = [Synthesizability(), SizeReward()]
rewards_list = [ Synthesizability()]
chem_env_kwargs = {'max_nodes' : 12,
'num_atom_types' : 17,
'num_node_feats' : 54,
'num_edge_types' : 3,
'bond_padding' : 12,
'mol_featurizer': mol_to_graph_full,
'RewardModule' : None,
'writer' : None}
PPO_kwargs = {'env' : None,
'batch_size' : 32,
'timesteps_per_batch' : 1200,
'clip' : 0.08,
'a_lr' : 1e-4,
'c_lr' : 3e-4,
'n_updates_per_iteration' : 6,
'max_timesteps_per_episode' : 40,
'gamma' : .95,
'actor' : None}
svw_kwargs = {'batch_size' : 128, 'data_set_size' : 507528}
train_kwargs = {'total_epochs' : 15,
'batch_size' : 256,
'epochs_per_chunk' : 1,
'num_chunks' : 0,
'cv_path' : './CrossVal/chunk_11',
'PPO_steps' : 150000}
%run ChemEnv.ipynb
svtr = SupervisedToReinforcement('test_18',rewards_list,chem_env_kwargs,PPO_kwargs,svw_kwargs)
svtr.Train(**train_kwargs)
Chem.MolFromSmiles('CCCN(CC)C(=O)S')
svtr.PPO.inference()
env = svtr.ChemEnv
env.assignMol(Chem.MolFromSmiles('CCC(C)C(=O)O'))
print(env.last_action_node)
env.StateSpace
env.step(0,verbose=True)
env.StateSpace
PPO_kwargs = {'env' : env,
'batch_size' : 32,
'timesteps_per_batch' : 1200,
'clip' : 0.08,
'a_lr' : 1e-4,
'c_lr' : 3e-4,
'n_updates_per_iteration' : 6,
'max_timesteps_per_episode' : 40,
'gamma' : .95,
'actor' : svtr.svw.policy,
'writer': SummaryWriter(f'./tb_logs/3/3_logs')}
ppo_test = PPO_MAIN(**PPO_kwargs)
ppo_test.inference()
Chem.MolFromSmiles('CCC(C(N)=O)N1CC(C)CC1=O')
Chem.MolFromSmiles('CCCNC(=O)n1ccnc1C')
env.assignMol(Chem.MolFromSmiles('C.C'))
env.step(19,verbose=True)
env.StateSpace
chem_env_kwargs = {'max_nodes' : 12,
'num_atom_types' : 17,
'num_node_feats' : 54,
'num_edge_types' : 3,
'bond_padding' : 12,
'mol_featurizer': mol_to_graph_full,
'RewardModule' : rewards_list,
'writer' : SummaryWriter(f'./tb_logs/3/3_logs'),
'num_chunks': 1}
%run ChemEnv.ipynb
env = ChemEnv(**chem_env_kwargs)
env.assignMol(Chem.MolFromSmiles('CCC.N'))
env.step(2, verbose=True)
env.StateSpace
ppo_test = PPO_MAIN(**PPO_kwargs)
svtr.PPO.actor = svtr.policy
Chem.MolFromSmiles('CCC.N')
ppo_test.inference(True)
torch.save({
'model_state_dict': svtr.policy.state_dict(),
'optimizer_state_dict': svtr.svw.optim.state_dict()
}, './test_1/ah')
svtr.policy.state_dict()
model = Spin2(54,300,17)
model.load_state_dict(svtr.policy.state_dict())
%run ChemEnv.ipynb
svtr = SupervisedToReinforcement('test',rewards_list,chem_env_kwargs,PPO_kwargs)
env = svtr.ChemEnv
svtr.PPO.inference()
torch.save(svtr.PPO.actor.state_dict(), './model')
env = svtr.ChemEnv
env.reset()
env.step(14)
env.step(17)
env.step(14)
env.StateSpace
(Chem.MolFromSmiles('NCc1cccc([SH]=O)c1', sanitize = True))
Chem.MolFromSmiles('Nc1cc2ccc1SSC(S)C2O.c1ccnc1', sanitize = False)
env.reset()
#env.StateSpace = Chem.RWMol(Chem.MolFromSmiles('Nc1cc2ccc1SSC(S)C2O.c1ccnc1', sanitize = False))
#env.step(16)
#env.addEdge(1,0)
env.addBenzine()
env.addEdge(1,0)
env.StateSpace
env.addPyrrole()
env.addEdge(1,11)
# env.StateSpace.RemoveAtom(17)
# env.StateSpace.RemoveAtom(16)
# env.StateSpace.RemoveAtom(15)
# env.StateSpace.RemoveAtom(14)
# env.StateSpace.RemoveAtom(13)
#Chem.SanitizeMol(env.StateSpace)
env.StateSpace
for atom in env.StateSpace.GetAtoms():
print(atom.GetDegree(),atom.GetSymbol(),atom.GetIsAromatic())
t_mol = Chem.RWMol(Chem.MolFromSmiles('FC(CBr)c1ccccc1',sanitize = True))
t_mol
env.reset()
env.addBenzine()
env.addEdge(2,0)
env.StateSpace
t_mol = Chem.RWMol(Chem.MolFromSmiles('FC(CBr)c1ccccc1',sanitize = True))
env = svtr.ChemEnv
env.reset()
env.StateSpace = t_mol
# env.StateSpace
env.addEdge(2,7)
env.StateSpace
env = svtr.ChemEnv
env.reset()
# env.addPyrrole()
env.addBenzine()
env.addEdge(1,2)
# env.addNode('C')
# env.addEdge(2,4)
#env.addNode('C')
#env.addEdge(1,3)
env.StateSpace
mol2 = SanitizeNoKEKU(mol2)
mol2
mol2 = Chem.RWMol(Chem.MolFromSmiles('O=CC(=Bc1ccccc1P)P(Br)c1ccccc1.[NaH]', sanitize = True))
mol1 = Chem.RWMol(Chem.MolFromSmiles('CC.c1ccnc1', sanitize = False))
mol2.UpdatePropertyCache()
#mol2.AddAtom(Chem.Atom('C'))
#mol2.AddBond(0,5,Chem.BondType.SINGLE)
# print(mol2.NeedsUpdatePropertyCache())
# mol2.UpdatePropertyCache()
Chem.SanitizeMol(mol2)
mol1.AddBond(0,5,Chem.BondType.SINGLE)
Chem.SanitizeMol(mol1)
mol2
for atom in mol2.GetAtoms():
print(atom.GetSymbol(),atom.GetImplicitValence())
SanitizeNoKEKU(mol2)
cycles = list(mol2.GetRingInfo().AtomRings())
for cycle in cycles:
for atom_idx in cycle:
bonds = mol2.GetAtomWithIdx(atom_idx).GetBonds()
for bond_x in bonds:
if bond_x.GetBondType() == Chem.BondType.DOUBLE:
print("fraraf")
for atom in mol2.GetAtoms():
atom.UpdatePropertyCache()
print(atom.GetExplicitValence())
for bond in atom.GetBonds():
print(bond.GetBondType())
#env.reset()
env.addPyrrole()
env.StateSpace
env.step(17)
mol = Chem.MolFromSmiles('n1cccc1', sanitize = False)
mol.UpdatePropertyCache()
for bond in mol.GetBonds():
print(bond.GetBondType())
mol
Chem.MolFromSmiles('[nH]1cccc1')
def SanitizeNoKEKU(mol):
s_dict = {'SANITIZE_ADJUSTHS': Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS,
'SANITIZE_ALL': Chem.rdmolops.SanitizeFlags.SANITIZE_ALL,
'SANITIZE_CLEANUP': Chem.rdmolops.SanitizeFlags.SANITIZE_CLEANUP,
'SANITIZE_CLEANUPCHIRALITY': Chem.rdmolops.SanitizeFlags.SANITIZE_CLEANUPCHIRALITY,
'SANITIZE_FINDRADICALS': Chem.rdmolops.SanitizeFlags.SANITIZE_FINDRADICALS,
'SANITIZE_KEKULIZE': Chem.rdmolops.SanitizeFlags.SANITIZE_KEKULIZE,
'SANITIZE_NONE': Chem.rdmolops.SanitizeFlags.SANITIZE_NONE,
'SANITIZE_PROPERTIES': Chem.rdmolops.SanitizeFlags.SANITIZE_PROPERTIES,
'SANITIZE_SETAROMATICITY': Chem.rdmolops.SanitizeFlags.SANITIZE_SETAROMATICITY,
'SANITIZE_SETCONJUGATION': Chem.rdmolops.SanitizeFlags.SANITIZE_SETCONJUGATION,
'SANITIZE_SETHYBRIDIZATION': Chem.rdmolops.SanitizeFlags.SANITIZE_SETHYBRIDIZATION,
'SANITIZE_SYMMRINGS': Chem.rdmolops.SanitizeFlags.SANITIZE_SYMMRINGS}
#mol = Chem.SanitizeMol(mol,s_dict['SANITIZE_KEKULIZE'])
mol = Chem.SanitizeMol(mol, s_dict['SANITIZE_ADJUSTHS'] | s_dict['SANITIZE_SETAROMATICITY'] |
s_dict['SANITIZE_CLEANUP'] | s_dict['SANITIZE_CLEANUPCHIRALITY'] |
s_dict['SANITIZE_FINDRADICALS'] | s_dict['SANITIZE_NONE'] |
s_dict['SANITIZE_PROPERTIES'] | s_dict['SANITIZE_SETCONJUGATION'] |
s_dict['SANITIZE_SETHYBRIDIZATION'] | s_dict['SANITIZE_SYMMRINGS']
)
return mol
True | False
mol = Chem.RWMol(Chem.MolFromSmiles('CC.c1ccnc1', sanitize = False))
#mol.AddBond(8,mol.GetNumAtoms()-1,Chem.BondType.SINGLE)
print(SanitizeNoKEKU(mol))
print(mol.GetAromaticAtoms().__len__())
mol
from rdkit import Chem
m = Chem.MolFromSmiles('CN(C)(C)C', sanitize=False)
problems = Chem.DetectChemistryProblems(m)
print(len(problems))
m
SanitizeNoKEKU(m)
Chem.SanitizeFlags.SANITIZE_ADJUSTHS
print(problems[0].GetType())
#print(problems[0].GetAtomIdx())
print(problems[0].Message())
Chem.MolFromSmiles('CN1C=CC=CC1=O')
Chem.MolFromSmiles('CN(C)(C)C', sanitize=False)
# wants: a single class for pretraining and rl training
# also want a singler logger for everything
# also should put in cross validation for the supervised portion
# means a logger instance in the init method
class SupervisedToReinforcement():
def __init__(self, PPO_env, PPO_Train_Steps, policy_model,rewards, run_title):
        self.writer = SummaryWriter(f'./{run_title}')
        self.reward_module = FinalRewardModule(self.writer, rewards)
self.PPO_env = PPO_env
self.PPO_Train_Steps = PPO_Train_Steps
self.SV_trainer = Supervised_trainer(policy_model)
self.SV_trainer.writer = self.writer
self.PPO_env.env.RewardModule = self.reward_module
        self.PPO_env.actor = policy_model
    def Train(self):
        self.SV_trainer.Train(20, 16, 1, 24)
self.PPO_env.learn(self.PPO_Train_Steps)
class AdversarialTraining():
def __init__(self, PPO_agent,Disc, epochs, G_steps,
D_steps, K, G_pretrain_steps, D_train_size,
D_batch_size,pre_train_env, smiles_values):
self.PPO_agent = PPO_agent
self.Disc = Disc
self.epochs = epochs
self.G_steps = G_steps
self.D_steps = D_steps
        self.K = K
        self.G_pretrain_steps = G_pretrain_steps
self.pre_train_env = pre_train_env
self.D_batch_size = D_batch_size
self.D_train_size = D_train_size
self.smiles_values = smiles_values
def mini_batch_reward_train(self, batch_size, num_batch):
for j in range(num_batch):
graphs = self.PPO_agent.generate_graphs(batch_size)
for model in self.reward_models:
model.TrainOnBatch(graphs)
def _preTrain(self):
        # PPO_MAIN positional arguments, for reference: env, batch_size, timesteps_per_batch,
        # clip, a_lr, c_lr, n_updates_per_iteration, max_timesteps_per_episode, gamma
t_dict = vars(self.PPO_agent)
PPO_agent_pre = PPO_MAIN(t_dict['env'],t_dict['batch_size'],t_dict['timesteps_per_batch'],
t_dict['clip'],t_dict['a_lr'], t_dict['c_lr'],
t_dict['n_updates_per_iteration'],t_dict['max_timesteps_per_episode'],
t_dict['gamma'])
        PPO_agent_pre.learn(self.G_pretrain_steps)
self.PPO_agent.assignActor(PPO_agent_pre.actor)
def pull_real_samples(self, g_number):
graphs = smiles_to_graph([self.smiles_values[random.randint(0,len(self.smiles_values))] for _ in range(g_number)])
print(len(graphs), "graph len")
return graphs
def i_hate_python(self):
a = self.PPO_agent.generate_graphs(10)
def train(self, epochs):
self._preTrain()
for epoch in range(epochs):
print('G_train')
            self.PPO_agent.learn(self.G_steps)
print('D_train')
for d_step in range(self.D_steps):
x_fake = self.PPO_agent.generate_graphs(self.D_steps)
x_real = self.pull_real_samples(self.D_train_size)
for k_step in range(self.K):
slices = list(range(0,self.D_train_size,self.D_batch_size)) + [self.D_train_size]
for idx in range(1,len(slices)):
slice_= slice(slices[idx-1],slices[idx])
print(slice_)
x_fake_batch = x_fake[slice_]
if x_fake_batch != []:
Y_fake_batch = torch.zeros(len(x_fake_batch),1)
x_real_batch = x_real[slice_]
Y_real_batch = torch.ones(len(x_real_batch),1)
self.Disc.train(x_fake_batch, Y_fake_batch)
self.Disc.train(x_real_batch,Y_real_batch)
```
|
github_jupyter
|
Lambda School Data Science, Unit 2: Predictive Modeling
# Applied Modeling, Module 3
### Objective
- Visualize and interpret partial dependence plots
### Links
- [Kaggle / Dan Becker: Machine Learning Explainability - Partial Dependence Plots](https://www.kaggle.com/dansbecker/partial-plots)
- [Christoph Molnar: Interpretable Machine Learning - Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) + [animated explanation](https://twitter.com/ChristophMolnar/status/1066398522608635904)
### Three types of model explanations this unit:
#### 1. Global model explanation: all features in relation to each other _(Last Week)_
- Feature Importances: _Default, fastest, good for first estimates_
- Drop-Column Importances: _The best in theory, but much too slow in practice_
- Permutation Importances: _A good compromise!_
#### 2. Global model explanation: individual feature(s) in relation to target _(Today)_
- Partial Dependence plots
#### 3. Individual prediction explanation _(Tomorrow)_
- Shapley Values
_Note that the coefficients from a linear model give you all three types of explanations!_
### Setup
#### If you're using [Anaconda](https://www.anaconda.com/distribution/) locally
Install required Python packages, if you haven't already:
- [category_encoders](https://github.com/scikit-learn-contrib/categorical-encoding), version >= 2.0: `conda install -c conda-forge category_encoders` / `pip install category_encoders`
- [PDPbox](https://github.com/SauceCat/PDPbox): `pip install pdpbox`
- [Plotly](https://medium.com/plotly/plotly-py-4-0-is-here-offline-only-express-first-displayable-anywhere-fc444e5659ee), version >= 4.0
```
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python package:
# category_encoders, version >= 2.0
!pip install --upgrade category_encoders pdpbox plotly
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Applied-Modeling.git
!git pull origin master
# Change into directory for module
os.chdir('module3')
```
## Lending Club: Predict interest rate
```
import pandas as pd
# Stratified sample, 10% of expired Lending Club loans, grades A-D
# Source: https://www.lendingclub.com/info/download-data.action
history_location = '../data/lending-club/lending-club-subset.csv'
history = pd.read_csv(history_location)
history['issue_d'] = pd.to_datetime(history['issue_d'], infer_datetime_format=True)
# Just use 36 month loans
history = history[history.term==' 36 months']
# Index & sort by issue date
history = history.set_index('issue_d').sort_index()
# Clean data, engineer feature, & select subset of features
history = history.rename(columns=
{'annual_inc': 'Annual Income',
'fico_range_high': 'Credit Score',
'funded_amnt': 'Loan Amount',
'title': 'Loan Purpose'})
history['Interest Rate'] = history['int_rate'].str.strip('%').astype(float)
history['Monthly Debts'] = history['Annual Income'] / 12 * history['dti'] / 100
columns = ['Annual Income',
'Credit Score',
'Loan Amount',
'Loan Purpose',
'Monthly Debts',
'Interest Rate']
history = history[columns]
history = history.dropna()
# Test on the last 10,000 loans,
# Validate on the 10,000 before that,
# Train on the rest
test = history[-10000:]
val = history[-20000:-10000]
train = history[:-20000]
# Assign to X, y
target = 'Interest Rate'
features = history.columns.drop('Interest Rate')
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
# The target has some right skew.
# It's not bad, but we'll log transform anyways
%matplotlib inline
import seaborn as sns
sns.distplot(y_train);
# Log transform the target
import numpy as np
y_train_log = np.log1p(y_train)
y_val_log = np.log1p(y_val)
y_test_log = np.log1p(y_test)
# Plot the transformed target's distribution
sns.distplot(y_train_log);
```
### Fit Linear Regression model, with original target
```
import category_encoders as ce
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
lr = make_pipeline(
ce.OrdinalEncoder(), # Not ideal for Linear Regression
StandardScaler(),
LinearRegression()
)
lr.fit(X_train, y_train)
print('Linear Regression R^2', lr.score(X_val, y_val))
```
### Fit Gradient Boosting model, with log transformed target
```
from xgboost import XGBRegressor
gb = make_pipeline(
ce.OrdinalEncoder(),
XGBRegressor(n_estimators=200, objective='reg:squarederror', n_jobs=-1)
)
gb.fit(X_train, y_train_log)
# print('Gradient Boosting R^2', gb.score(X_val, y_val_log))
# Convert back away from log space
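# A hedged sketch of that conversion: predict in log space, undo log1p with expm1,
# then score against the original-scale target (r2_score is scikit-learn's).
from sklearn.metrics import r2_score
preds_val = np.expm1(gb.predict(X_val))
print('Gradient Boosting R^2 (original units)', r2_score(y_val, preds_val))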
```
### Explaining Linear Regression
```
example = X_val.iloc[[0]]
example
pred = lr.predict(example)[0]
print(f'Predicted Interest Rate: {pred:.2f}%')
def predict(model, example, log=False):
print('Vary income, hold other features constant', '\n')
example = example.copy()
preds = []
for income in range(20000, 200000, 20000):
example['Annual Income'] = income
pred = model.predict(example)[0]
if log:
pred = np.expm1(pred)
print(f'Predicted Interest Rate: {pred:.3f}%')
print(example.to_string(), '\n')
preds.append(pred)
print('Difference between predictions')
print(np.diff(preds))
predict(lr, example)
example2 = X_val.iloc[[2]]
predict(lr, example2);
```
### Explaining Gradient Boosting???
```
predict(gb, example, log=True)
predict(gb, example2, log=True)
```
## Partial Dependence Plots
From [PDPbox documentation](https://pdpbox.readthedocs.io/en/latest/):
>**The common headache**: When using black box machine learning algorithms like random forest and boosting, it is hard to understand the relations between predictors and model outcome. For example, in terms of random forest, all we get is the feature importance. Although we can know which feature is significantly influencing the outcome based on the importance calculation, it really sucks that we don't know in which direction it is influencing. And in most of the real cases, the effect is non-monotonic. We need some powerful tools to help understanding the complex relations between predictors and model prediction.
[Animation by Christoph Molnar](https://twitter.com/ChristophMolnar/status/1066398522608635904), author of [_Interpretable Machine Learning_](https://christophm.github.io/interpretable-ml-book/pdp.html#examples)
> Partial dependence plots show how a feature affects predictions of a Machine Learning model on average.
> 1. Define grid along feature
> 2. Model predictions at grid points
> 3. Line per data instance -> ICE (Individual Conditional Expectation) curve
> 4. Average curves to get a PDP (Partial Dependence Plot)
```
%matplotlib inline
import matplotlib.pyplot as plt
examples = pd.concat([example, example2])
for income in range(20000, 200000, 20000):
examples['Annual Income'] = income
preds_log = gb.predict(examples)
preds = np.expm1(preds_log)
for pred in preds:
plt.scatter(income, pred, color='grey')
plt.scatter(income, np.mean(preds), color='red')
```
## Partial Dependence Plots with 1 feature
#### PDPbox
- [Gallery](https://github.com/SauceCat/PDPbox#gallery)
- [API Reference: pdp_isolate](https://pdpbox.readthedocs.io/en/latest/pdp_isolate.html)
- [API Reference: pdp_plot](https://pdpbox.readthedocs.io/en/latest/pdp_plot.html)
```
# Later, when you save matplotlib images to include in blog posts or web apps,
# increase the dots per inch (double it), so the text isn't so fuzzy
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 72
```
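As a minimal sketch (assuming PDPbox's 0.2.x `pdp_isolate`/`pdp_plot` API and reusing the fitted steps of the `gb` pipeline from above; the step names come from `make_pipeline`), a one-feature plot for the Lending Club model could look like this. Note the y axis is in log1p space because `gb` was fit on the log-transformed target.
```
from pdpbox import pdp

# Reuse the fitted encoder and booster from the gb pipeline
encoder = gb.named_steps['ordinalencoder']
booster = gb.named_steps['xgbregressor']
X_val_encoded = encoder.transform(X_val)

feature = 'Annual Income'
isolated = pdp.pdp_isolate(
    model=booster,
    dataset=X_val_encoded,
    model_features=X_val_encoded.columns,
    feature=feature
)
pdp.pdp_plot(isolated, feature);
```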
#### You can customize it
PDPbox
- [API Reference: PDPIsolate](https://pdpbox.readthedocs.io/en/latest/PDPIsolate.html)
```
```
## Partial Dependence Plots with 2 features
See interactions!
PDPbox
- [Gallery](https://github.com/SauceCat/PDPbox#gallery)
- [API Reference: pdp_interact](https://pdpbox.readthedocs.io/en/latest/pdp_interact.html)
- [API Reference: pdp_interact_plot](https://pdpbox.readthedocs.io/en/latest/pdp_interact_plot.html)
Be aware of a bug in PDPbox version <= 0.2.0:
- With the `pdp_interact_plot` function, `plot_type='contour'` gets an error, but `plot_type='grid'` works
- This will be fixed in the next release of PDPbox: https://github.com/SauceCat/PDPbox/issues/40
```
```
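Under the same assumptions as the single-feature sketch above (PDPbox 0.2.x, the fitted `gb` pipeline), a two-feature interaction plot might look like the following; `plot_type='grid'` sidesteps the contour bug noted above.
```
from pdpbox import pdp

encoder = gb.named_steps['ordinalencoder']
booster = gb.named_steps['xgbregressor']
X_val_encoded = encoder.transform(X_val)

features_to_plot = ['Annual Income', 'Credit Score']
interaction = pdp.pdp_interact(
    model=booster,
    dataset=X_val_encoded,
    model_features=X_val_encoded.columns,
    features=features_to_plot
)
pdp.pdp_interact_plot(interaction, feature_names=features_to_plot, plot_type='grid');
```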
### 3D with Plotly!
```
```
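One hedged way to get a 3D view, assuming Plotly >= 4.0 and the `interaction` object from the sketch above: pivot PDPbox's long-format results into a grid and hand it to `go.Surface`.
```
import plotly.graph_objects as go

# PDPbox stores the interaction results long-format in interaction.pdp
pdp_grid = interaction.pdp.pivot_table(
    values='preds',
    columns=features_to_plot[0],
    index=features_to_plot[1]
)

fig = go.Figure(go.Surface(x=pdp_grid.columns, y=pdp_grid.index, z=pdp_grid.values))
fig.update_layout(scene=dict(
    xaxis_title=features_to_plot[0],
    yaxis_title=features_to_plot[1],
    zaxis_title='Predicted log1p(Interest Rate)'
))
fig.show()
```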
# Partial Dependence Plots with categorical features
1. I recommend you use Ordinal Encoder, outside of a pipeline, to encode your data first. Then use the encoded data with pdpbox.
2. There's some extra work to get readable category names on your plot, instead of integer category codes.
```
# Fit a model on Titanic data
import category_encoders as ce
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
df = sns.load_dataset('titanic')
df.age = df.age.fillna(df.age.median())
df = df.drop(columns='deck')
df = df.dropna()
target = 'survived'
features = df.columns.drop(['survived', 'alive'])
X = df[features]
y = df[target]
# Use Ordinal
encoder = ce.OrdinalEncoder()
X_encoded = encoder.fit_transform(X)
model = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
model.fit(X_encoded, y)
# Use Pdpbox
%matplotlib inline
import matplotlib.pyplot as plt
from pdpbox import pdp
feature = 'sex'
pdp_dist = pdp.pdp_isolate(model=model, dataset=X_encoded, model_features=features, feature=feature)
pdp.pdp_plot(pdp_dist, feature);
# Look at the encoder's mappings
encoder.mapping
pdp.pdp_plot(pdp_dist, feature)
# Manually change the xticks labels
plt.xticks([1, 2], ['male', 'female']);
# Let's automate it
feature = 'sex'
for item in encoder.mapping:
if item['col'] == feature:
feature_mapping = item['mapping']
feature_mapping = feature_mapping[feature_mapping.index.dropna()]
category_names = feature_mapping.index.tolist()
category_codes = feature_mapping.values.tolist()
# Use Pdpbox
%matplotlib inline
import matplotlib.pyplot as plt
from pdpbox import pdp
feature = 'sex'
pdp_dist = pdp.pdp_isolate(model=model, dataset=X_encoded, model_features=features, feature=feature)
pdp.pdp_plot(pdp_dist, feature)
# Automatically change the xticks labels
plt.xticks(category_codes, category_names);
# Use the pdp module's interact functions (only `pdp` was imported above)
features = ['sex', 'age']
interaction = pdp.pdp_interact(
    model=model,
    dataset=X_encoded,
    model_features=X_encoded.columns,
    features=features
)
pdp.pdp_interact_plot(interaction, plot_type='grid', feature_names=features);
# Pivot into a wide table; use a new name so we don't shadow the pdp module
pdp_table = interaction.pdp.pivot_table(
    values='preds',
    columns=features[0], # First feature on x axis
    index=features[1]    # Next feature on y axis
)[::-1] # Reverse the index order so y axis is ascending
pdp_table = pdp_table.rename(columns=dict(zip(category_codes, category_names)))
plt.figure(figsize=(10,8))
sns.heatmap(pdp_table, annot=True, fmt='.2f', cmap='viridis')
plt.title('Partial Dependence of Titanic survival, on sex & age');
```
# Obtaining movie data, API-testing
```
# open questions:
# API only allows 1k requests per day..
# initial load (static database) or load on request, maybe another API required then?
# regular updates?
import requests
import pandas as pd
```
# get imdb ids
```
# uses links.csv, a list of random imdbIds from https://grouplens.org/datasets/movielens/ , to obtain imdb ids,
# then loops through some of them and puts them into a list
def get_ids(n):
dtype_dic= {'movieId': str,'imdbId' : str, "tmdbId": str}
IDdf = pd.read_csv("data/temp_links.csv", dtype = dtype_dic)
#IMDB IDs to eventually be used as index
idlist = list(IDdf["imdbId"].head(n))
return idlist
imdbIDs = get_ids(500)
imdbIDs
```
# get data from omdb
```
# http://www.omdbapi.com/
# API-key d3de5220
# max # of requests per day ~ 1k
# Receiving data from API and putting it into df
def get_data_from_omdb(imdbIDs):
df0 = pd.DataFrame()
for id in imdbIDs:
url = f"http://www.omdbapi.com/?i=tt{id}&apikey=d3de5220"
result = requests.get(url)
j = result.json()
df_single_movie = pd.DataFrame(j)
df0 = pd.concat([df0, df_single_movie])
return df0
def perform_cleaning(df):
# turns date of release into date format
df["Released"] = pd.to_datetime(df["Released"])
#converting "xx mins" into "xx"
def get_mins(x):
y = x.replace(" min", "")
return y
df["Runtime"] = df["Runtime"].apply(get_mins)
df["Runtime"] = pd.to_numeric(df["Runtime"])
    # drops duplicates: OMDB returns 'Ratings' as a list of (usually three) rating sources,
    # so pd.DataFrame(j) creates one row per rating source for the same movie
df0 = df.drop_duplicates("imdbID", keep = "first", inplace = False)
return df0
df_raw = get_data_from_omdb(imdbIDs)
df = df_raw.copy()
df = perform_cleaning(df_raw)
df.to_csv("data/OMDB.csv", index = False)
```
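A hedged alternative to dropping duplicates afterwards: OMDB returns `Ratings` as a list of source/value dicts, which is what fans each movie out into several rows when the raw JSON is passed to `pd.DataFrame`. The sketch below (hypothetical `get_data_from_omdb_flat`, untested against the live API) pops that key first so each movie becomes exactly one row.
```
def get_data_from_omdb_flat(imdbIDs):
    rows = []
    for id in imdbIDs:
        url = f"http://www.omdbapi.com/?i=tt{id}&apikey=d3de5220"
        j = requests.get(url).json()
        # "Ratings" is a list of {"Source": ..., "Value": ...} dicts;
        # drop it so pd.DataFrame builds one row per movie
        j.pop("Ratings", None)
        rows.append(j)
    return pd.DataFrame(rows)
```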
# "parked" code for now
```
#df0 = pd.read_csv("data/OMDB.csv")
#df0.info()
#df = pd.read_csv("data/OMDB.csv", dtype = df_dtypes)
#df.head(3)
# provide a list of datatypes that the columns shall have --> leading zeros?
'''
df_columns = ["Title", 'Year', 'Rated', 'Released', 'Runtime', 'Genre', 'Director','Writer','Actors','Plot','Language', \
'Country','Awards', 'Poster', 'Ratings','Metascore','imdbRating','imdbVotes','imdbID','Type','DVD',\
'BoxOffice','Production','Website','Response']
df_dtypes = {'Title': str,'Year' : int, "Rated": str, "Released" : str, "Runtime": int, "Genre": str, "Director": str, \
"Writer": str, "Actors": str, "Plot": str, "Language": str, "Country": str, "Awards": str, "Poster": str, \
"Ratings": str, "Metascore": int, "imdbRating": str, "imdbVotes": str, "imdbID": str, "Type": str, \
"DVD": str, "BoxOffice": str, "Production": str, "Website": str, "Response": str}
'''
```
# MLP ORF to GenCode
Use GenCode 38 and length-restricted data.
Use model pre-trained on Simulated ORF.
```
import time
def show_time():
t = time.time()
print(time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t)))
show_time()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Flatten,TimeDistributed
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import sys
IN_COLAB = False
try:
from google.colab import drive
IN_COLAB = True
except:
pass
if IN_COLAB:
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import ORF_counter
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py')
with open('GenCodeTools.py', 'w') as f:
f.write(r.text)
from GenCodeTools import GenCodeLoader
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py')
with open('KmerTools.py', 'w') as f:
f.write(r.text)
from KmerTools import KmerTools
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/DataPrep.py')
with open('DataPrep.py', 'w') as f:
f.write(r.text)
from DataPrep import DataPrep
else:
print("CoLab not working. On my PC, use relative paths.")
DATAPATH='data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import ORF_counter
from SimTools.GenCodeTools import GenCodeLoader
from SimTools.KmerTools import KmerTools
from SimTools.DataPrep import DataPrep
BESTMODELPATH=DATAPATH+"BestModel-304"
LASTMODELPATH=DATAPATH+"LastModel"
```
## Data Load
```
PC_TRAINS=1000
NC_TRAINS=1000
PC_TESTS=40000
NC_TESTS=40000
PC_LENS=(200,4000)
NC_LENS=(200,4000) # Wen used 3500 for hyperparameter, 3000 for train
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
MAX_K = 3
INPUT_SHAPE=(None,84) # 4^3 + 4^2 + 4^1
NEURONS=32
DROP_RATE=0.30
EPOCHS=200
SPLITS=3
FOLDS=3
show_time()
loader=GenCodeLoader()
loader.set_label(1)
loader.set_check_utr(False) # not ORF-restricted
loader.set_check_size(*PC_LENS) # length-restricted
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
loader.set_label(0)
loader.set_check_utr(False)
loader.set_check_size(*NC_LENS) # length-restricted
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
show_time()
def dataframe_extract_sequence(df):
return df['sequence'].tolist()
pc_all = dataframe_extract_sequence(pcdf)
nc_all = dataframe_extract_sequence(ncdf)
pcdf=None
ncdf=None
show_time()
print("PC seqs pass filter:",len(pc_all),type(pc_all))
print("NC seqs pass filter:",len(nc_all),type(nc_all))
#PC seqs pass filter: 55381
#NC seqs pass filter: 46919
print("Simulated sequence characteristics:")
oc = ORF_counter()
print("PC seqs")
oc.describe_sequences(pc_all)
print("NC seqs")
oc.describe_sequences(nc_all)
oc=None
show_time()
```
## Data Prep
```
dp = DataPrep()
Xseq,y=dp.combine_pos_and_neg(pc_all,nc_all)
nc_all=None
pc_all=None
print("The first few shuffled labels:")
print(y[:30])
show_time()
Xfrq=KmerTools.seqs_to_kmer_freqs(Xseq,MAX_K)
Xseq = None
y=np.asarray(y)
show_time()
# Assume X and y were shuffled.
train_size=PC_TRAINS+NC_TRAINS
X_train=Xfrq[:train_size]
X_test=Xfrq[train_size:]
y_train=y[:train_size]
y_test=y[train_size:]
print("Training set size=",len(X_train),"=",len(y_train))
print("Reserved test set size=",len(X_test),"=",len(y_test))
Xfrq=None
y=None
show_time()
```
## Load a trained neural network
```
show_time()
model = load_model(BESTMODELPATH)
print(model.summary())
```
## Test the neural network
```
def show_test_AUC(model,X,y):
ns_probs = [0 for _ in range(len(y))]
bm_probs = model.predict(X)
ns_auc = roc_auc_score(y, ns_probs)
bm_auc = roc_auc_score(y, bm_probs)
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc*100.0))
def show_test_accuracy(model,X,y):
scores = model.evaluate(X, y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print("Accuracy on test data.")
show_time()
show_test_AUC(model,X_test,y_test)
show_test_accuracy(model,X_test,y_test)
show_time()
```
# Step 1: Data gathering
__Step goal__: Download and store the datasets used in this study.
__Step overview__:
1. London demographic data;
2. London shape files;
3. Counts data;
4. Metro stations and lines.
#### Introduction
All data is __open access__ and can be found on the official websites. Note that the datasets can be updated by the corresponding agencies; therefore, some discrepancies are possible: new variables may become available, or some datasets may have fewer attributes.
```
import requests, zipfile, io
from datetime import datetime
import os
import pandas as pd
from bs4 import BeautifulSoup as bs
```
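Steps 1–3 below repeat the same download-and-extract pattern; a small helper along these lines (hypothetical name `download_and_extract`, not part of the original notebook) would capture it:
```
def download_and_extract(url, directory):
    """Download a zip archive and extract it into `directory`, creating it if needed."""
    r = requests.get(url)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    if not os.path.exists(directory):
        os.makedirs(directory)
        print(f'Successfully created new directory {directory}')
    z.extractall(path=directory)
    print(f'Downloading date: {datetime.today().strftime("%d-%m-%Y %H:%M:%S")}')
```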
## 1. London demographic data
```
url = 'https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fcensusoutputareaestimatesinthelondonregionofengland%2fmid2017/sape20dt10amid2017coaunformattedsyoaestimateslondon.zip'
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
directory = "../data/raw/population/"
if not os.path.exists(directory):
    print(f'Successfully created new directory {directory}')
os.makedirs(directory)
z.extractall(path=directory)
print(f'Downloading date: {datetime.today().strftime("%d-%m-%Y %H:%M:%S")}')
```
## 2. London shape files
```
url = 'https://data.london.gov.uk/download/statistical-gis-boundary-files-london/9ba8c833-6370-4b11-abdc-314aa020d5e0/statistical-gis-boundaries-london.zip'
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
directory = "../data/raw/geometry/london/"
if not os.path.exists(directory):
    print(f'Successfully created new directory {directory}')
os.makedirs(directory)
z.extractall(path=directory)
print(f'Downloading date: {datetime.today().strftime("%d-%m-%Y %H:%M:%S")}')
```
## 3. Counts data
```
url = 'http://tfl.gov.uk/tfl/syndication/feeds/counts.zip?app_id=&app_key='
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
directory = "../data/raw/counts/"
if not os.path.exists(directory):
    print(f'Successfully created new directory {directory}')
os.makedirs(directory)
z.extractall(path=directory)
print(f'Downloading date: {datetime.today().strftime("%d-%m-%Y %H:%M:%S")}')
```
## 4. Station locations and lines
```
url = 'https://commons.wikimedia.org/wiki/London_Underground_geographic_maps/CSV'
r = requests.get(url)
soup = bs(r.content, 'lxml')
pre = soup.select('pre')
file_names = ['stations.csv', 'routes.csv', 'lines.csv']
directory = "../data/raw/geometry/metro_stations/"
if not os.path.exists(directory):
    print(f'Successfully created new directory {directory}')
os.makedirs(directory)
for i, p in enumerate(pre):
df = pd.DataFrame([x.split(',') for x in p.text.split('\n')])
df.to_csv(directory + file_names[i])
print(f'Downloading date: {datetime.today().strftime("%d-%m-%Y %H:%M:%S")}')
```
## References
1. Office for National Statistics (2019). Census Output Area population estimates – London, England (supporting information). Retrieved from https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/censusoutputareaestimatesinthelondonregionofengland
2. London Datastore (2019). Statistical GIS Boundary Files for London. Retrieved from https://data.london.gov.uk/dataset/statistical-gis-boundary-files-london
3. Transport for London (2020). Transport for London API. Retrieved from https://api-portal.tfl.gov.uk/docs
4. Wikimedia Commons (2020). London Underground geographic maps/CSV. Retrieved from https://commons.wikimedia.org/wiki/London_Underground_geographic_maps/CSV
<h1 align="center">Exploratory Analysis : Game of Thrones</h1>

One of the most popular television series of all time, Game of Thrones is a fantasy drama set in the fictional continents of Westeros and Essos, filled with multiple plots and a huge number of characters all battling for the Iron Throne! It is an adaptation of the _A Song of Ice and Fire_ novel series by **George R. R. Martin**.
Being a popular series, it has caught the attention of many, and data scientists are no exception. This notebook presents **Exploratory Data Analysis (EDA)** on the _Kaggle_ dataset enhanced by _Myles O'Neill_ (more details: [click here](https://www.kaggle.com/mylesoneill/game-of-thrones)). This dataset is based on a combination of multiple datasets collected and contributed by multiple people. We utilize the ```battles.csv``` file in this notebook. The original battles data was presented by _Chris Albon_; more details are on [GitHub](https://github.com/chrisalbon/war_of_the_five_kings_dataset).
---
The image was taken from Game of Thrones, or from websites created and owned by HBO, the copyright of which is held by HBO. All trademarks and registered trademarks present in the image are proprietary to HBO, the inclusion of which implies no affiliation with the Game of Thrones. The use of such images is believed to fall under the fair dealing clause of copyright law.
## Import required packages
```
import cufflinks as cf
import pandas as pd
from collections import Counter
# pandas display data frames as tables
from IPython.display import display, HTML
```
### Set Configurations
```
cf.set_config_file(theme='white')
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
```
## Load Dataset
In this step we load the ```battles.csv``` for analysis
```
# load dataset using cufflinks wrapper for later usage with plot.ly plots
battles_df = cf.pd.read_csv('battles.csv')
# Display sample rows
display(battles_df.head())
```
## Explore raw properties
```
print("Number of attributes available in the dataset = {}".format(battles_df.shape[1]))
# View available columns and their data types
battles_df.dtypes
```
<h3 align="center">Battles for the Iron Throne</h3>

```
# Analyze properties of numerical columns
battles_df.describe()
```
---
## Number of Battles Fought
This data is till **season 5** only
```
print("Number of battles fought={}".format(battles_df.shape[0]))
```
## Battle Distribution Across Years
The plot below shows that maximum bloodshed happened in the year 299 with a total of 20 battles fought!
```
battles_df.year.value_counts().iplot(kind='barh',
xTitle='Number of Battles',
yTitle='Year',
title='Battle Distribution over Years',
showline=True)
```
## Which Regions saw most Battles?
<img src="https://racefortheironthrone.files.wordpress.com/2016/11/riverlands-political-map.jpg?w=580&h=781" alt="RiverLands" style="width: 200px;" align="left"/> **Riverland**s seem to be the favorite battle ground followed by the famous **The North**. Interestingly, till season 5, there was only 1 battle beyond the wall. Spoiler Alert: Winter is Coming!
```
battles_df.region.value_counts().iplot(kind='bar',
xTitle='Regions',
yTitle='Number of Battles',
title='Battles by Regions',
showline=True)
```
### Death or Capture of Main Characters by Region
No prizes for guessing that Riverlands have seen some of the main characters being killed or captured. Though _The Reach_ has seen 2 battles, none of the major characters seemed to have fallen there.
```
battles_df.groupby('region').agg({'major_death':'sum',
'major_capture':'sum'}).iplot(kind='bar')
```
## Who Attacked the most?
The Baratheon boys love attacking: they lead the pack with 38% of the battles, while Robb Stark is a close second as the attacker in 27.8% of them.
<img src="http://vignette3.wikia.nocookie.net/gameofthrones/images/4/4c/JoffreyBaratheon-Profile.PNG/revision/latest?cb=20160626094917" alt="joffrey" style="width: 200px;" align="left"/> <img src="https://meninblazers.com/.image/t_share/MTMwMDE5NTU4NTI5NDk1MDEw/tumblr_mkzsdafejy1r2xls3o1_400.png" alt="robb" style="width: 200px; height: 200px" align="right"/>
```
king_attacked = battles_df.attacker_king.value_counts().reset_index()
king_attacked.rename(columns={'index':'king','attacker_king':'battle_count'},inplace=True)
king_attacked.iplot(kind='pie',labels='king',values='battle_count')
```
## Who Defended the most?
Robb Stark and the Baratheon boys are again at the top of the pack. Looks like they have been on either side of the war many times.
```
king_defended = battles_df.defender_king.value_counts().reset_index()
king_defended.rename(columns={'index':'king','defender_king':'battle_count'},inplace=True)
king_defended.iplot(kind='pie',labels='king',values='battle_count')
```
## Battle Style Distribution
Plenty of battles all across, yet the men of Westeros and Essos are men of honor.
This is visible in the distribution which shows **pitched battle** as the most common style of battle.
```
battles_df.battle_type.value_counts().iplot(kind='barh')
```
## Attack or Defend?
Defending your place in Westeros isn't easy: 32 out of 37 battles were won by the attackers.
```
battles_df.attacker_outcome.value_counts().iplot(kind='barh')
```
## Winners
Who remembers losers? (except if you love the Starks)
The following plot helps us understand who won how many battles and how, by attacking or defending.
```
attack_winners = battles_df[battles_df.attacker_outcome=='win']['attacker_king'].value_counts().reset_index()
attack_winners.rename(columns={'index':'king','attacker_king':'attack_wins'},inplace=True)
defend_winners = battles_df[battles_df.attacker_outcome=='loss']['defender_king'].value_counts().reset_index()
defend_winners.rename(columns={'index':'king','defender_king':'defend_wins'},inplace=True)
winner_df = pd.merge(attack_winners,defend_winners,how='outer',on='king')
winner_df.fillna(0,inplace=True)
winner_df['total_wins'] = winner_df.apply(lambda row: row['attack_wins']+row['defend_wins'],axis=1)
winner_df[['king','attack_wins','defend_wins']].set_index('king').iplot(kind='bar',barmode='stack',
xTitle='King',
yTitle='Number of Wins',
title='Wins per King',
showline=True)
```
## Battle Commanders
A battle requires as much brains as muscle power.
The following is a distribution of the number of commanders involved on attacking and defending sides.
```
# commander fields are comma-separated lists of names, so split on ',' to count commanders
battles_df['attack_commander_count'] = battles_df.dropna(subset=['attacker_commander']).apply(lambda row: len(row['attacker_commander'].split(',')),axis=1)
battles_df['defend_commander_count'] = battles_df.dropna(subset=['defender_commander']).apply(lambda row: len(row['defender_commander'].split(',')),axis=1)
battles_df[['attack_commander_count',
'defend_commander_count']].iplot(kind='box',boxpoints='suspectedoutliers')
```
## How many houses fought in a battle?
Were the battles evenly balanced? The plots tell the whole story.
<img src="https://c1.staticflickr.com/4/3893/14834104277_54d309b4ca_b.jpg" style="height: 200px;"/>
```
battles_df['attacker_house_count'] = (4 - battles_df[['attacker_1',
'attacker_2',
'attacker_3',
'attacker_4']].isnull().sum(axis = 1))
battles_df['defender_house_count'] = (4 - battles_df[['defender_1',
'defender_2',
'defender_3',
'defender_4']].isnull().sum(axis = 1))
battles_df['total_involved_count'] = battles_df.apply(lambda row: row['attacker_house_count']+row['defender_house_count'],
axis=1)
battles_df['bubble_text'] = battles_df.apply(lambda row: '{} had {} house(s) attacking {} house(s) '.format(row['name'],
row['attacker_house_count'],
row['defender_house_count']),
axis=1)
```
## Unbalanced Battles
Most battles so far have seen more houses forming alliances while attacking.
There are only a few friends when you are under attack!
```
house_balance = battles_df[battles_df.attacker_house_count != battles_df.defender_house_count][['name',
'attacker_house_count',
'defender_house_count']].set_index('name')
house_balance.iplot(kind='bar',tickangle=-25)
```
## Battles and The size of Armies
Attackers don't take any chances; they come in huge numbers. Keep your eyes open!
```
battles_df.dropna(subset=['total_involved_count',
'attacker_size',
'defender_size',
'bubble_text']).iplot(kind='bubble',
x='defender_size',
y='attacker_size',
size='total_involved_count',
text='bubble_text',
#color='red',
xTitle='Defender Size',
yTitle='Attacker Size')
```
## Archenemies?
The Stark-Baratheon friendship has taken a complete U-turn with a total of 19 battles and counting. Indeed there is no one to be trusted in this land.
```
temp_df = battles_df.dropna(subset = ["attacker_king",
"defender_king"])[[
"attacker_king",
"defender_king"
]]
archenemy_df = pd.DataFrame(list(Counter([tuple(set(king_pair))
for king_pair in temp_df.values
if len(set(king_pair))>1]).items()),
columns=['king_pair','battle_count'])
archenemy_df['versus_text'] = archenemy_df.apply(lambda row:
'{} Vs {}'.format(
row['king_pair'][0],
row['king_pair'][1]),
axis=1)
archenemy_df.sort_values('battle_count',
inplace=True,
ascending=False)
archenemy_df[['versus_text',
'battle_count']].set_index('versus_text').iplot(
kind='bar')
```
---
Note: A lot more exploration is possible with the remaining attributes and their different combinations. This is just the tip of the iceberg.
# HyperEuler on MNIST-trained Neural ODEs
```
import sys ; sys.path.append('..')
from torchdyn.models import *; from torchdyn import *
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.metrics.functional import accuracy
from tqdm import tqdm_notebook as tqdm
from src.custom_fixed_explicit import ButcherTableau, GenericExplicitButcher
from src.hypersolver import *
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# smaller batch_size; only needed for visualization. The classification model
# will not be retrained
batch_size=16
size=28
path_to_data='../../data/mnist_data'
all_transforms = transforms.Compose([
transforms.RandomRotation(20),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
])
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
])
train_data = datasets.MNIST(path_to_data, train=True, download=True,
transform=all_transforms)
test_data = datasets.MNIST(path_to_data, train=False,
transform=test_transforms)
trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
testloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
```
## Loading the pretrained Neural ODE
```
func = nn.Sequential(nn.Conv2d(32, 46, 3, padding=1),
nn.Softplus(),
nn.Conv2d(46, 46, 3, padding=1),
nn.Softplus(),
nn.Conv2d(46, 32, 3, padding=1)
).to(device)
ndes = []
for i in range(1):
ndes.append(NeuralDE(func,
solver='dopri5',
sensitivity='adjoint',
atol=1e-4,
rtol=1e-4,
s_span=torch.linspace(0, 1, 2)).to(device))
#ndes.append(nn.Conv2d(32, 32, 3, padding=1)))
model = nn.Sequential(nn.BatchNorm2d(1),
Augmenter(augment_func=nn.Conv2d(1, 31, 3, padding=1)),
*ndes,
nn.AvgPool2d(28),
#nn.Conv2d(32, 1, 3, padding=1),
nn.Flatten(),
nn.Linear(32, 10)).to(device)
state_dict = torch.load('../pretrained_models/nde_mnist')
# remove state_dict keys for `torchdyn`'s Adjoint nn.Module (not used here)
copy_dict = state_dict.copy()
for key in copy_dict.keys():
if 'adjoint' in key: state_dict.pop(key)
model.load_state_dict(state_dict)
```
### Visualizing pretrained flows
```
x, y = next(iter(trainloader)); x = x.to(device)
for layer in model[:2]: x = layer(x)
model[2].nfe = 0
traj = model[2].trajectory(x, torch.linspace(0, 1, 50)).detach().cpu()
model[2].nfe
```
Pixel-flows of the Neural ODE, solved with `dopri5`
```
fig, axes = plt.subplots(nrows=5, ncols=10, figsize=(22, 10))
K = 4
for i in range(5):
for j in range(10):
im = axes[i][j].imshow(traj[i*5+j, K, 0], cmap='inferno')
fig.tight_layout(w_pad=0)
```
### Defining the HyperSolver class (-- HyperEuler version --)
```
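# Butcher tableau for the explicit Euler method (a single stage): a = [[0]], b = [1], c = [0]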
tableau = ButcherTableau([[0]], [1], [0], [])
euler_solver = GenericExplicitButcher(tableau)
hypersolv_net = nn.Sequential(
nn.Conv2d(32+32+1, 32, 3, stride=1, padding=1),
nn.PReLU(),
nn.Conv2d(32, 32, 3, padding=1),
nn.PReLU(),
nn.Conv2d(32, 32, 3, padding=1)).to(device)
#for p in hypersolv_net.parameters(): torch.nn.init.zeros_(p)
hs = HyperEuler(f=model[2].defunc, g=hypersolv_net)
x0 = torch.zeros(12, 32, 6, 6).to(device)
span = torch.linspace(0, 2, 10).to(device)
traj = model[2].trajectory(x0, span)
res_traj = hs.base_residuals(traj, span)
hyp_res_traj = hs.hypersolver_residuals(traj, span)
hyp_traj = hs.odeint(x0, span)
hyp_traj = hs.odeint(x0, span, use_residual=False).detach().cpu()
etraj = odeint(model[2].defunc, x0, span, method='euler').detach().cpu()
(hyp_traj - etraj).max()
```
### Training the Hypersolver
```
PHASE1_ITERS = 10 # num iters without swapping of the ODE initial condition (new sample)
ITERS = 15000
s_span = torch.linspace(0, 1, 10).to(device)
run_loss = 0.
# using test data for hypersolver training does not cause issues
# or task information leakage; the labels are not utilized in any way
it = iter(trainloader)
X0, Y = next(it)
Y = Y.to(device)
X0 = model[:2](X0.to(device))
model[2].solver = 'dopri5'
traj = model[2].trajectory(X0, s_span)
etraj = odeint(model[2].defunc, X0, s_span, method='euler')
opt = torch.optim.AdamW(hypersolv_net.parameters(), 1e-3, weight_decay=1e-8)
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=ITERS, eta_min=5e-4)
for i in tqdm(range(ITERS)):
ds = s_span[1] - s_span[0]
base_traj = model[2].trajectory(X0, s_span)
residuals = hs.base_residuals(base_traj, s_span).detach()
# Let the model generalize to other ICs after PHASE1_ITERS
if i > PHASE1_ITERS:
if i % 10 == 0: # swapping IC
try:
X0, _ = next(it)
except:
it = iter(trainloader)
X0, _ = next(it)
X0 = model[:2](X0.to(device))
model[2].solver = 'dopri5'
base_traj = model[2].trajectory(X0, s_span)
residuals = hs.base_residuals(base_traj.detach(), s_span).detach()
corrections = hs.hypersolver_residuals(base_traj.detach(), s_span)
loss = torch.norm(corrections - residuals.detach(), p='fro', dim=(3, 4)).mean() * ds**2
loss.backward()
torch.nn.utils.clip_grad_norm_(hypersolv_net.parameters(), 1)
if i % 10 == 0: print(f'\rLoss: {loss}', end='')
opt.step()
sched.step()
opt.zero_grad()
it = iter(testloader)
X0, _ = next(it)
X0 = model[:2](X0.to(device))
steps = 10
s_span = torch.linspace(0, 1, steps)
# dopri traj
model[2].solver = 'dopri5'
traj = model[2].trajectory(X0, s_span).detach().cpu()
# euler traj
model[2].solver = 'euler'
etraj = model[2].trajectory(X0, s_span).detach().cpu()
#etraj = hs.odeint(X0, s_span, use_residual=False).detach().cpu()
straj = hs.odeint(X0, s_span, use_residual=True).detach().cpu()
```
Evolution of absolute error: [Above] HyperEuler, [Below] Euler
```
fig, axes = plt.subplots(nrows=2, ncols=steps-1, figsize=(10, 4))
K = 1
vmin = min(torch.abs(straj[steps-1,:]-traj[steps-1,:]).mean(1)[K].min(),
torch.abs(etraj[steps-1,:]-traj[steps-1,:]).mean(1)[K].min())
vmax = max(torch.abs(straj[steps-1,:]-traj[steps-1,:]).mean(1)[K].max(),
torch.abs(etraj[steps-1,:]-traj[steps-1,:]).mean(1)[K].max())
for i in range(steps-1):
im = axes[0][i].imshow(torch.abs(straj[i+1,:]-traj[i+1,:]).mean(1)[K], cmap='inferno', vmin=vmin, vmax=vmax)
for i in range(steps-1):
im = axes[1][i].imshow(torch.abs(etraj[i+1,:]-traj[i+1,:]).mean(1)[K], cmap='inferno', vmin=vmin, vmax=vmax)
fig.colorbar(im, ax=axes.ravel().tolist(), orientation='horizontal')
#tikz.save('MNIST_interpolation_AE_plot.tex')
```
Evolution of absolute error: HyperEuler (alone). Greater detail
```
fig, axes = plt.subplots(nrows=1, ncols=steps-1, figsize=(10, 4))
for i in range(steps-1):
im = axes[i].imshow(torch.abs(straj[i+1,:]-traj[i+1,:]).mean(1)[K], cmap='inferno')
fig.colorbar(im, ax=axes.ravel().tolist(), orientation='horizontal')
```
### Evaluating ODE solution error
```
x = []
# NOTE: high GPU mem usage for generating data below for plot (on GPU)
# consider using less batches (and iterating) or performing everything on CPU
for i in range(5):
x_b, _ = next(it)
x += [model[:2](x_b.to(device))]
x = torch.cat(x); x.shape
STEPS = range(8, 50)
euler_avg_error, euler_std_error = [], []
hyper_avg_error, hyper_std_error = [], []
midpoint_avg_error, midpoint_std_error = [], []
rk4_avg_error, rk4_std_error = [], []
for step in tqdm(STEPS):
s_span = torch.linspace(0, 1, step)
# dopri traj
model[2].solver = 'dopri5'
traj = model[2].trajectory(x, s_span).detach().cpu()
# euler traj
model[2].solver = 'euler'
etraj = model[2].trajectory(x, s_span).detach().cpu()
# hypersolver
s_span = torch.linspace(0, 1, step)
straj = hs.odeint(x, s_span, use_residual=True).detach().cpu()
#midpoint
model[2].solver = 'midpoint'
s_span = torch.linspace(0, 1, step//2)
mtraj = model[2].trajectory(x, s_span).detach().cpu()
#midpoint
model[2].solver = 'rk4'
s_span = torch.linspace(0, 1, step//4)
rtraj = model[2].trajectory(x, s_span).detach().cpu()
# errors
euler_error = torch.abs((etraj[-1].detach().cpu() - traj[-1].detach().cpu()) / traj[-1].detach().cpu()).sum(1)
hyper_error = torch.abs((straj[-1].detach().cpu() - traj[-1].detach().cpu()) / traj[-1].detach().cpu()).sum(1)
midpoint_error = torch.abs((mtraj[-1].detach().cpu() - traj[-1].detach().cpu()) / traj[-1].detach().cpu()).sum(1)
rk4_error = torch.abs((rtraj[-1].detach().cpu() - traj[-1].detach().cpu()) / traj[-1].detach().cpu()).sum(1)
# mean, stdev
euler_avg_error += [euler_error.mean().item()] ; euler_std_error += [euler_error.mean(dim=1).mean(dim=1).std(0).item()]
hyper_avg_error += [hyper_error.mean().item()] ; hyper_std_error += [hyper_error.mean(dim=1).mean(dim=1).std(0).item()]
midpoint_avg_error += [midpoint_error.mean().item()] ; midpoint_std_error += [midpoint_error.mean(dim=1).mean(dim=1).std(0).item()]
rk4_avg_error += [rk4_error.mean().item()] ; rk4_std_error += [rk4_error.mean(dim=1).mean(dim=1).std(0).item()]
euler_avg_error, euler_std_error = np.array(euler_avg_error), np.array(euler_std_error)
hyper_avg_error, hyper_std_error = np.array(hyper_avg_error), np.array(hyper_std_error)
midpoint_avg_error, midpoint_std_error = np.array(midpoint_avg_error), np.array(midpoint_std_error)
rk4_avg_error, rk4_std_error = np.array(rk4_avg_error), np.array(rk4_std_error)
range_steps = range(8, 50, 1)
fig, ax = plt.subplots(1, 1); fig.set_size_inches(8, 3)
ax.plot(range_steps, euler_avg_error, color='red', linewidth=3, alpha=0.5)
ax.fill_between(range_steps, euler_avg_error-euler_std_error, euler_avg_error+euler_std_error, alpha=0.05, color='red')
ax.plot(range_steps, hyper_avg_error, c='black', linewidth=3, alpha=0.5)
ax.fill_between(range_steps, hyper_avg_error+hyper_std_error, hyper_avg_error-hyper_std_error, alpha=0.05, color='black')
# start from 10 steps, balance the steps
mid_range_steps = range(8, 50, 2)
ax.plot(mid_range_steps, midpoint_avg_error[::2], color='green', linewidth=3, alpha=0.5)
ax.fill_between(mid_range_steps, midpoint_avg_error[::2]-midpoint_std_error[::2], midpoint_avg_error[::2]+midpoint_std_error[::2], alpha=0.1, color='green')
# start from 10 steps, balance the steps
mid_range_steps = range(8, 50, 4)
ax.plot(mid_range_steps, rk4_avg_error[::4], color='gray', linewidth=3, alpha=0.5)
ax.fill_between(mid_range_steps, rk4_avg_error[::4]-rk4_std_error[::4], rk4_avg_error[::4]+rk4_std_error[::4], alpha=0.05, color='gray')
ax.set_ylim(0, 200)
ax.set_xlim(8, 40)
ax.legend(['Euler', 'HyperEuler', 'Midpoint', 'RK4'])
ax.set_xlabel('NFEs')
ax.set_ylabel('Terminal error (MAPE)')
```
```
import numpy as np
import tensorflow as tf
import random as rn
import os
import matplotlib.pyplot as plt
%matplotlib inline
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(1)
rn.seed(1)
from keras import backend as K
tf.compat.v1.set_random_seed(1)
#sess = tf.Session(graph=tf.get_default_graph())
#K.set_session(sess)
import sys
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Dropout, Flatten
from keras.layers import Conv1D,MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.optimizers import RMSprop
import keras.regularizers
import scipy
import math
import sys
import pandas as pd
from scipy.ndimage.filters import gaussian_filter1d
from sklearn.metrics import mean_squared_error
from scipy.stats import linregress
from scipy import interpolate
from scipy import signal
import pickle
from video_process_utils import *
import collections
target_col = "SEMLS_dev_residual"
#assign train/validation/test ids
alldata_processed =\
pd.read_csv("./data/processed/alldata_processed_with_dev_residual.csv" )
alldata_processed['videoid'] = alldata_processed['videoid'].apply(lambda x: int(x))
alldata_processed = alldata_processed[alldata_processed[target_col].notnull()]
alldata_processed = alldata_processed.groupby(['videoid','side']).head(1)
ids_nonmissing_target = set(alldata_processed['videoid'].unique())
datasplit_df = pd.read_csv('./data/processed/train_test_valid_id_split.csv')
datasplit_df['videoid'] = datasplit_df['videoid'].apply(lambda x: int(x))
all_ids = set(datasplit_df['videoid']).intersection(ids_nonmissing_target)
train_ids = set(datasplit_df[datasplit_df['dataset'] == 'train']['videoid']).intersection(ids_nonmissing_target)
validation_ids = set(datasplit_df[datasplit_df['dataset'] == 'validation']['videoid']).intersection(ids_nonmissing_target)
test_ids = set(datasplit_df[datasplit_df['dataset'] == 'test']['videoid']).intersection(ids_nonmissing_target)
with open('./data/processed/all_processed_video_segments.pickle', 'rb') as handle:
processed_video_segments = pickle.load(handle)
x_columns = [2*LANK,2*LANK+1,2*LKNE,2*LKNE+1,
2*LHIP,2*LHIP+1,2*LBTO,2*LBTO+1,
2*RANK,2*RANK+1,2*RKNE,2*RKNE+1,
2*RHIP,2*RHIP+1,2*RBTO,2*RBTO+1,50,51,52,53,54,55,56]
target_dict = {}
for i in range(len(alldata_processed)):
row = alldata_processed.iloc[i]
target_dict[row['videoid']] = row[target_col]
if target_col == "gmfcs":
processed_video_segments = list(filter(lambda x: target_dict[x[0]] in range(1,6), processed_video_segments))
X = [t[2] for t in processed_video_segments if t[0] in all_ids]
X = np.stack(X)[:,:,x_columns]
y = np.array([target_dict[t[0]] for t in processed_video_segments if t[0] in all_ids])
X_train = [t[2] for t in processed_video_segments if t[0] in train_ids]
X_train = np.stack(X_train)[:,:,x_columns]
X_validation = [t[2] for t in processed_video_segments if t[0] in validation_ids]
X_validation = np.stack(X_validation)[:,:,x_columns]
y_train = np.array([target_dict[t[0]] for t in processed_video_segments if t[0] in train_ids])
y_validation = np.array([target_dict[t[0]] for t in processed_video_segments if t[0] in validation_ids])
videoid_count_dict = collections.Counter(np.array([t[0] for t in processed_video_segments]))
train_videoid_weights = [1./videoid_count_dict[t[0]] for t in processed_video_segments if t[0] in train_ids]
train_videoid_weights = np.array(train_videoid_weights).reshape(-1,1)
validation_videoid_weights = [1./videoid_count_dict[t[0]] for t in processed_video_segments if t[0] in validation_ids]
validation_videoid_weights = np.array(validation_videoid_weights).reshape(-1,1)
target_min = np.min(y_train,axis=0)
target_range = np.max(y_train,axis=0) - np.min(y_train,axis=0)
print(target_min, target_range)
y_train_scaled = ((y_train-target_min)/target_range).reshape(-1,1)
y_validation_scaled = ((y_validation-target_min)/target_range).reshape(-1,1)
y_validation_scaled = np.hstack([y_validation_scaled,validation_videoid_weights])
y_train_scaled = np.hstack([y_train_scaled,train_videoid_weights])
c_i_factor = np.mean(np.vstack([train_videoid_weights,validation_videoid_weights]))
vid_length = 124
def step_decay(initial_lrate,epochs_drop,drop_factor):
def step_decay_fcn(epoch):
return initial_lrate * math.pow(drop_factor, math.floor((1+epoch)/epochs_drop))
return step_decay_fcn
epochs_drop,drop_factor = (10,0.8)
initial_lrate = 0.001
dropout_amount = 0.5
last_layer_dim = 10
filter_length = 8
conv_dim = 32
l2_lambda = 10**(-3.5)
def w_mse(weights):
def loss(y_true, y_pred):
#multiply by len(weights) to make the magnitude invariant to number of components in target
return K.mean(K.sum(K.square(y_true-y_pred)*weights,axis=1)*tf.reshape(y_true[:,-1],(-1,1)))/c_i_factor
return loss
#we don't want to optimize for the column counting video occurrences of course, but
#they are included in the target so we can use that column for the loss function
weights = [1.0,0]
normal_weights = [1.0,0]
#normalize weights to sum to 1 to prevent affecting loss function
weights = weights/np.sum(weights)
normal_weights = normal_weights/np.sum(normal_weights)
#fixed epoch budget of 100 that empirically seems to be sufficient
n_epochs = 100
mse_opt = w_mse(weights)
#monitor our actual objective
mse_metric = w_mse(target_range**2*normal_weights)
hyper_str = "params_"
for param in [initial_lrate,epochs_drop,drop_factor,dropout_amount,conv_dim,last_layer_dim,filter_length,l2_lambda]:
hyper_str = hyper_str + str(param) + "_"
K.clear_session()
#K.set_session(sess)
model = Sequential()
model.add(Conv1D(conv_dim,filter_length, input_dim=X_train.shape[2],input_length=vid_length,padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv1D(conv_dim,filter_length,padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(dropout_amount))
model.add(Conv1D(conv_dim,filter_length,padding='same',kernel_regularizer=keras.regularizers.l2(l2_lambda)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv1D(conv_dim,filter_length,padding='same',kernel_regularizer=keras.regularizers.l2(l2_lambda)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(dropout_amount))
model.add(Conv1D(conv_dim,filter_length,padding='same',kernel_regularizer=keras.regularizers.l2(l2_lambda)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv1D(conv_dim,filter_length,padding='same',kernel_regularizer=keras.regularizers.l2(l2_lambda)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=3))
model.add(Dropout(dropout_amount))
model.add(Flatten())
model.add(Dense(last_layer_dim,activation='relu'))
model.add(Dense(2, activation='linear'))
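# The two linear outputs mirror the two columns stacked into y_*_scaled: the first predicts the
# scaled target, the second matches the per-video weight column, which the loss weights
# ([1.0, 0]) zero out so it never drives the optimization.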
checkpoint_folder = "./data/checkpoints/cnn_checkpoints_%s" % (target_col)
from keras.callbacks import LearningRateScheduler
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TerminateOnNaN
train_model = True
if not os.path.exists(checkpoint_folder):
os.makedirs(checkpoint_folder)
filepath=checkpoint_folder+"/weights-{epoch:02d}-{val_loss_1:.4f}.hdf5"
if train_model:
opt = RMSprop(lr=0.0,rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss=mse_opt,metrics=[mse_metric],
optimizer=opt)
checkpoint = \
ModelCheckpoint(filepath, monitor='val_loss_2', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
lr = LearningRateScheduler(step_decay(initial_lrate,epochs_drop,drop_factor))
history = model.fit(X_train, y_train_scaled,callbacks=[checkpoint,lr,TerminateOnNaN()],
validation_data=(X_validation,y_validation_scaled),
batch_size=32, epochs=n_epochs,shuffle=True)
import statsmodels.api as sm
def undo_scaling(y,target_range,target_min):
return y*target_range+target_min
weight_files = os.listdir(checkpoint_folder)
weight_files_df = pd.DataFrame(weight_files,columns=['filename'])
weight_files_df['num'] = weight_files_df['filename'].apply(lambda x: int(x.split('-')[1]))
weight_files_df.sort_values(by='num',ascending=True,inplace=True)
def predict_and_aggregate_singlevar(y,X,ids,model,target_col):
df = pd.DataFrame(y,columns=[target_col])
target_col_pred = target_col + "_pred"
videoids = [t[0] for t in processed_video_segments if t[0] in ids]
df["videoid"] = np.array(videoids)
preds = model.predict(X)
df[target_col_pred] = undo_scaling(preds[:,0],target_range,target_min)
df["count"] = 1
df = df.groupby(['videoid'],as_index=False).agg({target_col_pred:np.mean,'count':np.sum,target_col:np.mean})
df['ones'] = 1
return df
video_ids = [t[0] for t in processed_video_segments if t[0] in all_ids]
predictions_df = pd.DataFrame(video_ids,columns=['videoid'])
predictions_df[target_col] = y
predictions_df = predictions_df.merge(right=datasplit_df[['videoid','dataset']],on=['videoid'],how='left')
for i in range(0,len(weight_files_df)):
weight_file = weight_files_df['filename'].iloc[i]
print(weight_file)
model.load_weights(checkpoint_folder + "/%s" % (weight_file))
preds = model.predict(X)
predictions_df["%s_pred_%s" % (target_col,i)] = undo_scaling(preds[:,0],target_range,target_min)
predictions_df.groupby(['videoid','dataset'],as_index=False).mean().to_csv("./data/predictions/cnn_%s_singlesided_predictions_all_epochs.csv" % (target_col),index=False)
# Save best models
# This must be run after finding the best model with select_optimal_epoch
maps = {
"gmfcs": "./data/checkpoints/cnn_checkpoints_gmfcs/weights-08-0.5025.hdf5", #
"speed": "./data/checkpoints/cnn_checkpoints_speed/weights-77-0.0336.hdf5", #
"cadence": "./data/checkpoints/cnn_checkpoints_cadence/weights-36-0.0211.hdf5", #
"SEMLS_dev_residual": "./data/checkpoints/cnn_checkpoints_SEMLS_dev_residual/weights-32-0.8929.hdf5", #
# "GDI": "./data/checkpoints/cnn_checkpoints_GDI/weights-88-72.0330.hdf5" #
"GDI": "./data/checkpoints/cnn_checkpoints_GDI/weights-92-90.8354.hdf5" #
}
for col in maps.keys():
model_folder_path = "./data/models/%s_best.pb" % (col)
model.load_weights(maps[col])
model.save(model_folder_path)
```
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Modeling" data-toc-modified-id="Modeling-1"><span class="toc-item-num">1 </span>Modeling</a></span><ul class="toc-item"><li><span><a href="#Victims" data-toc-modified-id="Victims-1.1"><span class="toc-item-num">1.1 </span>Victims</a></span></li><li><span><a href="#Perpetrators" data-toc-modified-id="Perpetrators-1.2"><span class="toc-item-num">1.2 </span>Perpetrators</a></span></li><li><span><a href="#ViolenceEvent" data-toc-modified-id="ViolenceEvent-1.3"><span class="toc-item-num">1.3 </span>ViolenceEvent</a></span></li></ul></li></ul></div>
```
import sys
sys.version
from pathlib import Path
import pprint
%load_ext cypher
# https://ipython-cypher.readthedocs.io/en/latest/
# used for cell magic
from py2neo import Graph
NEO4J_URI="bolt://localhost:7687"
graph = Graph(NEO4J_URI)
graph
def clear_graph():
print(graph.run("MATCH (n) DETACH DELETE n").stats())
clear_graph()
graph.run("RETURN apoc.version();").data()
graph.run("call dbms.components() yield name, versions, edition unwind versions as version return name, version, edition;").data()
```
# Modeling
```
import pandas as pd
```
We are modeling data from the pinochet dataset, available at https://github.com/danilofreire/pinochet
> Freire, D., Meadowcroft, J., Skarbek, D., & Guerrero, E.. (2019). Deaths and Disappearances in the Pinochet Regime: A New Dataset. https://doi.org/10.31235/osf.io/vqnwu.
The dataset has 59 variables with information about the victims, the perpetrators, and geographical
coordinates of each incident.
```
PINOCHET_DATA = "../pinochet/data/pinochet.csv"
pin = pd.read_csv(PINOCHET_DATA)
pin.head()
pin.age.isna().sum()
```
The dataset contains information about perpetrators, victims, violence events and event locations. We will develop models around these concepts, and we will establish relationships between them later.
## Victims
- victim_id*: this is not the same as in the dataset.
- individual_id
- group_id
- first_name
- last_name
- age
- minor
- male
- number_previous_arrests
- occupation
- occupation_detail
- victim_affiliation
- victim_affiliation_detail
- targeted
```
victim_attributes = [
"individual_id",
"group_id",
"first_name",
"last_name",
"age",
"minor",
"male",
"number_previous_arrests",
"occupation",
"occupation_detail",
"victim_affiliation",
"victim_affiliation_detail",
"targeted",
]
pin_victims = pin[victim_attributes]
pin_victims.head()
# https://neo4j.com/docs/labs/apoc/current/import/load-csv/
PINOCHET_CSV_GITHUB = "https://raw.githubusercontent.com/danilofreire/pinochet/master/data/pinochet.csv"
query = """
WITH $url AS url
CALL apoc.load.csv(url)
YIELD lineNo, map, list
RETURN *
LIMIT 1"""
graph.run(query, url = PINOCHET_CSV_GITHUB).data()
%%cypher
CALL apoc.load.csv('pinochet.csv')
YIELD lineNo, map, list
RETURN *
LIMIT 1
```
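As an illustrative sketch of how Victim nodes could be created from the CSV with `apoc.load.csv` (this is not the project's actual `load_csv.cql`; the property list and the `toInteger` cast are assumptions):
```
# Illustrative only -- the real import query lives in load_csv.cql
create_victims_query = """
WITH $url AS url
CALL apoc.load.csv(url) YIELD map
CREATE (v:Victim {
    individual_id: map.individual_id,
    first_name: map.first_name,
    last_name: map.last_name,
    age: toInteger(map.age),
    male: map.male
})
"""
graph.run(create_victims_query, url=PINOCHET_CSV_GITHUB).stats()
```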
## Perpetrators
- perpetrator_affiliation
- perpetrator_affiliation_detail
- war_tribunal
```
perpetrators_attributes = [
"perpetrator_affiliation",
"perpetrator_affiliation_detail",
"war_tribunal",
]
pin_perps = pin[perpetrators_attributes]
pin_perps.head()
```
## ViolenceEvent
```
clear_graph()
query = Path("../services/graph-api/project/queries/load_csv.cql").read_text()
# pprint.pprint(query)
graph.run(query, url = PINOCHET_CSV_GITHUB).stats()
```
```
# evaluate RFE for classification
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold, RepeatedKFold
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
# define dataset
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
#create pipeline
rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=5)
model= DecisionTreeClassifier() # the final model doesn't have to be the same as the estimator used inside RFE
pipeline = Pipeline(steps=[('s', rfe),('m', model)]) # 's' and 'm' are just arbitrary step names; any labels work
# evaluate model
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores=cross_val_score(pipeline, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
# report performance
print('Accuracy: %.3f (%.3f)'% (np.mean(n_scores), np.std(n_scores)))
# make a prediction with an RFE pipeline
# define dataset
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
#create pipeline
rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=5)
model= DecisionTreeClassifier() # the final model doesn't have to be the same as the estimator used inside RFE
pipeline = Pipeline(steps=[('s', rfe),('m', model)]) # 's' and 'm' are just arbitrary step names; any labels work
# fit the model on all available data
pipeline.fit(X,y)
# make a prediction for one example
data = [[2.56999479, 0.13019997, 3.16075093, -4.35936352, -1.61271951, -1.39352057, -2.48924933, -1.93094078, 3.26130366, 1.05692145]]
yhat = pipeline.predict(data)
print('Predicted Class: %d' % (yhat))
# test regression dataset
from sklearn.datasets import make_regression
# define dataset
X, y= make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=1)
# summarize the dataset
print(X.shape, y.shape)
# evaluate RFE for regression
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score, RepeatedKFold
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from sklearn.pipeline import Pipeline
# define dataset
X, y= make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=1)
# create pipeline
rfe = RFE(estimator=DecisionTreeRegressor(), n_features_to_select=5)
model= DecisionTreeRegressor() # the final model doesn't have to be the same as the estimator used inside RFE
pipeline = Pipeline(steps=[('s', rfe),('m', model)])
# evaluate model
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(pipeline, X, y, scoring= 'neg_mean_absolute_error', cv=cv, n_jobs=-1, error_score='raise')
# report the MAE of the model across all the folds; scikit-learn makes the MAE negative so it can maximize
# instead of minimizing. This means negative MAE values closer to zero are better, and a perfect MAE is zero.
# report performance
print('MAE: %.3f (%.3f)'% (np.mean(n_scores), np.std(n_scores)))
# make a regression prediction with an RFE pipeline
from numpy import mean
from numpy import std
from sklearn.datasets import make_regression
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeRegressor
from sklearn.pipeline import Pipeline
#define dataset
X, y= make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=1)
# create pipeline
rfe = RFE(estimator=DecisionTreeRegressor(), n_features_to_select=5)
model= DecisionTreeRegressor() # the final model doesn't have to be the same as the estimator used inside RFE
pipeline = Pipeline(steps=[('s', rfe),('m', model)])
# fit the model on all available data
pipeline.fit(X,y)
# make a prediction for one example
data = [[-2.022220122, 0.31563495, 0.8279464, -0.30620401, 0.116003707, -1.44411381, 0.87616892, -0.50446586, 0.23009474, 0.76201118]]
yhat=pipeline.predict(data)
print('Predicted: %.3f' % (yhat))
# explore the number of selected features for RFE
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from matplotlib import pyplot
# get the dataset
def get_dataset():
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
return X,y
# get a list of models to evaluate
def get_models():
models=dict()
for i in range(2, 10):
rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=i)
model = DecisionTreeClassifier()
models[str(i)] = Pipeline(steps=[('s', rfe), ('m', model)])
return models
# evaluate a given model using cross-validation
def evaluate_model(model, X, y):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
return scores
# define datasets
X, y = get_dataset()
# get the model to evaluate
models = get_models()
# evaluate the models and store results
results, names = list(), list()
for name, model in models.items():
scores = evaluate_model(model, X, y)
results.append(scores)
names.append(name)
print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
# plot model performance for comparison
pyplot.boxplot(results, labels=names, showmeans=True)
pyplot.show()
# automatically select the number of features for RFE
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score, RepeatedStratifiedKFold
from sklearn.feature_selection import RFECV # -> for automatic selection of the number of features we use 'RFECV' instead of 'RFE'
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
# define dataset
def get_dataset():
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
return X,y
X, y = get_dataset()
# create pipeline
rfe = RFECV(estimator=DecisionTreeClassifier())
model = DecisionTreeClassifier()
pipeline = Pipeline(steps = [('s', rfe), ('m', model)])
# evaluate model
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(pipeline, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
# report performance
print('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
# when using RFE we may want to know which features were selected and which were not
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeClassifier
# define dataset
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
# define RFE
rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=5)
# fit RFE
rfe.fit(X, y)
#Summarize all features
for i in range(X.shape[1]):
print('Column: %d, Selected %s, Rank: %.3f' % (i, rfe.support_[i], rfe.ranking_[i]))
# .support_ reports True or False
# .ranking_ reports the importance of each feature
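# Optionally (not part of the original tutorial), the fitted RFE object can also be
# used as a transformer to keep only the selected columns:
X_selected = rfe.transform(X)
print('Reduced shape:', X_selected.shape)  # (1000, 5): only the 5 selected features remain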
# explore the estimator wrapped by RFE -> this tells us which algorithm works best inside RFE
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from matplotlib import pyplot
# get the dataset
def get_dataset():
X, y = make_classification(n_samples=1000, n_features=10, n_informative=5, n_redundant=5, random_state=1)
return X,y
# get a list of models to evaluate
def get_models():
models=dict()
#lr
rfe=RFE(estimator=LogisticRegression(), n_features_to_select=5)
model = DecisionTreeClassifier()
models['lr']= Pipeline(steps=[('s', rfe), ('m', model)])
#perceptron
rfe=RFE(estimator=Perceptron(), n_features_to_select=5)
model = DecisionTreeClassifier()
models['per']= Pipeline(steps=[('s', rfe), ('m', model)])
# cart
rfe=RFE(estimator=DecisionTreeClassifier(), n_features_to_select=5)
model = DecisionTreeClassifier()
models['cart']= Pipeline(steps=[('s', rfe), ('m', model)])
# rf
rfe=RFE(estimator=RandomForestClassifier(), n_features_to_select=5)
model = DecisionTreeClassifier()
models['rf']= Pipeline(steps=[('s', rfe), ('m', model)])
# gbm
rfe=RFE(estimator=GradientBoostingClassifier(), n_features_to_select=5)
model = DecisionTreeClassifier()
models['gbm']= Pipeline(steps=[('s', rfe), ('m', model)])
return models
# evaluate a given model using cross-validation
def evaluate_model(model, X, y):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
return scores
# define datasets
X, y = get_dataset()
# get the models to evaluate
models = get_models()
# evaluate the models and store results
results, names = list(), list()
for name, model in models.items():
scores = evaluate_model(model, X, y)
results.append(scores)
names.append(name)
print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
# plot model performance for comparison
pyplot.boxplot(results, labels=names, showmeans=True)
pyplot.show()
```
|
github_jupyter
|
# [Day 8](https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem)
```
{'1':'a'}.update({'2':'c'})
d = {}
for i in range(int(input())):
x = input().split()
d[x[0]] = x[1]
while True:
try:
name = input()
if name in d:
print(name, "=", d[name], sep="")
else:
print("Not found")
    except EOFError:
break
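# (Separate exercise) Longest run of consecutive 1s in the binary representation of n: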
n = int(input().strip())
print(max(len(length) for length in bin(n)[2:].split('0')))
```
# Day 13
```
class Shape:
def area(): pass
def perimeter(): pass
class square(Shape):
    def __init__(self,side):
self.side=side
s=Shape()
s
from abc import ABC,abstractclassmethod
class Shape(ABC):
@abstractclassmethod
def area(): pass
@abstractclassmethod
def perimeter(): pass
class square(Shape):
def __init__(self,side):
self.__side=side
def area(self):
return self.__side**2
def perimeter(self):
return self.__side*4
s=Shape() # if the class is abstract we cannot instantiate it like this (this line raises a TypeError)
r=square(34)
print(r.area())
print(r.perimeter())
from abc import ABCMeta, abstractmethod
class Book(object, metaclass=ABCMeta):
def __init__(self,title,author):
self.title=title
self.author=author
@abstractmethod
def display(): pass
#Write MyBook class
class MyBook(Book):
    price=0
    def __init__(self,t,a,p):
        super().__init__(t,a)
        self.price=p
    def display(self):
        print('Title: {}'.format(self.title))
        print('Author: {}'.format(self.author))
        print('Price: {}'.format(self.price))
title=input()
author=input()
price=int(input())
new_novel=MyBook(title,author,price)
new_novel.display()
```
# Day 17
```
#Write your code here
class Calculator:
def power(self,a,c):
        if a>=0 and c>=0:
return a**c
else:
raise Exception('n and p should be non-negative')
myCalculator=Calculator()
T=int(input())
for i in range(T):
n,p = map(int, input().split())
try:
ans=myCalculator.power(n,p)
print(ans)
except Exception as e:
print(e)
```
# Day 18
```
s=list(input())
l=len(s)
if l%2!=0:
s.pop(l//2)
for i in range(l//2):
if s.pop(0)!=s.pop(-1):
print('oo')
break
else:
print('aagram')
[]+[1]
import sys
class Solution:
# Write your code here
def __init__(self):
self.s=[]
self.q=[]
def pushCharacter(self,i):
self.s+=[i]
def enqueueCharacter(self,j):
self.q=[j]+self.q
def popCharacter(self):
return self.s.pop()
def dequeueCharacter(self):
return self.q.pop()
# read the string s
s=input()
#Create the Solution class object
obj=Solution()
l=len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
#finally print whether string s is palindrome or not.
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
```
# Day 20
```
class Swaps:
def __init__(self,n,a):
self.n=n
self.a=a
self.numberOfSwaps=0
def calculate(self):
for i in range(self.n):
#Track number of elements swapped during a single array traversal
for j in range(self.n-1):
# Swap adjacent elements if they are in decreasing order
if self.a[j] > self.a[j + 1]:
self.numberOfSwaps+=1
temp=self.a[j]
self.a[j]=self.a[j + 1]
self.a[j+1]=temp
#If no elements were swapped during a traversal, array is sorted
if self.numberOfSwaps == 0:
break;
def display(self):
self.calculate()
        print('Array is sorted in {0} swaps.\nFirst Element: {1}\nLast Element: {2}'.format(self.numberOfSwaps,self.a[0],self.a[-1]))
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
s=Swaps(n,a)
s.display()
def isPrime(n) :
if (n <= 1) :
return False
if (n <= 3) :
return True
if (n % 2 == 0 or n % 3 == 0) :
return False
i = 5
while(i * i <= n) :
if (n % i == 0 or n % (i + 2) == 0) :
return False
i = i + 6
return True
for _ in range(int(input())):
if isPrime(int(input())):
print('Prime')
else:
print('Not prime')
rd, rm, ry = [int(x) for x in input().split(' ')]
ed, em, ey = [int(x) for x in input().split(' ')]
if (ry, rm, rd) <= (ey, em, ed):
print(0)
elif (ry, rm) == (ey, em):
print(15 * (rd - ed))
elif ry == ey:
print(500 * (rm - em))
else:
print(10000)
```
|
github_jupyter
|
# Introduction to XGBoost Spark with GPU
The goal of this notebook is to show how to train an XGBoost model with the Spark RAPIDS XGBoost library on GPUs. The dataset used with this notebook is derived from Fannie Mae's Single-Family Loan Performance Data, with all rights reserved by Fannie Mae. This processed dataset is redistributed with permission and consent from Fannie Mae. This notebook uses XGBoost to train a 12-month mortgage loan delinquency prediction model.
A few libraries are required for this notebook:
1. NumPy
2. cudf jar
3. xgboost4j jar
4. xgboost4j-spark jar
5. rapids-4-spark.jar
This notebook also illustrates how easily a sample CPU-based Spark xgboost4j program can be ported to GPU. There is only one change required for running Spark XGBoost on GPU: replacing the CPU API `setFeaturesCol(feature)` with the new API `setFeaturesCols(features)`. This also eliminates the need for vectorization (assembling multiple feature columns into one column), since we can read multiple columns directly.
#### Import All Libraries
```
from ml.dmlc.xgboost4j.scala.spark import XGBoostClassificationModel, XGBoostClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import SparkSession
from pyspark.sql.types import FloatType, IntegerType, StructField, StructType
from time import time
```
In addition, the CPU version requires two extra libraries.
```Python
from pyspark.ml.feature import VectorAssembler
from pyspark.sql.functions import col
```
#### Create Spark Session and Data Reader
```
spark = SparkSession.builder.getOrCreate()
reader = spark.read
```
#### Specify the Data Schema and Load the Data
```
label = 'delinquency_12'
schema = StructType([
StructField('orig_channel', FloatType()),
StructField('first_home_buyer', FloatType()),
StructField('loan_purpose', FloatType()),
StructField('property_type', FloatType()),
StructField('occupancy_status', FloatType()),
StructField('property_state', FloatType()),
StructField('product_type', FloatType()),
StructField('relocation_mortgage_indicator', FloatType()),
StructField('seller_name', FloatType()),
StructField('mod_flag', FloatType()),
StructField('orig_interest_rate', FloatType()),
StructField('orig_upb', IntegerType()),
StructField('orig_loan_term', IntegerType()),
StructField('orig_ltv', FloatType()),
StructField('orig_cltv', FloatType()),
StructField('num_borrowers', FloatType()),
StructField('dti', FloatType()),
StructField('borrower_credit_score', FloatType()),
StructField('num_units', IntegerType()),
StructField('zip', IntegerType()),
StructField('mortgage_insurance_percent', FloatType()),
StructField('current_loan_delinquency_status', IntegerType()),
StructField('current_actual_upb', FloatType()),
StructField('interest_rate', FloatType()),
StructField('loan_age', FloatType()),
StructField('msa', FloatType()),
StructField('non_interest_bearing_upb', FloatType()),
StructField(label, IntegerType()),
])
features = [ x.name for x in schema if x.name != label ]
train_data = reader.schema(schema).option('header', True).csv('/data/mortgage/csv/train')
trans_data = reader.schema(schema).option('header', True).csv('/data/mortgage/csv/test')
```
Note that for the CPU version, vectorization is required before fitting the data to the classifier, which means you need to assemble all feature columns into one column.
```Python
def vectorize(data_frame):
to_floats = [ col(x.name).cast(FloatType()) for x in data_frame.schema ]
return (VectorAssembler()
.setInputCols(features)
.setOutputCol('features')
.transform(data_frame.select(to_floats))
.select(col('features'), col(label)))
train_data = vectorize(train_data)
trans_data = vectorize(trans_data)
```
#### Create a XGBoostClassifier
```
params = {
'eta': 0.1,
'gamma': 0.1,
'missing': 0.0,
'treeMethod': 'gpu_hist',
'maxDepth': 10,
'maxLeaves': 256,
'objective':'binary:logistic',
'growPolicy': 'depthwise',
'minChildWeight': 30.0,
'lambda_': 1.0,
'scalePosWeight': 2.0,
'subsample': 1.0,
'nthread': 1,
'numRound': 100,
'numWorkers': 1,
}
classifier = XGBoostClassifier(**params).setLabelCol(label).setFeaturesCols(features)
```
The CPU version classifier provides the API `setFeaturesCol` which only accepts a single column name, so vectorization for multiple feature columns is required.
```Python
classifier = XGBoostClassifier(**params).setLabelCol(label).setFeaturesCol('features')
```
The parameter `num_workers` should be set to the number of GPUs in the Spark cluster for the GPU version, while for the CPU version it is usually set to the number of CPU cores.
Concerning the tree method, the GPU version currently only supports `gpu_hist`, while `hist` is used for CPU training.
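For reference, a CPU-oriented parameter set might look like the sketch below; it only reuses keys already shown above, and the `numWorkers` value is an assumption (set it to your number of CPU cores).
```Python
# Hypothetical CPU-style parameters, reusing the params dict defined above
cpu_params = dict(params)
cpu_params['treeMethod'] = 'hist'   # CPU histogram tree method
cpu_params['numWorkers'] = 8        # assumption: number of CPU cores available
cpu_classifier = XGBoostClassifier(**cpu_params).setLabelCol(label).setFeaturesCol('features')
```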
#### Train the Data with Benchmark
```
def with_benchmark(phrase, action):
start = time()
result = action()
end = time()
print('{} takes {} seconds'.format(phrase, round(end - start, 2)))
return result
model = with_benchmark('Training', lambda: classifier.fit(train_data))
```
#### Save and Reload the Model
```
model.write().overwrite().save('/data/new-model-path')
loaded_model = XGBoostClassificationModel().load('/data/new-model-path')
```
#### Transformation and Show Result Sample
```
def transform():
result = loaded_model.transform(trans_data).cache()
result.foreachPartition(lambda _: None)
return result
result = with_benchmark('Transformation', transform)
result.select(label, 'rawPrediction', 'probability', 'prediction').show(5)
```
#### Evaluation
```
accuracy = with_benchmark(
'Evaluation',
lambda: MulticlassClassificationEvaluator().setLabelCol(label).evaluate(result))
print('Accuracy is ' + str(accuracy))
spark.stop()
```
|
github_jupyter
|
# Exp 41 analysis
See `./informercial/Makefile` for experimental
details.
```
import os
import numpy as np
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
# ls ../data/exp2*
```
# Load and process data
```
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp41"
best_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_best.pkl"))
sorted_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_sorted.pkl"))
best_params
```
# Performance
of best parameters
```
env_name = 'BanditHardAndSparse2-v0'
num_episodes = 20*10
# Run w/ best params
result = meta_bandit(
env_name=env_name,
num_episodes=num_episodes,
lr=best_params["lr"],
tie_threshold=best_params["tie_threshold"],
seed_value=19,
save="exp41_best_model.pkl"
)
# Plot run
episodes = result["episodes"]
actions =result["actions"]
scores_R = result["scores_R"]
values_R = result["values_R"]
scores_E = result["scores_E"]
values_E = result["values_E"]
# Get some data from the gym...
env = gym.make(env_name)
best = env.best
print(f"Best arm: {best}, last arm: {actions[-1]}")
# Init plot
fig = plt.figure(figsize=(6, 14))
grid = plt.GridSpec(5, 1, wspace=0.3, hspace=0.8)
# Do plots:
# Arm
plt.subplot(grid[0, 0])
plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylim(-.1, np.max(actions)+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# score
plt.subplot(grid[1, 0])
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
plt.scatter(episodes, scores_E, color="purple", alpha=0.9, s=2, label="E")
plt.ylabel("log score")
plt.xlabel("Episode")
plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Q
plt.subplot(grid[2, 0])
plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="R")
plt.scatter(episodes, values_E, color="purple", alpha=0.4, s=2, label="E")
plt.ylabel("log Q(s,a)")
plt.xlabel("Episode")
plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# -
plt.savefig("figures/epsilon_bandit.pdf", bbox_inches='tight')
plt.savefig("figures/epsilon_bandit.eps", bbox_inches='tight')
```
# Sensitivity
to parameter choices
```
total_Rs = []
ties = []
lrs = []
trials = list(sorted_params.keys())
for t in trials:
total_Rs.append(sorted_params[t]['total_R'])
ties.append(sorted_params[t]['tie_threshold'])
lrs.append(sorted_params[t]['lr'])
# Init plot
fig = plt.figure(figsize=(10, 18))
grid = plt.GridSpec(4, 1, wspace=0.3, hspace=0.8)
# Do plots:
# Arm
plt.subplot(grid[0, 0])
plt.scatter(trials, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("total R")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.scatter(trials, ties, color="black", alpha=.3, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("Tie threshold")
_ = sns.despine()
plt.subplot(grid[2, 0])
plt.scatter(trials, lrs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("lr")
_ = sns.despine()
```
# Distributions
of parameters
```
# Init plot
fig = plt.figure(figsize=(5, 6))
grid = plt.GridSpec(2, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(ties, color="black")
plt.xlabel("tie threshold")
plt.ylabel("Count")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.hist(lrs, color="black")
plt.xlabel("lr")
plt.ylabel("Count")
_ = sns.despine()
```
of total reward
```
# Init plot
fig = plt.figure(figsize=(5, 2))
grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(total_Rs, color="black", bins=50)
plt.xlabel("Total reward")
plt.ylabel("Count")
plt.xlim(0, 10)
_ = sns.despine()
```
|
github_jupyter
|
# DOPPELGANGER #
## Ever wondered what your "doppelganger" dog would look like?
# EXPERIMENT LOCALLY
### Prepare Environment
Install and import needed modules.
```
import numpy as np
import pandas as pd
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.xception import preprocess_input
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
Set the image path and explore the environment.
```
images_path = 'code/training/Images'
len(os.listdir(os.path.join(images_path)))
```
Set parameters.
```
batch_size = 200
img_w_size = 299
img_h_size = 299
```
Build Data Generator
```
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
image_generator = datagen.flow_from_directory(
images_path,
target_size=(img_w_size, img_h_size),
batch_size=batch_size,
class_mode=None,
shuffle=False)
images = image_generator.next()
images.shape
```
### Show a sample picture!
```
sample_image_idx = 1
plt.imshow((images[sample_image_idx] + 1) / 2)
```
## Transform Images to Lower Feature Space (Bottleneck) ##
```
base_model = Xception(include_top=False,
weights='imagenet',
input_shape=(img_w_size, img_h_size, 3),
pooling='avg')
bottlenecks = base_model.predict(images)
bottlenecks.shape
```
### Show Bottleneck
```
plt.plot(bottlenecks[0])
plt.show()
from sklearn.neighbors import DistanceMetric
dist = DistanceMetric.get_metric('euclidean')
```
### Calculate pairwise distances
```
bn_dist = dist.pairwise(bottlenecks)
bn_dist.shape
```
## Pre-Process Image Similarities ##
```
plt.imshow(bn_dist, cmap='gray')
```
Set visualization parameters.
```
n_rows = 5
n_cols = 5
n_result_images = n_rows * n_cols
```
# Find Similar Images #
## Define `image_search()`
```
def image_search(img_index, n_rows=n_rows, n_columns=n_cols):
n_images = n_rows * n_cols
# create Pandas Series with distances from image
dist_from_sel = pd.Series(bn_dist[img_index])
# sort Series and get top n_images
retrieved_indexes = dist_from_sel.sort_values().head(n_images)
retrieved_images = []
# create figure, loop over closest images indices
# and display them
plt.figure(figsize=(10, 10))
i = 1
for idx in retrieved_indexes.index:
plt.subplot(n_rows, n_cols, i)
plt.imshow((images[idx] + 1) / 2)
if i == 1:
plt.title('Selected image')
else:
plt.title("Dist: {:0.4f}".format(retrieved_indexes[idx]))
i += 1
retrieved_images += [images[idx]]
plt.tight_layout()
return np.array(retrieved_images)
```
## Perform Image Search
```
similar_to_idx = 0
plt.imshow((images[similar_to_idx] + 1) / 2)
similar_images_sorted = image_search(similar_to_idx)
similar_images_sorted.shape
```
## Convert images to gray-scale ##
```
grayscaled_similar_images_sorted = similar_images_sorted.mean(axis=3)
flattened_grayscale_images = grayscaled_similar_images_sorted.reshape(n_result_images, -1)
flattened_grayscale_images.shape
_, h, w = grayscaled_similar_images_sorted.shape
# Compute a PCA
n_components = 10
pca = PCA(n_components=n_components, whiten=True).fit(flattened_grayscale_images)
# apply PCA transformation to training data
pca_transformed = pca.transform(flattened_grayscale_images)
```
## Visualize Eigenfaces
```
def plot_gallery(images, titles, h, w, rows=n_rows, cols=n_cols):
plt.figure()
for i in range(rows * cols):
plt.subplot(rows, cols, i + 1)
plt.imshow(images[i].reshape(h, w), cmap=plt.cm.gray)
plt.title(titles[i])
plt.xticks(())
plt.yticks(())
eigenfaces = pca.components_.reshape((n_components, h, w))
eigenface_titles = ["eigenface {0}".format(i) for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w, 3, 3)
plt.show()
```
## Show Average Face
```
average_face = eigenfaces[9]
plt.imshow((average_face + 1) / 2)
```
# BUILD CONTAINER
```
!cat code/training/doppelganger-train.py
!cat code/training/Dockerfile
!cat code/training/doppelganger-train-deploy.yaml
```
# RUN TRAINING POD
Deploy the training job to Kubernetes
```
!kubectl create -f code/training/doppelganger-train-deploy.yaml
!kubectl logs doppelganger-train -c doppelganger-train --namespace deployment
!kubectl delete -f code/training/doppelganger-train-deploy.yaml
```
# RUN INFERENCE POD
Use the previously trained model and run an inference service on Kubernetes
```
!cat code/inference/DoppelgangerModel.py
!cat code/inference/Dockerfile-v1
!cat code/inference/doppelganger-predict-deploy.yaml
```
### Deploy the service
```
!kubectl create -f code/inference/doppelganger-predict-deploy.yaml
```
### Make a prediction
```
plt.imshow((images[0] + 1) / 2)
```
### Run a curl command to get a prediction from the REST API
```
!curl https://community.cloud.pipeline.ai/seldon/deployment/doppelganger-model/api/v0.1/predictions -d '{"data":{"ndarray":[[0]]}}' -H "Content-Type: application/json"
```
### Clean up
```
!kubectl delete -f code/inference/doppelganger-predict-deploy.yaml
```
|
github_jupyter
|
## Exercise 3.10 Taxicab (tramcar) problem
Suppose you arrive in a new city and see a taxi numbered 100. How many taxis are there in this city? Let us assume taxis are numbered sequentially as integers starting from 0, up to some unknown upper bound $\theta$. (We number taxis from 0 for simplicity; we can also count from 1 without changing the analysis.) Hence the likelihood function is $p(x) = U(0,\theta)$, the uniform distribution. The goal is to estimate $\theta$. We will use the Bayesian analysis from Exercise 3.9.
a) Suppose we see one taxi numbered 100, so $D = \{100\}, m = 100, N = 1$. Using an (improper) non-informative prior on $\theta$ of the form $p(\theta) = Pa(\theta|0, 0) \propto 1/\theta$, what is the posterior $p(\theta|D)$?
**Solution**: Using that of 3.9, the posterior $p(\theta|D) = Pa(\theta|1, 100)$.
b) Compute the posterior mean, mode and median number of taxis in the city, if such quantities exist.
**Solution**:
The Pareto distribution $Pa(\theta|1, 100)$ does not have a well-defined mean (since $\mathbb{E}(\theta|a, b) = \frac{ab}{a-1}$, which is infinite for $a \le 1$). The mode of the distribution is at 100.
The median of the distribution is given by
$$
\int_{\mathrm{median}}^\infty 100\,\theta^{-2}\,d\theta = 0.5
$$
which gives median = 200.
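Writing the integral out explicitly:
$$
\int_{\mathrm{median}}^{\infty} 100\,\theta^{-2}\,d\theta = \frac{100}{\mathrm{median}} = 0.5 \;\Longrightarrow\; \mathrm{median} = 200.
$$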
(c) Rather than trying to compute a point estimate of the number of taxis, we can compute the predictive density over the next taxicab number using
$$
p(D'|D, \alpha) = \int p(D'|\theta)p(\theta|D, \alpha)d\theta = p(D'|\beta)
$$
where $\alpha = (b, K)$ are the hyper-parameters, $\beta = (c, N + K )$ are the updated hyper-parameters. Now
consider the case $D = \{m\}$, and $D' = \{x\}$. Using Equation 3.95, write down an expression for $p(x|D, \alpha)$.
As above, use a non-informative prior $b = K = 0$.
**Solution**:
Let's compute the predictive density over the next taxi number: First, we need to compute the posterior $p(\theta|D)$:
$$
p(\theta|D) = \mathrm{Pareto}(\theta|N + K, \max(m, b)) = \mathrm{Pareto}(\theta|1 + 0, \max(m, 0)) = \mathrm{Pareto}(\theta|1, m)
$$
Since the posterior is a Pareto distribution like the prior, we can use it as a 'prior' for inference on $D'$ and reuse the expressions for the evidence $p(D)$ and the joint distribution $p(D, \theta)$. So our new 'prior' has the distribution $p(\theta|D) = \mathrm{Pareto}(\theta, K'=1, b'=m)$. The number of samples is $N'=1$ and $m'=\max(D') = x$. Now we can calculate the predictive distribution:
$$
\begin{aligned}
p(x|D, \alpha) & = \frac{K'}{(N'+K')b'^{N'}}\mathbb{I}(x\le m) + \frac{K'b'^{K'}}{(N'+K')m'^{N'+K'}}\mathbb{I}(x > m) \\
& = \frac{1}{2m}\mathbb{I}(x\le m) + \frac{m}{2x^2}\mathbb{I}(x > m)
\end{aligned}
$$
(d) Use the predictive density formula to compute the probability that the next taxi you will see (say, the next day) has number 100, 50 or 150, i.e., compute $p(x = 100|D,\alpha)$, $p(x = 50|D,\alpha)$, $p(x = 150|D, \alpha)$.
**Solution**:
If we take $m = 100$, then $p(100|D, \alpha) = 0.005$, $p(50|D, \alpha) = 0.005$, $p(150 |D, \alpha) \approx 0.002$.
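As a quick sanity check (a small sketch, not part of the original exercise), the predictive density above can be evaluated numerically for $m = 100$:
```
# Predictive density p(x | D): 1/(2m) for x <= m, m/(2 x^2) for x > m
m = 100

def predictive(x, m=m):
    return 1 / (2 * m) if x <= m else m / (2 * x ** 2)

for x in (100, 50, 150):
    print(x, predictive(x))   # 0.005, 0.005, ~0.0022
```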
|
github_jupyter
|
## Nearest Neighbor item based Collaborative Filtering

Source: https://towardsdatascience.com
```
##Dataset url: https://grouplens.org/datasets/movielens/latest/
import pandas as pd
import numpy as np
r_cols = ['user_id','movie_id','rating']
movies_df = pd.read_csv('u.item.csv', names=['movieId','title'],sep='|',usecols=range(2))
m_cols = ['movie_id','title']
rating_df=pd.read_csv('u.data.csv', names=['userId', 'movieId', 'rating'],usecols=range(3))
movies_df.head()
rating_df.head()
df = pd.merge(rating_df,movies_df,on='movieId')
df.head()
combine_movie_rating = df.dropna(axis = 0, subset = ['title'])
# combine_movie_rating.shape
movie_ratingCount = (combine_movie_rating.
groupby(by = ['title'])['rating'].
count().
reset_index().
rename(columns = {'rating': 'totalRatingCount'})
[['title', 'totalRatingCount']]
)
movie_ratingCount.head()
rating_with_totalRatingCount = combine_movie_rating.merge(movie_ratingCount, left_on = 'title', right_on = 'title', how = 'left')
rating_with_totalRatingCount.head()
pd.set_option('display.float_format', lambda x: '%.3f' % x)
print(movie_ratingCount['totalRatingCount'].describe())
popularity_threshold = 50
rating_popular_movie= rating_with_totalRatingCount.query('totalRatingCount >= @popularity_threshold')
rating_popular_movie.head()
rating_popular_movie.shape
## First lets create a Pivot matrix
movie_features_df=rating_popular_movie.pivot_table(index='title',columns='userId',values='rating').fillna(0)
movie_features_df.head()
from scipy.sparse import csr_matrix
movie_features_df_matrix = csr_matrix(movie_features_df.values)
# print(movie_features_df_matrix)
from sklearn.neighbors import NearestNeighbors
model_knn = NearestNeighbors(metric = 'cosine', algorithm = 'brute')
model_knn.fit(movie_features_df_matrix)
movie_features_df.shape
# query_index = np.random.choice(movie_features_df.shape[0])
# print(query_index)
query_index = movie_features_df.index.get_loc('Star Wars (1977)')
distances, indices = model_knn.kneighbors(movie_features_df.iloc[query_index,:].values.reshape(1, -1), n_neighbors = 6)
movie_features_df.head()
distances
indices
for i in range(0, len(distances.flatten())):
if i == 0:
print('Recommendations for {0}:\n'.format(movie_features_df.index[query_index]))
else:
print('{0}: {1}, with distance of {2}:'.format(i, movie_features_df.index[indices.flatten()[i]], distances.flatten()[i]))
```
## Cosine Similarity

```
my_ratings = movie_features_df[0]
my_ratings = my_ratings.loc[my_ratings!=0]
my_ratings
simCandidates = pd.Series()
for i in range(0,len(my_ratings.index)):
print("Adding sims for ",my_ratings.index[i],"...")
query_index = movie_features_df.index.get_loc(my_ratings.index[i])
# print(query_index)
distances, indices = model_knn.kneighbors(movie_features_df.iloc[query_index,:].values.reshape(1, -1), n_neighbors = 6)
distances = (1/(1+distances)) * my_ratings[i]
# print(distances)
sims = pd.Series(distances.flatten(),
name="ratings", index=movie_features_df.index[indices.flatten()])
# sims = distances.map(lambda x: (1/x)*myRatings[i])
print(sims)
simCandidates = simCandidates.append(sims)
print('\nsorting..\n')
simCandidates.sort_values(inplace=True,ascending=False)
print(simCandidates.head(20))
simCandidates = simCandidates.groupby(simCandidates.index).sum()
simCandidates.sort_values(inplace=True,ascending=False)
simCandidates.head(10)
filteredSims = simCandidates.drop(my_ratings.index)
filteredSims.head(10)
```
This is the final recommendation: movies similar to the ones I liked earlier, such as `Empire Strikes Back, The (1980)`, `Gone with the Wind (1939)` and `Star Wars (1977)`.
|
github_jupyter
|
# PaddleOCR DJL example
In this tutorial, we will be using pretrained PaddlePaddle model from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR) to do Optical character recognition (OCR) from the given image. There are three models involved in this tutorial:
- Word detection model: used to detect the word block from the image
- Word direction model: used to find if the text needs to rotate
- Word recognition model: used to recognize the text from the word block
## Import dependencies and classes
PaddlePaddle is one of the deep learning engines that require DJL hybrid mode to run inference. It does not itself contain NDArray operations and needs a supplemental DL framework to help with that, so we also import the PyTorch engine here to do the processing work.
```
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.14.0
%maven ai.djl.paddlepaddle:paddlepaddle-model-zoo:0.14.0
%maven org.slf4j:slf4j-api:1.7.32
%maven org.slf4j:slf4j-simple:1.7.32
// second engine to do preprocessing and postprocessing
%maven ai.djl.pytorch:pytorch-engine:0.14.0
import ai.djl.*;
import ai.djl.inference.Predictor;
import ai.djl.modality.Classifications;
import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.ImageFactory;
import ai.djl.modality.cv.output.*;
import ai.djl.modality.cv.util.NDImageUtils;
import ai.djl.ndarray.*;
import ai.djl.ndarray.types.DataType;
import ai.djl.ndarray.types.Shape;
import ai.djl.repository.zoo.*;
import ai.djl.paddlepaddle.zoo.cv.objectdetection.PpWordDetectionTranslator;
import ai.djl.paddlepaddle.zoo.cv.imageclassification.PpWordRotateTranslator;
import ai.djl.paddlepaddle.zoo.cv.wordrecognition.PpWordRecognitionTranslator;
import ai.djl.translate.*;
import java.util.concurrent.ConcurrentHashMap;
```
## The Image
Firstly, let's take a look at our sample image, a flight ticket:
```
String url = "https://resources.djl.ai/images/flight_ticket.jpg";
Image img = ImageFactory.getInstance().fromUrl(url);
img.getWrappedImage();
```
## Word detection model
In our word detection model, we load the model exported from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.0/doc/doc_en/inference_en.md#convert-detection-model-to-inference-model). After that, we can spawn a DJL Predictor from it called detector.
```
var criteria1 = Criteria.builder()
.optEngine("PaddlePaddle")
.setTypes(Image.class, DetectedObjects.class)
.optModelUrls("https://resources.djl.ai/test-models/paddleOCR/mobile/det_db.zip")
.optTranslator(new PpWordDetectionTranslator(new ConcurrentHashMap<String, String>()))
.build();
var detectionModel = criteria1.loadModel();
var detector = detectionModel.newPredictor();
```
Then, we can detect the word blocks from it. The original output from the model is a bitmap that marks all word regions. The `PpWordDetectionTranslator` converts the output bitmap into rectangular bounding boxes that we can use to crop the image.
```
var detectedObj = detector.predict(img);
Image newImage = img.duplicate();
newImage.drawBoundingBoxes(detectedObj);
newImage.getWrappedImage();
```
As you can see above, the word blocks are very narrow and do not include the whole body of the words. Let's try to extend them a bit for a better result. `extendRect` extends the box height and width by a certain scale, and `getSubImage` crops the image and extracts the word block.
```
Image getSubImage(Image img, BoundingBox box) {
Rectangle rect = box.getBounds();
double[] extended = extendRect(rect.getX(), rect.getY(), rect.getWidth(), rect.getHeight());
int width = img.getWidth();
int height = img.getHeight();
int[] recovered = {
(int) (extended[0] * width),
(int) (extended[1] * height),
(int) (extended[2] * width),
(int) (extended[3] * height)
};
return img.getSubImage(recovered[0], recovered[1], recovered[2], recovered[3]);
}
double[] extendRect(double xmin, double ymin, double width, double height) {
double centerx = xmin + width / 2;
double centery = ymin + height / 2;
if (width > height) {
width += height * 2.0;
height *= 3.0;
} else {
height += width * 2.0;
width *= 3.0;
}
double newX = centerx - width / 2 < 0 ? 0 : centerx - width / 2;
double newY = centery - height / 2 < 0 ? 0 : centery - height / 2;
double newWidth = newX + width > 1 ? 1 - newX : width;
double newHeight = newY + height > 1 ? 1 - newY : height;
return new double[] {newX, newY, newWidth, newHeight};
}
```
Let's try to extract one block out:
```
List<DetectedObjects.DetectedObject> boxes = detectedObj.items();
var sample = getSubImage(img, boxes.get(5).getBoundingBox());
sample.getWrappedImage();
```
## Word Direction model
This model, exported from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.0/doc/doc_en/inference_en.md#convert-angle-classification-model-to-inference-model), helps to identify whether the image needs to be rotated. The following code loads this model and creates a rotateClassifier.
```
var criteria2 = Criteria.builder()
.optEngine("PaddlePaddle")
.setTypes(Image.class, Classifications.class)
.optModelUrls("https://resources.djl.ai/test-models/paddleOCR/mobile/cls.zip")
.optTranslator(new PpWordRotateTranslator())
.build();
var rotateModel = criteria2.loadModel();
var rotateClassifier = rotateModel.newPredictor();
```
## Word Recognition model
The word recognition model is exported from [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.0/doc/doc_en/inference_en.md#convert-recognition-model-to-inference-model) and can recognize the text in the image. Let's load this model as well.
```
var criteria3 = Criteria.builder()
.optEngine("PaddlePaddle")
.setTypes(Image.class, String.class)
.optModelUrls("https://resources.djl.ai/test-models/paddleOCR/mobile/rec_crnn.zip")
.optTranslator(new PpWordRecognitionTranslator())
.build();
var recognitionModel = criteria3.loadModel();
var recognizer = recognitionModel.newPredictor();
```
Then we can try these two models on the previously cropped image:
```
System.out.println(rotateClassifier.predict(sample));
recognizer.predict(sample);
```
Finally, let's run these models on the whole image and see the outcome. DJL offers a rich image toolkit that allows you to draw the text on the image and display it.
```
Image rotateImg(Image image) {
try (NDManager manager = NDManager.newBaseManager()) {
NDArray rotated = NDImageUtils.rotate90(image.toNDArray(manager), 1);
return ImageFactory.getInstance().fromNDArray(rotated);
}
}
List<String> names = new ArrayList<>();
List<Double> prob = new ArrayList<>();
List<BoundingBox> rect = new ArrayList<>();
for (int i = 0; i < boxes.size(); i++) {
Image subImg = getSubImage(img, boxes.get(i).getBoundingBox());
if (subImg.getHeight() * 1.0 / subImg.getWidth() > 1.5) {
subImg = rotateImg(subImg);
}
Classifications.Classification result = rotateClassifier.predict(subImg).best();
if ("Rotate".equals(result.getClassName()) && result.getProbability() > 0.8) {
subImg = rotateImg(subImg);
}
String name = recognizer.predict(subImg);
names.add(name);
prob.add(-1.0);
rect.add(boxes.get(i).getBoundingBox());
}
newImage.drawBoundingBoxes(new DetectedObjects(names, prob, rect));
newImage.getWrappedImage();
```
|
github_jupyter
|
# Particle physics ... with R and the tidyverse
This tutorial uses open data from the CMS experiment at the LHC ([CMS Open Data](http://opendata.cern.ch/about/cms)), available on the [CERN Open Data portal](http://opendata.cern.ch).
To run this tutorial offline, see the [README](https://github.com/cms-opendata-education/cms-rmaterial-multiple-languages/blob/master/README.md) file, which has instructions in English. I am running the notebook on my local R installation.
You can also copy the lines of code and paste them into the RStudio console, or into a script, and then run it.
**Credits:**
* Adapted from the original by [Edith Villegas Garcia](https://github.com/edithvillegas), [Andrew John Lowe](https://github.com/andrewjohnlowe) and [Achintya Rao](https://github.com/RaoOfPhysics).
* Translated into Portuguese, with the fitting section added, by [Clemencia Mora Herrera](https://github.com/clemencia).
---
## The data
This tutorial introduces data analysis with R using data released to the public on the **CMS Open Data** portal.
These data come from LHC proton collisions recorded in 2011 (centre-of-mass energy of 7 TeV).
They contain measurements of final-state particles: two ***muons*** (a slightly heavier version of the electron, common in cosmic rays).
The first image shows a schematic drawing of the LHC and its four main experiments.
<figure>
    <img src="https://github.com/cms-opendata-education/zboson-exercise/blob/master/images/LHC.png?raw=true" alt="image missing" style="height: 350px" />
    <figcaption> Image 1: The LHC and its four main experiments. ©
        <a href="https://cds.cern.ch/record/1708847">CERN</a>
    </figcaption>
</figure>
In the LHC, protons are accelerated to very high speeds and made to collide at specific points (the four shown in the figure above), where each experiment's detectors record and store information about the collision products. The collision energy can be converted into the mass of new particles ($E=mc^2$), which may decay into lighter ones, leaving signals in the measuring instruments of each detector. The signals are translated into momentum ($p=mv$), particle charge, energy and the direction in which the particle leaves the interaction point.
The following video shows how collisions and measurements happen at the LHC accelerator.
```
library(IRdisplay)
display_html('<iframe width="560" height="315" src="https://www.youtube.com/embed/pQhbhpU9Wrg" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
```
If we combine the energy and momentum information of the two muons for each observation (_event_), we can see that at certain values of the ***invariant mass*** (the energy of a particle of mass $m$ at rest is $E=m c^2$; in special relativity this is always valid in the particle's own rest frame, so this rest mass is the same in different reference frames) the frequency of observations is higher. This means that a subatomic particle decayed into a pair of muons, and we call this a "resonance". We can infer the presence of these particles indirectly by observing their decay products, the muons, and how often they occur.
<figure>
    <img src="http://github.com/cms-opendata-education/zboson-exercise/blob/master/images/eventdisplay.png?raw=true" alt="image missing" style="height: 350px" />
    <figcaption> Image 2: Visualization of the detection of two muons in a collision at CMS. </figcaption>
</figure>
<figure>
    <img src="http://github.com/cms-opendata-education/zboson-exercise/blob/master/images/CMS.jpg?raw=true" alt="image missing" style="height: 350px" />
    <figcaption> Image 3: Structure of the CMS experiment, opened up. ©
        <a href="https://cds.cern.ch/record/1433717">CERN</a>
    </figcaption>
</figure>
<figure>
    <img src="http://github.com/cms-opendata-education/zboson-exercise/blob/master/images/CMS2.gif?raw=true" alt="image missing" style="height: 350px" />
    <figcaption>Image 4: Cross section of CMS and how particles are detected in it. ©
        <a href="https://cms-docdb.cern.ch/cgi-bin/PublicDocDB/ShowDocument?docid=4172">CERN</a>
    </figcaption>
</figure>
In this tutorial we build a frequency plot in which it is possible to see the peaks corresponding to some of these particles that prefer to decay into two muons.
## A brief introduction to R
R is a programming language widely used in statistics and data science.
_"R is the lingua franca of statistics"_ (W. Zeviani, UFPR)
[ http://leg.ufpr.br/~walmes/cursoR/data-vis/slides/01-tidyverse.pdf ]
### Data types in R
The basic data types in R are:
- Logical -- booleans: TRUE or FALSE
- Numeric -- numbers in general, real numbers
- Integer -- integers
- Complex -- complex numbers
- Character -- characters or strings of them: letters, numbers as characters, symbols and sentences
In general, without having to specify it, R automatically assigns a type to declared variables.
Any number is of type ```numeric```, but to specify an ```integer``` we have to add the letter "L" at the end of the value.
The line below declares the variable ```a``` with the integer value $5$.
```
a <- 5L
```
To declare complex variables the syntax is as follows:
```
b <- 5 + 3i
d <- 8 + 0i
```
Logical variables can take the value ```TRUE``` or ```FALSE```, but they can also be assigned the result of a conditional expression, e.g.:
```
c <- 3 > 5
```
Character variables can be letters, sentences or other characters, including numbers, between quotes.
```
cr <- "3!"
```
To see the value of each variable we simply call its name:
```
a
b
c
d
cr
```
#### Vectors
It is possible to group values into vector variables like this:
```
a <- c(2, 3, 5)
```
Vectors can be of any type. We can also apply conditions to vectors to create a logical vector:
```
a <- c(2, 5, 8, 3, 9)
b <- a > 3
```
The vector ```b``` is the result of evaluating the condition ```x>3``` for each element ```x``` of the vector ```a```.
```
b
```
To access an element of a vector, we call the name of the vector variable with the index of the desired element. Counting starts at 1 (other languages use 0).
So the first element of ```a``` is accessed like this:
```
a[1]
```
It is also possible to access the elements that satisfy a condition. The following line returns the subset (sub-vector) of the elements of ```a``` with values greater than $3$.
```
c<-a[a>3]
c
```
#### Matrices
In R we can create a matrix from vectors. Matrices are two-dimensional data structures.
We can create a matrix by specifying its values, the number of rows and columns, and whether it is filled by rows or by columns.
In this example we start with a vector from 1 to 9:
```
a <- c(1:9)
a
```
Then we declare ```A```, a 3x3 matrix filled row by row with the 9 elements of ```a```.
```
A <- matrix(a, nrow=3, ncol=3, byrow=TRUE)
A
```
To access the elements of a matrix, we use square brackets with the row and column numbers. For example, to access the element in the second row, third column of ```A``` we do:
```
A[2,3]
```
We can access a whole row by specifying only the first number and leaving the column index blank, and vice versa. For example, the call ```A[2,]``` returns the values of the second row of ```A```.
```
A[2,]
```
Matrices can be accessed with conditions, just as with vectors.
```
# Create a vector with values 1 to 25
a <- c(1:25)
# Create a matrix from this vector with 5 rows and 5 columns, filling it row by row.
A <- matrix(a, nrow=5, ncol=5, byrow=TRUE)
# Access the elements of A that are greater than 12
# by putting the condition "A>12" inside the brackets;
# the new variable is a vector.
C<-A[A>12]
print(C)
length(C)
```
#### Arrays
Arrays are similar to matrices, but they can have more than two dimensions.
Like matrices, they can be created from a vector by specifying the chosen dimensions.
```
# Create a vector with values 1 to 27
a <- c(1:27)
# Create an array from the vector a
# containing 3 matrices of 3 rows and 3 columns.
A <- array(a, dim=c(3,3,3))
# Print the array.
print(A)
```
#### Lists
Lists are like vectors, but they can contain different data types at the same time, and also vectors, among their elements.
```
l <- list(c(1,2,3),'a', 1, 1+5i)
l
```
#### Data Frames
Data frames are like lists of vectors of equal length. They are used to store data in table form.
To create a data frame we can do, for example:
```
data <- data.frame(
Nome = c("Thereza", "Diana"),
Genero = c('F','F'),
Idade = c(20, 23)
)
data
```
(For me, this is where the beauty of R lies ... such simplicity!)
To access a particular column, simply use ```$``` and the column name.
For example, to see the names:
```
data$Nome
```
If we want to see just one row (an instance or observation of your experiment, a measurement) we call the row number:
```
data[1,]
```
And R has several functions to import files (in text format, CSV, even XLS!) directly into data frames.
How can you not love it?
## Exploring the CMS Open Data
Now let's get to the task at hand: analysing CMS data.
---
### Importing data from CSV files
The [CERN Open Data](http://opendata.cern.ch) portal has several datasets available. We will use data that has already been reduced to CSV (comma-separated values) format, import it into R and analyse its contents.
The data for this tutorial comes from the following record: [http://opendata.cern.ch/record/545](http://opendata.cern.ch/record/545)
To import it we use the following command:
```
mumu <- read.csv("http://opendata.cern.ch/record/545/files/Dimuon_DoubleMu.csv")
```
The command above loaded the data from the file `Dimuon_DoubleMu.csv` into a variable called `mumu`, which is a data frame.
To look at the contents of the first 6 rows we can call the `head` function, and to find the number of observations we use the `nrow` function.
```
nrow(mumu)
head(mumu)
```
Our dataset has 100 thousand rows (each row is a collision event) and 21 columns (each column is a variable describing the event or the measurements of its final products).
At this point we can already bring in the *tidyverse*. To get a more "pleasant" view of the data we can switch from a data frame to a *tibble*.
```
require(tidyverse)
tbmumu<- mumu %>% as_tibble()
```
The tidyverse includes the `magrittr` package, which introduces the *pipe* operator (as in plumbing, but also "ceci n'est pas une pipe"), written `%>%`, which passes the object on its left as an argument to the function on its right. With these pipes it is possible to chain several successive operations concisely.
So the code above applies the `as_tibble` function to the data frame `mumu`, and the result (which is a *tibble*) is stored in the variable `tbmumu`.
When we print the first 6 rows of our *tibble*-type table, we get a view that:
* fits on the screen
* gives information about what did not fit
```
print(head(tbmumu))
```
From the output above we can see the first 12 columns, with their types, with colours for negative values, and the additional note that 9 variables are not shown. We can access columns and rows in the same way as with a data frame.
```
# print the first 6 elements of the column called E1;
# what this operator returns is a vector
print(head(tbmumu$E1))
# print the first row of data;
# this returns a new tibble that is a subset of the original
print(tbmumu[1,])
# This other example returns the subset of the first 10 rows
print(tbmumu[1:10,])
```
### Computing the invariant mass
Our table has observations of collisions with 2 *muons* in the final state.
As we saw in the table, we have values for the energy (E), the linear momentum (px, py, pz), the *pseudorapidity* (eta or η, related to the polar angle) and the azimuthal angle (phi or φ).
We can compute the invariant mass, that is, the equivalent rest energy that produced these muons, with the following equation:
$M = \sqrt{(\sum{E})^2 - ||\sum{p}||^2}$
where $M$ is the invariant mass, $\sum{E}$ is the sum of the (relativistic) energies of the final particles, and $\sum{p}$ is the vector sum of their linear momenta.
In our code, we will compute the invariant mass using the values of `px`, `py` and `pz` and the energy of the two muons. First we need to compute the magnitude of the vector sum of the momenta.
The **tidyverse** function `mutate` performs the specified calculation for each observation and _adds new variables_, in this case `ptotal`, `E` and `mass`.
```
tbmumu<-tbmumu%>%mutate(ptotal = sqrt((px1+px2)^2 + (py1+py2)^2 + (pz1+pz2)^2),
E = E1+E2,
mass = sqrt(E^2 - ptotal^2))
tbmumu%>% select(Run, Event, ptotal,E, mass)%>%head()
```
It is also possible to define a function to do our calculation:
```
myfunctionname = function(arg1, arg2...)
{
statements
return(a)
}
```
For example, we can define a function for the magnitude of the vector sum of two 3-component vectors, and another function that returns the invariant mass from `ptotal` and `E`.
```
sumvecmag = function(x1,x2,y1,y2,z1,z2){
x = x1+x2
y = y1+y2
z = z1+z2
tot = sqrt(x^2+y^2+z^2)
return(tot)
}
invmass = function(ptot, E) {
m = sqrt(E^2 - ptot^2)
return(m)
}
```
Now we can add a new computed column by calling the functions we defined:
```
tbmumu<- tbmumu %>% mutate(
ptotal_f = sumvecmag( px1, px2, py1, py2 , pz1, pz2),
E = E1 + E2,
mass_f=invmass(ptotal_f,E))
# Show the first 6 rows of the tibble, selecting only the columns of interest
print(head(tbmumu%>% select(ptotal,ptotal_f, E, mass, mass_f)))
```
### Making a histogram
In particle physics we work with frequency distributions, that is, histograms.
In this case, I want to look at only a portion of the data, where the mass variable is between 1.1 and 5 (GeV). For this I can use the tidyverse `filter` function, with `%>%` operators.
```
tbsel <- tbmumu%>% filter(mass>1.1 & mass < 5)
```
The histogram can be plotted with R's own base function.
```
Sys.setlocale(locale = "en_US.UTF-8") # to handle accented characters
library(repr)
options(repr.plot.width=6,repr.plot.height=4 ) # plot size that fits on the screen
hist(tbsel$mass, breaks = 200, xlim=c(1,5),
main="Histograma da Massa Invariante",
xlab = "Massa (GeV)",ylab="Frequência ",
lty="blank",
col="purple")
```
We see a larger peak near the value of $3.1$ GeV and another small one near $3.7$ GeV.
These values correspond to the masses of two particles that decay into two muons, or more precisely, into a muon and an anti-muon (a positively charged muon).
Looking at the [Particle Data Group](http://pdg.lbl.gov/) database, we can see that these particles are the **mesons** (**hadronic** particles composed of a quark and an anti-quark) ***J/ψ(1S)*** and ***ψ(2S)***, respectively.
### Plotting with the tidyverse
We can condense the process of importing the data, manipulating the variables and plotting the histogram into very compact code:
```
read_csv("http://opendata.cern.ch/record/545/files/Dimuon_DoubleMu.csv",
col_types = cols()) %>%
mutate(ptotal = sqrt((px1+px2)^2 + (py1+py2)^2 + (pz1+pz2)^2),
E = E1+E2,
mass = sqrt(E^2 - ptotal^2)) %>%
filter(mass >0.1 & mass<120) %>%
ggplot(aes(mass)) +
geom_histogram(bins = 250, fill = "purple", alpha = 0.5) +
xlab("Massa (GeV)") +
ylab("Frequência") +
scale_x_continuous(trans = 'log10') +
scale_y_continuous(trans = 'log10') +
ggtitle("Espectro de di-múons no CMS") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
```
Now it's getting good!
The chain of commands can be read like a sentence, as a succession of **actions** on the data:
"**Read** the file,
then **mutate** the content, creating the new variables `ptotal`, `E` and `mass`,
then **filter** to see only the observations in the desired interval,
then **plot** with the appropriate parameters."
The tidyverse plotting package is ``ggplot2``, in which the different plot options are chained with the `+` symbol. In this case I chose a `log-log` scale, which gives a view spanning several orders of magnitude and so lets us see several resonance peaks.
The plot options are:
- `ggplot()` the central function of the `ggplot2` package, which works on the principle of *layers*:
    1. `aes(mass)` means we are going to use the variable `mass`
    1. `geom_histogram()` take the variable and build the histogram
    1. `xlab()` and `ylab()` the axis labels
    1. `ggtitle()` the plot title
    1. `theme_bw()` black-and-white theme,
    1. `theme()` lets us tweak some specific elements of the plot
## Fitting a function to the $J/\psi$ peak
Going back to the *tibble* where we had already selected the interval containing the $J/\psi$ meson peak, we can call the `hist` function without plotting to get only the frequency counts in each bin.
```
a<-hist(tbsel$mass,breaks=200,plot=FALSE)
mydf<- data.frame(x=a$mids, nobs=a$counts)
print(head(mydf))
library(latex2exp)
mydf %>%
ggplot(aes(x,nobs, ymin=nobs-sqrt(nobs),ymax=nobs+sqrt(nobs))) +
geom_point() +
geom_errorbar() +
xlab("Massa (GeV)")+
ylab("Frequência")+
ggtitle(TeX("Histograma do pico dos mésons J/$\\psi$ e $\\psi$"))+
theme_bw() + theme(plot.title = element_text(hjust = 0.5))
```
### The function that describes the data
A possible function to describe these two peaks is the sum of a Gaussian with mean near $3.1$, another with mean near $3.7$, and a decreasing straight line as the "base" (our *background*).
```
my2gausspluslin <- function(x, mean1, sigma1, norm1,mean2,sigma2,norm2,a,b) {
f <- norm1 * exp(-1*((x-mean1)/sigma1)^2)+ norm2 * exp(-1*((x-mean2)/sigma2)^2) + a*x +b
return(f)
}
```
I will call the `nls` function, which optimizes the parameters of the function by nonlinear least squares ([documentation](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/nls.html)).
```
res <- nls( nobs ~ my2gausspluslin(x,mean1,sigma1,norm1,mean2,sigma2,norm2,a,b),
data = mydf,
start=list(mean1=3.1, sigma1=0.1, norm1=3000,mean2=3.7,sigma2=0.05,norm2=30,a=-10,b=100))
summary(res)
```
Here the result was saved in the variable `res`, and I can apply it with the `predict` function. We will add a column with the counts calculated from the fitted function to the data frame and save it under the new name `nexp`, for *expected*: the expected count according to a model of 2 Gaussians plus a linear background.
```
newdf<- mydf%>% mutate(nexp = predict(res,mydf))
print(head(newdf))
```
We plot the prediction:
```
newdf%>%
ggplot(aes(x,nexp))+
geom_path(color="purple")+
xlab("Massa (GeV)")+
ylab("frequência")+
ggtitle("Predição do ajuste da função gaussiana + reta decrescente")+
theme_bw() + theme(plot.title = element_text(hjust = 0.5))
```
### Result
Finally we plot the observed frequencies (with *Poisson* errors, $\sigma_n =\sqrt{n}$) together with the fitted prediction curve.
```
ggplot(newdf) +
geom_path(aes(x,nexp),color="purple")+
geom_point(aes(x,nobs))+
geom_errorbar(aes(x,nobs, ymin=nobs-sqrt(nobs),ymax=nobs+sqrt(nobs)))+
xlab("Massa (GeV)")+
ylab("frequência")+
ggtitle("Resultado dos dados com a função do ajuste")+
theme_bw() +theme(plot.title = element_text(hjust = 0.5))
```
## Motivation!
Now I'm excited; shall we take a look at the highest-mass peak in this spectrum?
It is the peak of the Z boson, which is analogous to a photon, except that it has a large mass (for a subatomic particle).
```
tbZboson<-tbmumu %>% filter(mass>70 & mass <110)
tbZboson %>%
ggplot(aes(mass)) +
geom_histogram(bins = 80, fill = "purple", alpha = 0.5) +
xlab("Massa (GeV)") +
ylab("Frequência") +
ggtitle("Pico do bóson Z") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
tbZboson %>% filter(abs(eta1)<2.4 & abs(eta2)<2.4, pt1>20 & pt2>20, type1=="G" & type2=="G") %>%
ggplot(aes(mass)) +
geom_histogram(bins = 80, fill = "purple", alpha = 0.5) +
xlab("Massa (GeV)") +
ylab("Frequência") +
ggtitle("Pico do bóson Z") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
zfilt<- tbZboson %>% filter(abs(eta1)<2.4 & abs(eta2)<2.4, pt1>20 & pt2>20, type1=="G" & type2=="G")
zh<- hist(zfilt$mass,breaks=80,plot=FALSE)
zdf<-data.frame(x=zh$mids,n=zh$counts)
print(head(zdf))
breitwpluslin <- function(x,M,gamma,N,a,b){
b<- a*x +b
s<- N*( (2*sqrt(2)*M*gamma*sqrt(M**2*(M**2+gamma**2)))/(pi*sqrt(M**2+sqrt(M**2*(M**2+gamma**2)))) )/((x**2-M**2)**2+M**2*gamma**2)
return(b+s)
}
library(minpack.lm)
resz <-nlsLM( n~ breitwpluslin(x,m,g,norm,a,b),
data = zdf,
start=list(m=90, g=3, norm =100,a=-10,b=100))
summary(resz)
newz<- zdf %>% mutate(nexp=predict(resz,zdf))
print(as_tibble(newz))
newz%>%
ggplot(aes(x,nexp))+
geom_path(color="purple")+
geom_point(aes(x,n))+
geom_errorbar(aes(x,n, ymin=n-sqrt(n),ymax=n+sqrt(n)))+
xlab("Massa (GeV)")+
ylab("frequência")+
ggtitle("Predição do ajuste da função Breit-Wigner + reta decrescente")+
theme_bw() + theme(plot.title = element_text(hjust = 0.5))
```
Ahhh, I loved it!
Purple hearts 4 ever 💜💜💜
|
github_jupyter
|
Notes from the author; more machine-learning notes are available [here](https://github.com/zlotus/notes-LSJU-machine-learning).
# Lecture 15: SVD for PCA; Independent Components Analysis
Let us review the previous lecture's content, the PCA algorithm, which has three main steps:
1. Normalize the data to zero mean and unit variance;
2. Compute the covariance matrix $\displaystyle\varSigma=\frac{1}{m}\sum_{i=1}^mx^{(i)}\left(x^{(i)}\right)^T$;
3. Find the top $k$ eigenvectors of $\varSigma$.
At the end of the previous lecture we also looked at the application of PCA to face recognition. Consider that in face recognition $x^{(i)}\in\mathbb R^{10000}$, so $\varSigma\in\mathbb R^{10000\times10000}$ has on the order of $10^8$ entries; computing the eigenvectors of a matrix of that size is not easy, so let us set this problem aside for the moment.
Consider another example. Earlier ([Lecture 5](https://github.com/zlotus/notes-LSJU-machine-learning/blob/master/chapter05.ipynb)) we discussed spam classification, where we built a vocabulary vector whose $j$-th entry is $1$ if the corresponding word appears in the email and $0$ otherwise. When we apply PCA to this kind of data, the resulting algorithm gets a different name: **latent semantic indexing (LSI)**. In LSI we usually skip the preprocessing stage, because normalizing the data to unit variance would greatly inflate the weight of rare words. Suppose we are given the content of a document $x^{(i)}$ and want to know which of the documents we already have is most similar to it, i.e. we want to measure the similarity of two documents represented as high-dimensional input vectors, $\mathrm{sim}\left(x^{(i)},x^{(j)}\right),\ x\in\mathbb R^{50000}$. The usual approach is to measure the angle between the two vectors: the smaller the angle, the more similar the documents, so $\displaystyle\mathrm{sim}\left(x^{(i)},x^{(j)}\right)=\cos\theta=\frac{\left(x^{(i)}\right)^Tx^{(j)}}{\left\lVert x^{(i)}\right\rVert\left\lVert x^{(j)}\right\rVert}$ (a small numeric sketch of this measure follows below). Look at the numerator: $\left(x^{(i)}\right)^Tx^{(j)}=\sum_kx_k^{(i)}x_k^{(j)}=\sum_k1\{\textrm{documents }i\textrm{ and }j\textrm{ both contain word }k\}$; if the documents share no words, the value is $0$. But then, if document $i$ contains the word "study" while document $j$ contains the word "learn", an article about "study strategy" and an article about "method of learning" would be unrelated under this measure, whereas we would like them to be related. Initially the "learn" direction and the "study" direction are orthogonal, so their inner product is zero; LSI finds a vector $u$ between these two directions and projects "learn" and "study" onto $u$, where the two projections lie close together, so their inner product becomes a positive number indicating that they are related. As a result, when the algorithm encounters an article about "politics" and another article containing the names of many politicians, it will judge the two articles to be related.
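A minimal sketch of this cosine-similarity measure (Python/NumPy; the tiny vocabulary and 0/1 word-indicator vectors below are made up purely for illustration):
```python
import numpy as np

# Hypothetical 0/1 word-indicator vectors for two documents over a tiny vocabulary.
x_i = np.array([1.0, 0.0, 1.0, 0.0, 0.0, 1.0])
x_j = np.array([0.0, 1.0, 1.0, 0.0, 1.0, 0.0])

def cosine_similarity(u, v):
    """sim(u, v) = cos(theta) = u^T v / (||u|| ||v||)."""
    return float(u @ v) / (np.linalg.norm(u) * np.linalg.norm(v))

print(cosine_similarity(x_i, x_j))  # ~0.33 here: the two documents share exactly one word
```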
## Singular Value Decomposition (SVD)
We introduce the singular value decomposition (see the linear-algebra notes on the [SVD](http://nbviewer.jupyter.org/github/zlotus/notes-linear-algebra/blob/master/chapter30.ipynb)) to solve the problem raised at the start: finding eigenvectors of a very large matrix. For example, in latent semantic indexing each input is a $50000$-dimensional vector, so the corresponding $\varSigma\in\mathbb R^{50000\times50000}$; a matrix of that size is too large, and we need another way to implement PCA.
For a matrix $A\in\mathbb R^{m\times n}$ we can always write $A=UDV^T,\ U\in\mathbb R^{m\times m},\ D\in\mathbb R^{m\times n},\ V^T\in\mathbb R^{n\times n}$, where $D=\begin{bmatrix}\sigma_1&&&\\&\sigma_2&&\\&&\ddots&\\&&&\sigma_n\end{bmatrix}$ is a diagonal matrix and the $\sigma_i$ are called the singular values of the matrix. The shapes of the factors are $\begin{bmatrix}A\\m\times n\end{bmatrix}=\begin{bmatrix}U\\m\times m\end{bmatrix}\begin{bmatrix}D\\m\times n\end{bmatrix}\begin{bmatrix}V^T\\n\times n\end{bmatrix}$.
Now look at the (unnormalized) covariance matrix $\displaystyle\varSigma=\sum_{i=1}^mx^{(i)}\left(x^{(i)}\right)^T$. In an earlier chapter ([Lecture 2](chapter02.ipynb)) we introduced the "design matrix" construction, which stacks each sample vector as one row of a matrix $X$: $X=\begin{bmatrix}\text{---}\left(x^{(1)}\right)^T\text{---}\\\text{---}\left(x^{(2)}\right)^T\text{---}\\\vdots\\\text{---}\left(x^{(m)}\right)^T\text{---}\end{bmatrix}$. We can then write the covariance matrix in terms of the design matrix: $\varSigma=\begin{bmatrix}\mid&\mid&&\mid\\x^{(1)}&x^{(2)}&\cdots&x^{(m)}\\\mid&\mid&&\mid\end{bmatrix}\begin{bmatrix}\text{---}\left(x^{(1)}\right)^T\text{---}\\\text{---}\left(x^{(2)}\right)^T\text{---}\\\vdots\\\text{---}\left(x^{(m)}\right)^T\text{---}\end{bmatrix}=X^TX$.
The last step is to compute the top $k$ eigenvectors of $\varSigma$. We choose to take the SVD of $X$, $X=UDV^T$, so that $X^TX=VD^TU^TUDV^T=VD^TDV^T=\varSigma$; we then sort the singular values in $D$ from largest to smallest and take from $V$ the eigenvectors corresponding to the top $k$ singular values.
It is easy to see that $X\in\mathbb R^{m\times50000}$, and taking the SVD of a matrix of this shape is much faster than computing the eigenvectors of $\varSigma\in\mathbb R^{50000\times50000}$ directly. This is the computation behind implementing PCA with the SVD.
(Note, however, that different software packages, and even different versions of the same package, may follow different default conventions for the shapes returned by the SVD: the SVD often produces a $U$ and $D$ with many zero entries, and the software may drop those entries according to some convention (an "economy" SVD). So, when using the SVD, pay attention to the shape convention being used.)
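As an illustration of this SVD route (a NumPy sketch with random, made-up data; the dimensions are arbitrary), the top-$k$ right singular vectors of the design matrix coincide, up to sign, with the leading eigenvectors of $X^TX$:
```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 50))     # design matrix: 500 samples, 50 features (made up)
X -= X.mean(axis=0)                # zero-mean each feature

k = 5
U, s, Vt = np.linalg.svd(X, full_matrices=False)   # economy SVD: X = U @ diag(s) @ Vt
top_k_directions = Vt[:k]                          # top-k principal directions
Z = X @ top_k_directions.T                         # k-dimensional representation of each sample

# Cross-check against the eigen-decomposition of the covariance matrix.
Sigma = X.T @ X / X.shape[0]
eigvals, eigvecs = np.linalg.eigh(Sigma)           # eigenvalues in ascending order
print(np.allclose(np.abs(eigvecs[:, -1]), np.abs(Vt[0])))  # True: same leading direction up to sign
```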
## Comparison of the unsupervised learning algorithms
When we introduced the factor analysis model earlier, we noted that it models each factor $z^{(i)}$ with a Gaussian; it is a density-estimation algorithm that tries to model the probability density of the training set $X$. The PCA introduced afterwards is different: it is not a probabilistic algorithm, since it fits no probability distribution to the training set but instead searches directly for a subspace. From this we can roughly see how to choose between factor analysis and PCA. If the goal is simply to reduce the dimensionality of the data and find the subspace the data lies in, we lean towards PCA; factor analysis assumes the data already lies in (or near) some subspace, so if we have very high-dimensional data $X$ and intend to build a model of $X$, we should use factor analysis (for example, for anomaly detection we can build a model of $P(X)$: when a low-probability event occurs, we can decompose it over the factor distributions and assess how anomalous it is). What the two algorithms have in common is that both assume the data lies in, or close to, some low-dimensional subspace.
Now recall the two unsupervised learning methods introduced at the very beginning: the mixture of Gaussians model and the $k$-means algorithm. What these two have in common is that both assume the data is clustered into a few groups. The difference is that the mixture of Gaussians is a density-estimation algorithm while $k$-means is not. So, if we need to split the data into clusters and build a model of each cluster, we lean towards the Gaussian mixture model; if we only want to cluster the data and do not need a probabilistic description of each cluster, we lean towards $k$-means.
Combining the points above gives a table that is easy to remember:
$$
\begin{array}
{c|c|c}
&\textbf{Model }P(x)&\textbf{Not probabilistic}\\\hline
\textbf{Subspace}&\textrm{Factor Analysis}&\textrm{PCA}\\\hline
\textbf{Cluster}&\textrm{Mixtures of Gaussians}&k\textrm{-means}
\end{array}
$$
# Part XII: Independent Components Analysis
Next we introduce Independent Components Analysis (ICA). Like PCA, ICA looks for a new basis in which to re-express the training data; the goals of the two algorithms, however, are quite different.
Take a concrete problem as an example. In Lecture 1 we mentioned the application of separating a speaker's audio from the noisy background at a cocktail party. Suppose $n$ speakers are talking simultaneously at a party, and a microphone placed in the room only picks up an audio signal in which the voices of the $n$ speakers are superimposed. Now suppose we have $n$ different microphones; because each microphone sits at a different distance from each speaker, each microphone records a different superposition of the speakers' voices. Using these microphone recordings, can we separate out the $n$ speakers' individual audio signals?
To state the problem formally, we assume there is some data $s\in\mathbb R^n$ generated by $n$ mutually independent sources, while what we are able to observe is:
$$
x=As
$$
Here $A$ is an unknown square matrix, usually called the **mixing matrix**. Through repeated observations we obtain a data set $\left\{x^{(i)};i=1,\cdots,m\right\}$, and our goal is to recover the sources $s^{(i)}$ that generated the observed data ($x^{(i)}=As^{(i)}$).
In the cocktail-party problem, $s^{(i)}$ is an $n$-dimensional vector, with $s_j^{(i)}$ the sound produced by speaker $j$ at the $i$-th time sample; $x^{(i)}$ is also an $n$-dimensional vector, with $x_j^{(i)}$ the signal recorded by microphone $j$ at the $i$-th time sample.
Let $W=A^{-1}$ be the **unmixing matrix**. Our goal is to find $W$ and then use $s^{(i)}=Wx^{(i)}$ to recover the independent sources from the microphone recordings. Following our usual notation, we write $w_i^T$ for the $i$-th row of $W$, so $W=\begin{bmatrix}\text{---}w_1^T\text{---}\\\vdots\\\text{---}w_n^T\text{---}\end{bmatrix}$. Then, with $w_i\in\mathbb R^n$, the $j$-th speaker's source can be recovered as $s_j^{(i)}=w_j^Tx^{(i)}$.
## 1. ICA ambiguities
To what extent can $W=A^{-1}$ recover the sources? Without any prior knowledge of the sources and the mixing matrix, it is not hard to see that $A$ has inherent ambiguities that make it impossible to recover and match every $s^{(i)}$ from the $x^{(i)}$ alone.
* Let $P$ be an $n\times n$ permutation matrix, i.e. each row and each column of $P$ contains exactly one $1$ and zeros elsewhere; for example $P=\begin{bmatrix}0&1&0\\1&0&0\\0&0&1\end{bmatrix},\ P=\begin{bmatrix}0&1\\1&0\end{bmatrix},\ P=\begin{bmatrix}1&0\\0&1\end{bmatrix}$. Its effect is that, for a vector $z$, $Pz$ is a vector $z'$ whose coordinates are a reordering of the coordinates of $z$. Given only the $x^{(i)}$, we cannot distinguish $W$ from $PW$ (that is, we cannot tell which row of the unmixing matrix corresponds to which speaker). As one would expect, the sources suffer from this permutation ambiguity as well, but for most applications it does not matter.
* Furthermore, we cannot determine the scale of the $w_i$. For instance, if $A$ were replaced by $2A$ while every $s^{(i)}$ were replaced by $0.5s^{(i)}$, the observed $x^{(i)}=2A\cdot(0.5)s^{(i)}$ would be unchanged. More generally, if a column of $A$ is scaled by a factor $\alpha$ while the corresponding source is scaled by $\frac{1}{\alpha}$, then there is no way to recover this rescaling from the $x^{(i)}$ alone. Hence we cannot recover the original scale of the sources. However, for the problems we care about (including the cocktail-party problem) this ambiguity does not matter either. In particular, scaling a speaker's source $s_j^{(i)}$ by a positive factor $\alpha$ only changes that speaker's volume, and even flipping the sign does not matter: $s_j^{(i)}$ and $-s_j^{(i)}$ sound exactly the same when played through a loudspeaker. In summary, if the $w_i$ returned by the algorithm are scaled by some nonzero factors, the corresponding sources recovered by $s_i=w_i^Tx$ are scaled by the same factors, and this ambiguity is not important. (These ICA ambiguities also apply to the MEG brain data mentioned earlier.)
Are the two cases above the only ambiguities ICA suffers from? As long as the sources $s_i$ are non-Gaussian, the answer is yes.
* Let us look at an example of the trouble caused by Gaussian data. Consider the case $n=2$ with $s\sim\mathcal N(0,I)$, where $I$ is the $2\times2$ identity matrix. Note that the contours of the standard normal density $\mathcal N(0,I)$ are circles centered at the origin, so the density is rotationally symmetric. Suppose we observe some $x=As$ with mixing matrix $A$; since the sources follow $\mathcal N(0,I)$, the mixed $x$ is also Gaussian, with mean $0$ and covariance $\mathrm E\left[xx^T\right]=\mathrm E\left[Ass^TA^T\right]=AA^T$. Now let $R$ be an arbitrary orthogonal matrix (informally, a rotation or reflection matrix), so that $RR^T=R^TR=I$, and let $A'=AR$. If the sources had been mixed by $A'$ instead of $A$, we would observe $x'=A's$. But $x'$ is also Gaussian, again with mean $0$ and covariance $\mathrm E\left[x'\left(x'\right)^T\right]=\mathrm E\left[A'ss^T\left(A'\right)^T\right]=\mathrm E\left[ARss^T\left(AR\right)^T\right]=ARR^TA^T=AA^T$. So whether the mixing matrix is $A$ or $A'$, the observations follow the same Gaussian distribution $\mathcal N\left(0,AA^T\right)$, and we cannot tell whether the observed random variable came from mixing with $A$ or with $A'$. The mixing matrix can therefore contain an arbitrary rotation that leaves no trace in the observed data, and we cannot fully recover the sources.
The argument above rests on the fact that the multivariate standard normal distribution is rotationally symmetric. Although ICA has this weakness for Gaussian data, as long as the data is not Gaussian, and given enough of it, we can still separate the $n$ independent sources.
## 2. Densities and linear transformations
Before deriving the ICA algorithm, we briefly discuss the effect of a linear transformation on a probability density.
Suppose we have a random variable $s$ drawn from a density $p_s(s)$; for simplicity, let $s\in\mathbb R$ be a real number. Now define the random variable $x$ by $x=As$ (with $x\in\mathbb R,\ A\in\mathbb R$), and let $p_x$ be the density of $x$. What is $p_x$?
Let $W=A^{-1}$. To compute the density at a particular $x$, it is tempting to compute $s=Wx$, evaluate $p_s$ at that point, and conclude that $p_x(x)=p_s(Wx)$; *however, this conclusion is wrong*. For example, let $s\sim\mathrm{Uniform}[0,1]$, so the density of $s$ is $p_s(s)=1\{0\leq s\leq1\}$, and let $A=2$, so $x=2s$. Clearly $x$ is uniform on the interval $[0,2]$, so its density is $p_x(x)=(0.5)1\{0\leq x\leq2\}$, while $W=A^{-1}=0.5$; obviously $p_s(Wx)$ is not equal to $p_x(x)$. What actually holds is the identity $p_x(x)=p_s(Wx)\lvert W\rvert$.
More generally, if $s$ is a vector-valued random variable drawn from a distribution with density $p_s$, and $x=As$ for an invertible matrix $A$ whose inverse is $W=A^{-1}$, then the density of $x$ is:
$$
p_x(x)=p_s(Wx)\cdot\lvert W\rvert
$$
**Note:** if you have seen the fact that $A$ maps the unit cube $[0,1]^n$ to a set of volume $\lvert A\rvert$ (see [Cramer's rule, inverse matrices, and volume](http://nbviewer.jupyter.org/github/zlotus/notes-linear-algebra/blob/master/chapter20.ipynb)), there is another way to remember the formula for $p_x$ above, which also generalizes the $1$-dimensional example. Let $A\in\mathbb R^{n\times n}$, let $W=A^{-1}$, let $C_1=[0,1]^n$ be the $n$-dimensional hypercube, and define $C_2=\{As:\ s\in C_1\}\subseteq\mathbb R^n$ (that is, $C_2$ is the image of $C_1$ under the map $A$). Under these conditions, a standard result from linear algebra (which is also one way to define the determinant) says that the volume of $C_2$ is $\lvert A\rvert$. Suppose $s$ is uniformly distributed on $[0,1]^n$, so its density is $p_s(s)=1\{s\in C_1\}$. Then $x$ is uniformly distributed on $C_2$, with density $\displaystyle p_x(x)=\frac{1\{x\in C_2\}}{\mathrm{Vol}(C_2)}$ (since it must integrate to $1$ over $C_2$). Using the fact that the determinant of an inverse matrix is the reciprocal of the original determinant, $\displaystyle\frac{1}{\mathrm{Vol}(C_2)}=\frac{1}{\lvert A\rvert}=\left\lvert A^{-1}\right\rvert=\lvert W\rvert$. Finally we again obtain $p_x(x)=1\{x\in C_2\}\lvert W\rvert=1\{Wx\in C_1\}\lvert W\rvert=p_s(Wx)\lvert W\rvert$.
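A quick Monte-Carlo sanity check of $p_x(x)=p_s(Wx)\lvert W\rvert$ for the one-dimensional example above (a Python sketch; the evaluation point and bin width are arbitrary choices):
```python
import numpy as np

rng = np.random.default_rng(0)
s = rng.uniform(0.0, 1.0, size=1_000_000)   # s ~ Uniform[0, 1]
x = 2.0 * s                                  # A = 2, so x ~ Uniform[0, 2]
W = 0.5                                      # W = A^{-1}

def p_s(v):                                  # density of Uniform[0, 1]
    return float((0.0 <= v) and (v <= 1.0))

x0, width = 1.3, 0.05
empirical = np.mean(np.abs(x - x0) < width / 2) / width   # histogram estimate of p_x(x0)
formula = p_s(W * x0) * abs(W)                            # change-of-variables formula
print(empirical, formula)                                 # both are approximately 0.5
```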
## 3. The ICA algorithm
We can now derive the ICA algorithm. The algorithm is due to Bell and Sejnowski; the interpretation of ICA given here views it as an algorithm for maximum-likelihood estimation (this differs from the algorithm's original derivation, which involved a complicated idea called the infomax principle that is no longer needed in modern derivations of ICA).
We assume that each source $s_i$ comes from a distribution defined by a density $p_s$, and that the joint distribution of $s$ is:
$$
p(s)=\prod_{i=1}^np_s(s_i)
$$
Note that we model the joint distribution as the product of the marginal distributions of the sources, i.e. we implicitly assume that the sources are independent. Using the formula from the previous section, we obtain the density of $x=As=W^{-1}s$:
$$
p(x)=\prod_{i=1}^np_s\left(w_i^Tx\right)\cdot\lvert W\rvert
$$
All that remains is to specify a density $p_s$ for each independent source.
Recall from probability theory that, for a real-valued random variable $z$, its cumulative distribution function (CDF) $F$ can be computed from its probability density function (PDF): $F(z_0)=P(z\leq z_0)=\int_{-\infty}^{z_0}p_z(z)\mathrm dz$; conversely, the density of $z$ can be obtained by differentiating the CDF, $p_z(z)=F'(z)$.
Therefore, to specify a PDF for each $s_i$, it suffices to specify the corresponding CDF, which is a monotonically increasing function from $0$ to $1$. From the previous section we know that ICA does not work for Gaussian data, so we cannot choose the Gaussian CDF. We instead choose a reasonable "default" function whose value also increases slowly and monotonically from $0$ to $1$: the logistic (sigmoid) function introduced earlier ([Lecture 3](chapter03.ipynb)), $\displaystyle g(s)=\frac{1}{1+e^{-s}}$. We then take $p_s(s)=g'(s)$.
(If we have prior knowledge about the sources, i.e. we already know the form of their PDF, we can replace the logistic function above with the CDF corresponding to that PDF. If the form of the PDF is unknown, the logistic function is a very reasonable default, since it performs well on many problems. Also, in this example the observed input data $x^{(i)}$ is either preprocessed to have zero mean, or naturally has zero mean (as audio signals do). Zero mean is required here because we assumed $p_s(s)=g'(s)$, which implies $\mathrm E[s]=0$ (differentiating the logistic function gives a symmetric function, which is the PDF, and a random variable with a PDF symmetric about zero must have zero mean), and hence $\mathrm E[x]=\mathrm E[As]=0$. As an aside, besides the derivative of the logistic function, the [Laplace distribution](https://en.wikipedia.org/wiki/Laplace_distribution) ([Chinese wiki](https://zh.wikipedia.org/wiki/%E6%8B%89%E6%99%AE%E6%8B%89%E6%96%AF%E5%88%86%E5%B8%83)) $\displaystyle p(s)=\frac{1}{2}e^{-\lvert s\rvert}$ is also a common choice of PDF.)
The square matrix $W$ is the parameter of the model. Given a training set $\left\{x^{(i)};i=1,\cdots,m\right\}$, the log-likelihood is:
$$
\mathscr l(W)=\sum_{i=1}^m\left(\sum_{j=1}^n\log g'\left(w_j^Tx^{(i)}\right)+\log\lvert W\rvert\right)
$$
Our goal is to maximize this function with respect to $W$. Using the identity $\nabla_W\lvert W\rvert=\lvert W\rvert\left(W^{-1}\right)^T$ (see [Lecture 2](chapter02.ipynb)) and differentiating the likelihood, we obtain a stochastic gradient ascent update rule with learning rate $\alpha$. For a training example $x^{(i)}$ the update is:
$$
W:=W+\alpha\left(\begin{bmatrix}
1-2g\left(w_1^Tx^{(i)}\right)\\
1-2g\left(w_2^Tx^{(i)}\right)\\
\vdots\\
1-2g\left(w_n^Tx^{(i)}\right)
\end{bmatrix}\left(x^{(i)}\right)^T+\left(W^T\right)^{-1}\right)
$$
After the algorithm has converged, we recover the sources via $s^{(i)}=Wx^{(i)}$.
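A bare-bones NumPy sketch of this update rule (not from the lecture; the sources, mixing matrix, learning rate and epoch count below are all made up for illustration):
```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def ica_sga(X, alpha=1e-3, n_epochs=20, seed=0):
    """Stochastic gradient ascent on the ICA log-likelihood; X has shape (m, n)."""
    rng = np.random.default_rng(seed)
    m, n = X.shape
    W = np.eye(n)
    for _ in range(n_epochs):
        for i in rng.permutation(m):              # shuffle the sample order each epoch
            x = X[i].reshape(n, 1)
            g = sigmoid(W @ x)                    # column vector of g(w_j^T x^(i))
            W += alpha * ((1.0 - 2.0 * g) @ x.T + np.linalg.inv(W).T)
    return W

# Hypothetical demo: two Laplace-distributed sources mixed by a known matrix.
rng = np.random.default_rng(1)
S = rng.laplace(size=(2000, 2))                   # true sources s^(i)
A = np.array([[1.0, 0.5], [0.5, 1.0]])            # mixing matrix
X = S @ A.T                                       # observations x^(i) = A s^(i)
W = ica_sga(X)
S_hat = X @ W.T                                   # recovered sources (up to permutation/scale)
```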
**Note:** when constructing the likelihood we implicitly assumed that the training examples $x^{(i)}$ are independent of each other (note that this does not mean the components within a single $x^{(i)}$ are independent), which gives the likelihood of the training set as $\prod_ip\left(x^{(i)};W\right)$. This assumption is wrong when the $x^{(i)}$ come from speech audio or other time-series data, since consecutive samples are not independent; however, it can be shown that, given enough training data, correlated samples do not hurt the performance of ICA. That said, for correlated training samples, randomly shuffling the order in which examples are fed to stochastic gradient ascent can sometimes speed up convergence. (It can help to reshuffle the training set several times and feed the examples to the model in different orders.)
|
github_jupyter
|
<h1 align="center">SimpleITK Spatial Transformations</h1>
**Summary:**
1. Points are represented by vector-like data types: Tuple, Numpy array, List.
2. Matrices are represented by vector-like data types in row major order.
3. Default transformation initialization as the identity transform.
4. Angles specified in radians, distances specified in unknown but consistent units (nm,mm,m,km...).
5. All global transformations **except translation** are of the form:
$$T(\mathbf{x}) = A(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c}$$
Nomenclature (when printing your transformation):
* Matrix: the matrix $A$
* Center: the point $\mathbf{c}$
* Translation: the vector $\mathbf{t}$
* Offset: $\mathbf{t} + \mathbf{c} - A\mathbf{c}$
6. Bounded transformations, BSplineTransform and DisplacementFieldTransform, behave as the identity transform outside the defined bounds.
7. DisplacementFieldTransform:
* Initializing the DisplacementFieldTransform using an image requires that the image's pixel type be sitk.sitkVectorFloat64.
* Initializing the DisplacementFieldTransform using an image will "clear out" your image (your alias to the image will point to an empty, zero sized, image).
8. Composite transformations are applied in stack order (first added, last applied).
## Transformation Types
SimpleITK supports the following transformation types.
<table width="100%">
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1TranslationTransform.html">TranslationTransform</a></td><td>2D or 3D, translation</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1VersorTransform.html">VersorTransform</a></td><td>3D, rotation represented by a versor</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1VersorRigid3DTransform.html">VersorRigid3DTransform</a></td><td>3D, rigid transformation with rotation represented by a versor</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1Euler2DTransform.html">Euler2DTransform</a></td><td>2D, rigid transformation with rotation represented by a Euler angle</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1Euler3DTransform.html">Euler3DTransform</a></td><td>3D, rigid transformation with rotation represented by Euler angles</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1Similarity2DTransform.html">Similarity2DTransform</a></td><td>2D, composition of isotropic scaling and rigid transformation with rotation represented by a Euler angle</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1Similarity3DTransform.html">Similarity3DTransform</a></td><td>3D, composition of isotropic scaling and rigid transformation with rotation represented by a versor</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1ScaleTransform.html">ScaleTransform</a></td><td>2D or 3D, anisotropic scaling</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1ScaleVersor3DTransform.html">ScaleVersor3DTransform</a></td><td>3D, rigid transformation and anisotropic scale is <b>added</b> to the rotation matrix part (not composed as one would expect)</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1ScaleSkewVersor3DTransform.html">ScaleSkewVersor3DTransform</a></td><td>3D, rigid transformation with anisotropic scale and skew matrices <b>added</b> to the rotation matrix part (not composed as one would expect)</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1AffineTransform.html">AffineTransform</a></td><td>2D or 3D, affine transformation.</td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1BSplineTransform.html">BSplineTransform</a></td><td>2D or 3D, deformable transformation represented by a sparse regular grid of control points. </td></tr>
<tr><td><a href="http://www.itk.org/Doxygen/html/classitk_1_1DisplacementFieldTransform.html">DisplacementFieldTransform</a></td><td>2D or 3D, deformable transformation represented as a dense regular grid of vectors.</td></tr>
<tr><td><a href="http://www.itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1Transform.html">Transform</a></td>
<td>A generic transformation. Can represent any of the SimpleITK transformations, and a <b>composite transformation</b> (stack of transformations concatenated via composition, last added, first applied). </td></tr>
</table>
```
import SimpleITK as sitk
import utilities as util
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, fixed
OUTPUT_DIR = "output"
```
We will introduce the transformation types, starting with translation and illustrating how to move from a lower to higher parameter space (e.g. translation to rigid).
We start with the global transformations. All of them <b>except translation</b> are of the form:
$$T(\mathbf{x}) = A(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c}$$
In ITK speak (when printing your transformation):
<ul>
<li>Matrix: the matrix $A$</li>
<li>Center: the point $\mathbf{c}$</li>
<li>Translation: the vector $\mathbf{t}$</li>
<li>Offset: $\mathbf{t} + \mathbf{c} - A\mathbf{c}$</li>
</ul>
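To make this bookkeeping concrete, here is a small check (a sketch with arbitrary numbers) that a transform's matrix, center and translation combine as $T(\mathbf{x}) = A\mathbf{x} + (\mathbf{t} + \mathbf{c} - A\mathbf{c})$, i.e. the printed "Offset":
```python
import numpy as np
import SimpleITK as sitk

# A 2D rigid transform with a non-trivial center and translation (values made up).
tx = sitk.Euler2DTransform()
tx.SetCenter((3.0, 4.0))
tx.SetTranslation((1.0, 2.0))
tx.SetAngle(np.pi / 6)

A = np.array(tx.GetMatrix()).reshape(2, 2)   # the matrix A (row major)
c = np.array(tx.GetCenter())                 # the center c
t = np.array(tx.GetTranslation())            # the translation t
offset = t + c - A.dot(c)                    # the "Offset" shown when printing the transform

x = (10.0, 11.0)
print(tx.TransformPoint(x))                  # SimpleITK's T(x)
print(A.dot(x) + offset)                     # A*x + offset, should agree
```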
## TranslationTransform
Create a translation and then transform a point and use the inverse transformation to get the original back.
```
dimension = 2
offset = [2]*dimension # use a Python trick to create the offset list based on the dimension
translation = sitk.TranslationTransform(dimension, offset)
print(translation)
point = [10, 11] if dimension==2 else [10, 11, 12] # set point to match dimension
transformed_point = translation.TransformPoint(point)
translation_inverse = translation.GetInverse()
print('original point: ' + util.point2str(point) + '\n'
'transformed point: ' + util.point2str(transformed_point) + '\n'
'back to original: ' + util.point2str(translation_inverse.TransformPoint(transformed_point)))
```
## Euler2DTransform
Rigidly transform a 2D point using a Euler angle parameter specification.
Notice that the dimensionality of the Euler angle based rigid transformation is associated with the class, unlike the translation which is set at construction.
```
point = [10, 11]
rotation2D = sitk.Euler2DTransform()
rotation2D.SetTranslation((7.2, 8.4))
rotation2D.SetAngle(np.pi/2)
print('original point: ' + util.point2str(point) + '\n'
'transformed point: ' + util.point2str(rotation2D.TransformPoint(point)))
```
## VersorTransform (rotation in 3D)
Rotation using a versor, vector part of unit quaternion, parameterization. Quaternion defined by rotation of $\theta$ radians around axis $n$, is $q = [n*\sin(\frac{\theta}{2}), \cos(\frac{\theta}{2})]$.
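As a quick check of this convention (a sketch with an arbitrary axis and angle), the 4-component versor $\left(n\sin\frac{\theta}{2}, \cos\frac{\theta}{2}\right)$ for a half-turn about the z-axis is the `[0,0,1,0]` used in the cell below:
```python
import numpy as np

theta = np.pi                         # rotation angle (radians)
n = np.array([0.0, 0.0, 1.0])         # unit rotation axis
versor = np.append(n * np.sin(theta / 2), np.cos(theta / 2))
print(versor)                         # ~[0, 0, 1, 0]
```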
```
# Use a versor:
rotation1 = sitk.VersorTransform([0,0,1,0])
# Use axis-angle:
rotation2 = sitk.VersorTransform((0,0,1), np.pi)
# Use a matrix:
rotation3 = sitk.VersorTransform()
rotation3.SetMatrix([-1, 0, 0, 0, -1, 0, 0, 0, 1]);
point = (10, 100, 1000)
p1 = rotation1.TransformPoint(point)
p2 = rotation2.TransformPoint(point)
p3 = rotation3.TransformPoint(point)
print('Points after transformation:\np1=' + str(p1) +
'\np2='+ str(p2) + '\np3='+ str(p3))
```
## Translation to Rigid [3D]
We only need to copy the translational component.
```
dimension = 3
t =(1,2,3)
translation = sitk.TranslationTransform(dimension, t)
# Copy the translational component.
rigid_euler = sitk.Euler3DTransform()
rigid_euler.SetTranslation(translation.GetOffset())
# Apply the transformations to the same set of random points and compare the results.
util.print_transformation_differences(translation, rigid_euler)
```
## Rotation to Rigid [3D]
Copy the matrix or versor and <b>center of rotation</b>.
```
rotation_center = (10, 10, 10)
rotation = sitk.VersorTransform([0,0,1,0], rotation_center)
rigid_versor = sitk.VersorRigid3DTransform()
rigid_versor.SetRotation(rotation.GetVersor())
#rigid_versor.SetCenter(rotation.GetCenter()) #intentional error, not copying center of rotation
# Apply the transformations to the same set of random points and compare the results.
util.print_transformation_differences(rotation, rigid_versor)
```
In the cell above, when we don't copy the center of rotation we have a constant error vector, $\mathbf{c} - A\mathbf{c}$.
## Similarity [2D]
When the center of the similarity transformation is not at the origin the effect of the transformation is not what most of us expect. This is readily visible if we limit the transformation to scaling: $T(\mathbf{x}) = s\mathbf{x}-s\mathbf{c} + \mathbf{c}$. Changing the transformation's center results in scale + translation.
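For example (with made-up numbers), take $s=2$ and center $\mathbf{c}=(1,1)$: the center itself is fixed, $T(1,1)=(1,1)$, while the origin maps to $T(0,0)=-s\mathbf{c}+\mathbf{c}=(-1,-1)$, so the net effect is a scaling by $2$ composed with a translation by $(1-s)\mathbf{c}=(-1,-1)$.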
```
def display_center_effect(x, y, tx, point_list, xlim, ylim):
tx.SetCenter((x,y))
transformed_point_list = [ tx.TransformPoint(p) for p in point_list]
plt.scatter(list(np.array(transformed_point_list).T)[0],
list(np.array(transformed_point_list).T)[1],
marker='^',
color='red', label='transformed points')
plt.scatter(list(np.array(point_list).T)[0],
list(np.array(point_list).T)[1],
marker='o',
color='blue', label='original points')
plt.xlim(xlim)
plt.ylim(ylim)
plt.legend(loc=(0.25,1.01))
# 2D square centered on (0,0)
points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]
# Scale by 2
similarity = sitk.Similarity2DTransform();
similarity.SetScale(2)
interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(similarity), point_list = fixed(points),
xlim = fixed((-10,10)),ylim = fixed((-10,10)));
```
## Rigid to Similarity [3D]
Copy the translation, center, and matrix or versor.
```
rotation_center = (100, 100, 100)
theta_x = 0.0
theta_y = 0.0
theta_z = np.pi/2.0
translation = (1,2,3)
rigid_euler = sitk.Euler3DTransform(rotation_center, theta_x, theta_y, theta_z, translation)
similarity = sitk.Similarity3DTransform()
similarity.SetMatrix(rigid_euler.GetMatrix())
similarity.SetTranslation(rigid_euler.GetTranslation())
similarity.SetCenter(rigid_euler.GetCenter())
# Apply the transformations to the same set of random points and compare the results.
util.print_transformation_differences(rigid_euler, similarity)
```
## Similarity to Affine [3D]
Copy the translation, center and matrix.
```
rotation_center = (100, 100, 100)
axis = (0,0,1)
angle = np.pi/2.0
translation = (1,2,3)
scale_factor = 2.0
similarity = sitk.Similarity3DTransform(scale_factor, axis, angle, translation, rotation_center)
affine = sitk.AffineTransform(3)
affine.SetMatrix(similarity.GetMatrix())
affine.SetTranslation(similarity.GetTranslation())
affine.SetCenter(similarity.GetCenter())
# Apply the transformations to the same set of random points and compare the results.
util.print_transformation_differences(similarity, affine)
```
## Scale Transform
Just as the case was for the similarity transformation above, when the transformations center is not at the origin, instead of a pure anisotropic scaling we also have translation ($T(\mathbf{x}) = \mathbf{s}^T\mathbf{x}-\mathbf{s}^T\mathbf{c} + \mathbf{c}$).
```
# 2D square centered on (0,0).
points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]
# Scale by half in x and 2 in y.
scale = sitk.ScaleTransform(2, (0.5,2));
# Interactively change the location of the center.
interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(scale), point_list = fixed(points),
xlim = fixed((-10,10)),ylim = fixed((-10,10)));
```
## Unintentional Misnomers (originally from ITK)
Two transformation types whose names may mislead you are ScaleVersor and ScaleSkewVersor. Basing your choices on expectations without reading the documentation will surprise you.
ScaleVersor - based on the name you might expect a composition of transformations; in practice it is:
$$T(x) = (R+S)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S= \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]$$
ScaleSkewVersor - based on the name you might expect a composition of transformations; in practice it is:
$$T(x) = (R+S+K)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S = \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]\;\; \textrm{and } K = \left[\begin{array}{ccc} 0 & k_0 & k_1 \\ k_2 & 0 & k_3 \\ k_4 & k_5 & 0 \end{array}\right]$$
Note that ScaleSkewVersor is an over-parametrized version of the affine transform: 15 parameters (scale, skew, versor, translation) vs. 12 parameters (matrix, translation).
## Bounded Transformations
SimpleITK supports two types of bounded non-rigid transformations, BSplineTransform (sparse representation) and DisplacementFieldTransform (dense representation).
Transforming a point that is outside the bounds will return the original point - identity transform.
## BSpline
Using a sparse set of control points to control a free form deformation. Using the cell below it is clear that the BSplineTransform allows for folding and tearing.
```
# Create the transformation (when working with images it is easier to use the BSplineTransformInitializer function
# or its object oriented counterpart BSplineTransformInitializerFilter).
dimension = 2
spline_order = 3
direction_matrix_row_major = [1.0,0.0,0.0,1.0] # identity, mesh is axis aligned
origin = [-1.0,-1.0]
domain_physical_dimensions = [2,2]
bspline = sitk.BSplineTransform(dimension, spline_order)
bspline.SetTransformDomainOrigin(origin)
bspline.SetTransformDomainDirection(direction_matrix_row_major)
bspline.SetTransformDomainPhysicalDimensions(domain_physical_dimensions)
bspline.SetTransformDomainMeshSize((4,3))
# Random displacement of the control points.
originalControlPointDisplacements = np.random.random(len(bspline.GetParameters()))
bspline.SetParameters(originalControlPointDisplacements)
# Apply the BSpline transformation to a grid of points
# starting the point set exactly at the origin of the BSpline mesh is problematic as
# these points are considered outside the transformation's domain,
# remove epsilon below and see what happens.
numSamplesX = 10
numSamplesY = 20
coordsX = np.linspace(origin[0]+np.finfo(float).eps, origin[0] + domain_physical_dimensions[0], numSamplesX)
coordsY = np.linspace(origin[1]+np.finfo(float).eps, origin[1] + domain_physical_dimensions[1], numSamplesY)
XX, YY = np.meshgrid(coordsX, coordsY)
interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),
tx = fixed(bspline), original_control_point_displacements = fixed(originalControlPointDisplacements));
```
## DisplacementField
A dense set of vectors representing the displacement inside the given domain. The most generic representation of a transformation.
```
# Create the displacement field.
# When working with images the safer thing to do is use the image based constructor,
# sitk.DisplacementFieldTransform(my_image), all the fixed parameters will be set correctly and the displacement
# field is initialized using the vectors stored in the image. SimpleITK requires that the image's pixel type be
# sitk.sitkVectorFloat64.
displacement = sitk.DisplacementFieldTransform(2)
field_size = [10,20]
field_origin = [-1.0,-1.0]
field_spacing = [2.0/9.0,2.0/19.0]
field_direction = [1,0,0,1] # direction cosine matrix (row major order)
# Concatenate all the information into a single list
displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)
# Set the interpolator, either sitkLinear which is default or nearest neighbor
displacement.SetInterpolator(sitk.sitkNearestNeighbor)
originalDisplacements = np.random.random(len(displacement.GetParameters()))
displacement.SetParameters(originalDisplacements)
coordsX = np.linspace(field_origin[0], field_origin[0]+(field_size[0]-1)*field_spacing[0], field_size[0])
coordsY = np.linspace(field_origin[1], field_origin[1]+(field_size[1]-1)*field_spacing[1], field_size[1])
XX, YY = np.meshgrid(coordsX, coordsY)
interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),
tx = fixed(displacement), original_control_point_displacements = fixed(originalDisplacements));
```
## Composite transform (Transform)
The generic SimpleITK transform class. This class can represent either a single transformation (global, local) or a composite transformation (multiple transformations applied one after the other). This is the output type returned by the SimpleITK registration framework.
The choice of whether to use a composite transformation or compose transformations on your own has subtle differences in the registration framework.
Composite transforms enable a combination of a global transformation with multiple local/bounded transformations. This is useful if we want to apply deformations only in regions that deform, while other regions are only affected by the global transformation.
The following code illustrates this, where the whole region is translated and subregions have different deformations.
```
# Global transformation.
translation = sitk.TranslationTransform(2,(1.0,0.0))
# Displacement in region 1.
displacement1 = sitk.DisplacementFieldTransform(2)
field_size = [10,20]
field_origin = [-1.0,-1.0]
field_spacing = [2.0/9.0,2.0/19.0]
field_direction = [1,0,0,1] # direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement1.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)
displacement1.SetParameters(np.ones(len(displacement1.GetParameters())))
# Displacement in region 2.
displacement2 = sitk.DisplacementFieldTransform(2)
field_size = [10,20]
field_origin = [1.0,-3]
field_spacing = [2.0/9.0,2.0/19.0]
field_direction = [1,0,0,1] #direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement2.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)
displacement2.SetParameters(-1.0*np.ones(len(displacement2.GetParameters())))
# Composite transform which applies the global and local transformations.
composite = sitk.Transform(translation)
composite.AddTransform(displacement1)
composite.AddTransform(displacement2)
# Apply the composite transformation to points in ([-1,-3],[3,1]) and
# display the deformation using a quiver plot.
# Generate points.
numSamplesX = 10
numSamplesY = 10
coordsX = np.linspace(-1.0, 3.0, numSamplesX)
coordsY = np.linspace(-3.0, 1.0, numSamplesY)
XX, YY = np.meshgrid(coordsX, coordsY)
# Transform points and compute deformation vectors.
pointsX = np.zeros(XX.shape)
pointsY = np.zeros(XX.shape)
for index, value in np.ndenumerate(XX):
px,py = composite.TransformPoint((value, YY[index]))
pointsX[index]=px - value
pointsY[index]=py - YY[index]
plt.quiver(XX, YY, pointsX, pointsY);
```
## Writing and Reading
SimpleITK's ReadTransform() returns a SimpleITK.Transform. The content of the file can be any of the SimpleITK transformations or a composite (set of transformations).
```
import os
# Create a 2D rigid transformation, write it to disk and read it back.
basic_transform = sitk.Euler2DTransform()
basic_transform.SetTranslation((1.0,2.0))
basic_transform.SetAngle(np.pi/2)
full_file_name = os.path.join(OUTPUT_DIR, 'euler2D.tfm')
sitk.WriteTransform(basic_transform, full_file_name)
# The ReadTransform function returns an sitk.Transform no matter the type of the transform
# found in the file (global, bounded, composite).
read_result = sitk.ReadTransform(full_file_name)
print('Different types: '+ str(type(read_result) != type(basic_transform)))
util.print_transformation_differences(basic_transform, read_result)
# Create a composite transform then write and read.
displacement = sitk.DisplacementFieldTransform(2)
field_size = [10,20]
field_origin = [-10.0,-100.0]
field_spacing = [20.0/(field_size[0]-1),200.0/(field_size[1]-1)]
field_direction = [1,0,0,1] #direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)
displacement.SetParameters(np.random.random(len(displacement.GetParameters())))
composite_transform = sitk.Transform(basic_transform)
composite_transform.AddTransform(displacement)
full_file_name = os.path.join(OUTPUT_DIR, 'composite.tfm')
sitk.WriteTransform(composite_transform, full_file_name)
read_result = sitk.ReadTransform(full_file_name)
util.print_transformation_differences(composite_transform, read_result)
```
<a href="02_images_and_resampling.ipynb"><h2 align=right>Next »</h2></a>
|
github_jupyter
|
```
##### derived from https://github.com/bozhu/AES-Python
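# A from-scratch AES implementation used below to generate test vectors:
# 10 rounds for AES-128, 14 rounds for AES-256, operating on 128-bit integers.
# Passing an `iv` chains blocks CBC-style; for educational/test use only
# (table lookups, not constant time), cross-checked against Crypto.Cipher at the end.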
import copy
Sbox = (
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,
)
InvSbox = (
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
)
Rcon = (
0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,
0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,
0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,
)
def text2matrix(text): ##
matrix = []
for i in range(16):
byte = (text >> (8 * (15 - i))) & 0xFF
if i % 4 == 0:
matrix.append([byte])
else:
matrix[i // 4].append(byte)
#print("{:32x}".format(text))
#print([[hex(a) for a in m] for m in matrix])
"""
A B C D E F G H I J K L M N O P
A B C D
E F G H
I J K L
M N O P
"""
return matrix
def matrix2text(matrix): ##
text = 0
for i in range(4):
for j in range(4):
text |= (matrix[i][j] << (120 - 8 * (4 * i + j)))
return text
class AES:
def __init__(self, master_key, iv=None, aes256=True):
self.num_rounds = 14 if aes256 else 10
self.change_key(master_key)
self.iv = iv
def matrix_xor_elementwise(self, s, k): ##
for i in range(4):
for j in range(4):
s[i][j] ^= k[i][j]
# shifts / movements only
def matrix_shift_rows(self, s): ##
s[0][1], s[1][1], s[2][1], s[3][1] = s[1][1], s[2][1], s[3][1], s[0][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[3][3], s[0][3], s[1][3], s[2][3]
def matrix_unshift_rows(self, s): ##
s[0][1], s[1][1], s[2][1], s[3][1] = s[3][1], s[0][1], s[1][1], s[2][1]
s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2]
s[0][3], s[1][3], s[2][3], s[3][3] = s[1][3], s[2][3], s[3][3], s[0][3]
def matrix_sbox_lookup(self, s): ##
for i in range(4):
for j in range(4):
s[i][j] = Sbox[s[i][j]]
def matrix_invsbox_lookup(self, s): ##
for i in range(4):
for j in range(4):
s[i][j] = InvSbox[s[i][j]]
def mix_columns(self, s):
xtime = lambda a: (((a << 1) ^ 0x1B) & 0xFF) if (a & 0x80) else (a << 1)
for i in range(4):
t = s[i][0] ^ s[i][1] ^ s[i][2] ^ s[i][3]
u = s[i][0]
s[i][0] ^= t ^ xtime(s[i][0] ^ s[i][1])
s[i][1] ^= t ^ xtime(s[i][1] ^ s[i][2])
s[i][2] ^= t ^ xtime(s[i][2] ^ s[i][3])
s[i][3] ^= t ^ xtime(s[i][3] ^ u)
def unmix_columns(self, s):
xtime = lambda a: (((a << 1) ^ 0x1B) & 0xFF) if (a & 0x80) else (a << 1)
for i in range(4):
u = xtime(xtime(s[i][0] ^ s[i][2]))
v = xtime(xtime(s[i][1] ^ s[i][3]))
s[i][0] ^= u
s[i][1] ^= v
s[i][2] ^= u
s[i][3] ^= v
self.mix_columns(s)
def encrypt(self, plaintext):
if self.iv is not None:
self.plain_state = text2matrix(plaintext ^ self.iv)
else:
self.plain_state = text2matrix(plaintext)
self.matrix_xor_elementwise(self.plain_state, self.round_keys[0])
#print([hex(x) for x in self.plain_state[0]])
for i in range(1, self.num_rounds+1):
## CYCLE 1
self.matrix_sbox_lookup(self.plain_state)
self.matrix_shift_rows(self.plain_state)
#print([hex(x) for x in self.plain_state[0]])
## CYCLE 2
if i != self.num_rounds: self.mix_columns(self.plain_state)
self.matrix_xor_elementwise(self.plain_state, self.round_keys[i])
#print([hex(x) for x in self.plain_state[0]])
if self.iv is not None:
self.iv = matrix2text(self.plain_state)
return matrix2text(self.plain_state)
def decrypt(self, ciphertext):
self.cipher_state = text2matrix(ciphertext)
#print(hex(self.cipher_state[3][3]))
for i in range(self.num_rounds, 0, -1):
## CYCLE 1
self.matrix_xor_elementwise(self.cipher_state, self.round_keys[i])
if i != self.num_rounds: self.unmix_columns(self.cipher_state)
#print(hex(self.cipher_state[3][3]))
## CYCLE 2
self.matrix_unshift_rows(self.cipher_state)
self.matrix_invsbox_lookup(self.cipher_state)
#print(hex(self.cipher_state[0][3]))
self.matrix_xor_elementwise(self.cipher_state, self.round_keys[0])
out = matrix2text(self.cipher_state)
if self.iv is not None:
out = out ^ self.iv
self.iv = ciphertext
return out
def change_key(self, master_key):
if (self.num_rounds == 14):
self.round_keys = [text2matrix(master_key >> 128), text2matrix(master_key & ((1 << 128) - 1))]
else:
self.round_keys = [text2matrix(master_key)]
last_key2 = self.round_keys[0]
last_key = self.round_keys[1] if (self.num_rounds == 14) else self.round_keys[0]
#print([hex(x) for x in last_key[0]])
for i in range(1, self.num_rounds - len(self.round_keys) + 2):
key = []
aes256_alt = (i % 2 == 0) and (self.num_rounds == 14)
# row 0
s0 = Sbox[last_key[3][0]]
s1 = Sbox[last_key[3][1]]
s2 = Sbox[last_key[3][2]]
s3 = Sbox[last_key[3][3]]
last2 = last_key2 if (self.num_rounds == 14) else last_key
round_const = Rcon[i // 2 + 1] if (self.num_rounds == 14) else Rcon[i]
r0b0 = last2[0][0] ^ (s1 if not aes256_alt else s0) ^ (round_const if not aes256_alt else 0)
r0b1 = last2[0][1] ^ (s2 if not aes256_alt else s1)
r0b2 = last2[0][2] ^ (s3 if not aes256_alt else s2)
r0b3 = last2[0][3] ^ (s0 if not aes256_alt else s3)
key.append([r0b0, r0b1, r0b2, r0b3])
# row 1
r1b0 = last2[1][0] ^ r0b0
r1b1 = last2[1][1] ^ r0b1
r1b2 = last2[1][2] ^ r0b2
r1b3 = last2[1][3] ^ r0b3
key.append([r1b0, r1b1, r1b2, r1b3])
# row 2
r2b0 = last2[2][0] ^ r1b0
r2b1 = last2[2][1] ^ r1b1
r2b2 = last2[2][2] ^ r1b2
r2b3 = last2[2][3] ^ r1b3
key.append([r2b0, r2b1, r2b2, r2b3])
# row 3
r3b0 = last2[3][0] ^ r2b0
r3b1 = last2[3][1] ^ r2b1
r3b2 = last2[3][2] ^ r2b2
r3b3 = last2[3][3] ^ r2b3
key.append([r3b0, r3b1, r3b2, r3b3])
self.round_keys.append(key)
last_key2 = last_key
last_key = key
#print([hex(x) for x in key[0]])
# 2d1541c695f88a16f8bfb5dbe3a95022
def packtext(s):
s = s
o = 0
while len(s) > 0:
o = (o << 8) | ord(s[0])
s = s[1:]
return o
def unpacktext(s):
o = ""
while s > 0:
o = chr(s & 0xFF) + o
s = s >> 8
return o
key = "abcd1234ABCD!@#$zyxwZYXW*1*2*3*4"
assert len(key) == 32
iv = "54123892jsdkjsdj"
assert len(iv) == 16
string1 = "helloworld123456"
string2 = "test string 1234"
print(key)
#print(ek(key.encode("ascii")))
a = AES(packtext(key), aes256=True)
enc = a.encrypt(packtext(string1))
dec = unpacktext(a.decrypt(enc))
print(packtext(key) >> 128, packtext(key) & ((1 << 128) - 1))
print(packtext(string1))
print(enc)
print(a.decrypt(enc))
print(unpacktext(a.decrypt(enc)))
print(hex(enc), dec)
assert enc == 0x2d1541c695f88a16f8bfb5dbe3a95022
##
#print(ek(key.encode("ascii")))
a = AES(packtext(key[:16]), aes256=False)
enc = a.encrypt(packtext(string1))
dec = unpacktext(a.decrypt(enc))
print(hex(enc), dec)
assert enc == 0x1708271a0a18bb2e15bd658805297b8d
(packtext(string1) >> 128),(packtext(string1) & ((1 << 128) - 1))
a = AES(packtext(key))
e1 = hex(a.encrypt(packtext(string1)))
#assert e1 == "0x1708271a0a18bb2e15bd658805297b8d"
e2 = hex(a.encrypt(packtext(string2)))
#assert e2 == "0x482ac205196a804865262a0044915738"
print(e1)
print(e2)
print(packtext(key), packtext(string1), int(e1, 0))
a = AES(packtext(key))
print(unpacktext(a.decrypt(int(e1, 0))))
#assert(unpacktext(a.decrypt(int(e1, 0))) == string1)
print(unpacktext(a.decrypt(int(e2, 0))))
#assert(unpacktext(a.decrypt(int(e2, 0))) == string2)
hex(30614575354952859734368363414031006605)
a = AES(packtext(key), packtext(iv))
e1 = hex(a.encrypt(packtext(string1)))
#assert e1 == "0x6cbaa5d41d87fc1cb2cde5f49c592554"
e2 = hex(a.encrypt(packtext(string2)))
#assert e2 == "0xb2b95376972f97140a84deda840144a2"
print(e1)
print(e2)
a = AES(packtext(key), packtext(iv))
dec1 = (unpacktext(a.decrypt(int(e1, 0))))
#assert(dec1 == string1)
print(dec1)
dec2 = (unpacktext(a.decrypt(int(e2, 0))))
#assert(dec2 == string2)
print(dec2)
from Crypto.Cipher import AES as AE
print(key, len(key.encode()))
cipher = AE.new(key.encode(), AE.MODE_ECB)
ciphertext = cipher.encrypt((string1 + string2).encode())
print(ciphertext.hex()[:32])
print(ciphertext.hex()[32:])
plaintext = cipher.decrypt(ciphertext)
print(plaintext)
cipher = AE.new(key.encode(), AE.MODE_CBC, iv.encode())
ciphertext = cipher.encrypt((string1 + string2).encode())
print(ciphertext.hex()[:32])
print(ciphertext.hex()[32:])
cipher = AE.new(key.encode(), AE.MODE_CBC, iv.encode())
plaintext = cipher.decrypt(ciphertext)
print(plaintext)
import random
# Generate 256-bit encrypt test-cases
for _1 in range(10):
key = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(32)]) # AES256 key
print("setTopKey(BigInt(\"{}\"))".format(packtext(key) >> 128))
print("setKey(BigInt(\"{}\"))".format(packtext(key) & ((1 << 128) - 1)))
for _2 in range(10):
plaintext = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(16)])
iv = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(16)])
c1 = AES(packtext(key))
ct1 = c1.encrypt(packtext(plaintext))
print("runSingleEncryptTest(BigInt(\"{}\"), BigInt(\"{}\"))"
.format(packtext(plaintext), ct1))
c2 = AES(packtext(key), iv=packtext(iv))
ct2 = c2.encrypt(packtext(plaintext))
print("runSingleEncryptTest(BigInt(\"{}\"), BigInt(\"{}\"), iv=BigInt(\"{}\"))"
.format(packtext(plaintext), ct2, packtext(iv)))
import random
# Generate 256-bit decrypt test-cases
for _1 in range(10):
key = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(32)]) # AES256 key
print("setTopKey(BigInt(\"{}\"))".format(packtext(key) >> 128))
print("setKey(BigInt(\"{}\"))".format(packtext(key) & ((1 << 128) - 1)))
for _2 in range(10):
plaintext = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(16)])
iv = "".join([chr(random.randint(0x20, 0x7E)) for _ in range(16)])
c1 = AES(packtext(key))
ct1 = c1.encrypt(packtext(plaintext))
print("runSingleDecryptTest(BigInt(\"{}\"), BigInt(\"{}\"))"
.format(ct1, packtext(plaintext)))
c2 = AES(packtext(key), iv=packtext(iv))
ct2 = c2.encrypt(packtext(plaintext))
print("runSingleDecryptTest(BigInt(\"{}\"), BigInt(\"{}\"), iv=BigInt(\"{}\"))"
.format(ct2, packtext(plaintext), packtext(iv)))
```
|
github_jupyter
|
# CME 193 - Lecture 8
Here's what you've seen over the past 7 lectures:
* Python Language Basics
* NumPy - Arrays/Linear Algebra
* SciPy - Sparse Linear Algebra/Optimization
* DataFrames - Reading & Manipulating tabular data
* Scikit learn - Machine Learning Models & use with data
* Ortools - More Optimization
You've now seen some tools for scientific computing in Python. How you add to them and what you do with them is up to you!

(Maybe you've also had a bit of [this](https://xkcd.com/1987/) experience)
## Today
1. We'll revisit object oriented programming in Python
2. We'll look at PyTorch (deep learning package)
# Object Oriented Programming - II
Recall some of the basic terminology of [object oriented programming](https://en.wikipedia.org/wiki/Object-oriented_programming)
* **Classes** are templates for objects (e.g., "the Integers" is a class)
* **Objects** are specific instances of a class (e.g., "2 is an integer")
* **Methods** are functions associated with objects of a class
* the "square of 2" may be expressed as `2.square()` (returns 4)
* the "addition of 1 to 2" may be expressed as `2.add(1)` (returns 3)
* the "name of 2" may be expressed as `2.name()` (returns "two")
Today we'll use an extended example of univariate functions
$$f:\mathbb{R} \to \mathbb{R}$$
to see how you might use object oriented programming for something like automatic differentiation, classical machine learning, or deep learning. Yes - you can maybe use a library like [Tensorflow](https://www.tensorflow.org/), [Keras](https://keras.io/), or [PyTorch](https://pytorch.org/), but it's more fun to understand how to do it yourself (and then maybe use someone else's fancy/high quality implementation).
First thing to remember is that everything in Python is an object, even functions.
```
def f(x):
return x
isinstance(f, object)
isinstance(isinstance, object)
isinstance(object, object)
```
Once you create an object, it lives somewhere on your computer:
```
id(f) # memory address on your computer
x = 1000
id(x)
```
You can check if two variables are referring to the same address using `is`
```
z = x
print("equality: {}".format(z == x))
print("same address: {}".format(z is x))
y = 1000
print("equality: {}".format(y == x))
print("same address: {}".format(y is x))
```
## Univariate functions
Let's consider functions that send a real number to a real number
$$f:\mathbb{R} \to \mathbb{R}$$
Perhaps these functions have some parameters $\theta$, such as
$$f(x; \theta) = \theta x$$
(a linear function with slope $\theta$), or
$$g(x;\theta) = \theta_1 x + \theta_0$$
(linear function with slope $\theta_1$ and intercept $\theta_0$), or
$$h(x;\theta) = \theta_0 \exp(-\theta_1 x^2)$$
and so on. The point is that we can parameterize functions that have a similar form, and that there may be different numbers of parameters depending on the function.
What might we want to be able to do with a function?
1. Evaluate it (`y = f(x)`)
2. Print it as a string `f(x) = "3x + 2"`
3. Calculate a gradient
4. add/multiply/exponentiate...
We could think of doing the above with methods like `f.evaluate(x)` and `f.name()`, but we'll use the special methods `__call__` and `__str__` to be able to do things like call `f(x)` and `format(f)` just as we might do so with built-in objects. You can see the different special methods available to overload [here](https://docs.python.org/3/reference/datamodel.html)
We're going to create an abstract function class that all the other classes we create will inherit from. If you haven't seen object oriented programming before, think of this as a way to promise all our functions will be able to do certain things (or throw an error). We'll provide default implementations for some methods (these will get filled in later), and have some methods that will need to be implemented differently for each sub-class.
For more on classes and inheritance, see [here](https://thepythonguru.com/python-inheritance-and-polymorphism/). The idea of giving objects methods with the same name is one form of [polymorphism](https://stackoverflow.com/questions/1031273/what-is-polymorphism-what-is-it-for-and-how-is-it-used) - we'll see how this is actually quite useful and allows you to do things that would be difficult without object-oriented programming.
```
class AbstractUnivariate:
def __init__(self):
raise NotImplementedError
def __call__(self, x):
raise NotImplementedError
def fmtstr(self, x="x"):
raise NotImplementedError
def __str__(self):
return self.fmtstr("x")
def gradient(self):
raise NotImplementedError
# the rest of these methods will be implemented when we write the appropriate functions
def __add__(self, other):
return SumFunction(self, other)
def __mul__(self, other):
return ProdFunction(self, other)
def __rmul__(self, other):
return ScaleFunction(other, self)
def __pow__(self, n):
return ComposeFunction(PowerFunction(1, n), self)
```
Now, to create a class that inherits from our abstract class, we just use the following syntax:
```
class ConstantFunction(AbstractUnivariate): # AbstractUnivariate indicates class to use for inheritance
def __init__(self, c):
self.c = c
f = ConstantFunction(3)
```
We can see there's a class hierarchy now:
```
print(isinstance(f, ConstantFunction))
print(isinstance(f, AbstractUnivariate))
print(isinstance(f, object))
```
If we haven't implemented the methods we promised we would, we'll get errors
```
f(1)
```
Let's go ahead and implement the promised methods
```
class ConstantFunction(AbstractUnivariate):
def __init__(self, c):
self.c = c
def __call__(self, x):
return self.c
def fmtstr(self, x="x"):
return "{}".format(self.c)
# __str__(self) uses default from abstract class
def gradient(self):
return ConstantFunction(0)
# we inherit the other functions from the AbstractUnivariate class
f = ConstantFunction(3)
print(f)
print(f(1))
print(f(2))
print(f.gradient())
```
What does this object do? It represents the constant function
$$f: x \mapsto c$$
Let's do something a little less trivial. Now we'll implement
$$f: x \mapsto ax + b$$
```
class AffineFunction(AbstractUnivariate):
def __init__(self, a, b):
self.a = a
self.b = b
def __call__(self, x):
return self.a * x + self.b
def fmtstr(self, x="x"):
s = "{}".format(x)
if self.a != 1:
s = "{}*".format(self.a) + s
if self.b != 0:
s = s + " + {}".format(self.b)
return s
def gradient(self):
return ConstantFunction(self.a)
f = AffineFunction(1, 1)
print(f)
print(f(2))
print(f.gradient())
print(isinstance(f, AbstractUnivariate))
```
## Discussion
Let's take ourselves back to calculus. At some point you learned that you can take any function
$$y = ax + b$$
and if you know the values of $a$ and $b$, and someone gives you a value for $x$, you can calculate the value of $y$. At some later point you learned the rule
$$ \frac{d}{dx}(ax + b) = a$$
regardless of what values $a$ and $b$ take. The class `AffineFunction` defines the rules that you learned in math class.
When you write something like
```python
f = AffineFunction(1,1)
```
You are just choosing the values of $a$ and $b$. Now just like you would be able to use the rules of arithmetic and calculus to compute $y$ given $x$ or the gradient of the function, your computer can as well.
**Summary**
* Class definition gives mathematical rules for an equation of a certain form
* Instance of class is choice of constants for a function of that type
# Exercise 1
Implement classes for the following univariate function templates:
1. `QuadraticFunction` -- $f: x \mapsto a x^2 + bx + c$
2. `ExponentialFunction` -- $f: x \mapsto a e^{bx}$
3. `PowerFunction` -- $f: x \mapsto ax^n$
Make sure to return derivatives that are also `AbstractUnivariate` sub-classes. Which class can I use to represent $f: x \mapsto x^{-1}$?
```
# your code here
from math import * # for exp
```
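One possible solution sketch for Exercise 1 (try it yourself first). Assumptions not stated in the exercise: `ExponentialFunction(a, b)` represents $a e^{bx}$ and `PowerFunction(a, n)` represents $ax^n$, matching how those names are used in the cells further down.
```python
from math import exp

class QuadraticFunction(AbstractUnivariate):    # f(x) = a*x^2 + b*x + c
    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c
    def __call__(self, x):
        return self.a * x**2 + self.b * x + self.c
    def fmtstr(self, x="x"):
        return "{}*{}^2 + {}*{} + {}".format(self.a, x, self.b, x, self.c)
    def gradient(self):
        return AffineFunction(2 * self.a, self.b)

class ExponentialFunction(AbstractUnivariate):  # f(x) = a*exp(b*x)
    def __init__(self, a, b):
        self.a, self.b = a, b
    def __call__(self, x):
        return self.a * exp(self.b * x)
    def fmtstr(self, x="x"):
        return "{}*exp({}*{})".format(self.a, self.b, x)
    def gradient(self):
        return ExponentialFunction(self.a * self.b, self.b)

class PowerFunction(AbstractUnivariate):        # f(x) = a*x^n
    def __init__(self, a, n):
        self.a, self.n = a, n
    def __call__(self, x):
        return self.a * x**self.n
    def fmtstr(self, x="x"):
        return "{}*{}^{}".format(self.a, x, self.n)
    def gradient(self):
        if self.n == 0:
            return ConstantFunction(0)
        return PowerFunction(self.a * self.n, self.n - 1)

# PowerFunction(1, -1) represents f: x -> x^{-1}
```
With these in place, the cells below that build `ScaleFunction`, `SumFunction`, `ProdFunction`, and `ComposeFunction` examples should run as written.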
# More functions
We can do more than just encode standard functions - we can scale, add, multiply, and compose functions.
Scaling a function:
$$ g(x)= a *f(x)$$
```
class ScaleFunction(AbstractUnivariate):
def __init__(self, a, f):
self.a = a
if isinstance(f, AbstractUnivariate):
self.f = f
else:
raise AssertionError("must input an AbstractUnivariate function")
def __call__(self, x):
return self.a * self.f(x)
def fmtstr(self, x="x"):
if self.a == 1:
return self.f.fmtstr(x)
else:
return "{}*({})".format(self.a, self.f.fmtstr(x))
def gradient(self):
return ScaleFunction(self.a, self.f.gradient())
f = ExponentialFunction(1, 2)
print(f)
g = ScaleFunction(2, f)
print(g)
print(g.gradient())
print(g(1))
```
Sum and product of two functions
$$ h(x) = f(x) + g(x)$$
$$ h(x) = f(x) * g(x)$$
```
class SumFunction(AbstractUnivariate):
def __init__(self, f, g):
if isinstance(f, AbstractUnivariate) and isinstance(g, AbstractUnivariate):
self.f = f
self.g = g
else:
raise AssertionError("must input AbstractUnivariate functions")
def __call__(self, x):
return self.f(x) + self.g(x)
def fmtstr(self, x="x"):
return "{} + {}".format(self.f.fmtstr(x), self.g.fmtstr(x))
def gradient(self):
return SumFunction(self.f.gradient(), self.g.gradient())
f = ExponentialFunction(1, 2)
g = AffineFunction(2, 1)
h = SumFunction(f, g)
print(h.fmtstr(x="y"))
print(h(-1))
print(h.gradient())
class ProdFunction(AbstractUnivariate):
def __init__(self, f, g):
if isinstance(f, AbstractUnivariate) and isinstance(g, AbstractUnivariate):
self.f = f
self.g = g
else:
raise AssertionError("must input AbstractUnivariate functions")
def __call__(self, x):
return self.f(x) * self.g(x)
def fmtstr(self, x="x"):
return "({}) * ({})".format(self.f.fmtstr(x=x), self.g.fmtstr(x=x))
# product rule (f*g)' = f'*g + f*g'
def gradient(self):
return SumFunction(ProdFunction(self.f.gradient(),self.g), ProdFunction(self.f, self.g.gradient()))
f = ExponentialFunction(1, 2)
g = AffineFunction(2, 1)
h = ProdFunction(f, g)
print(h)
print(h(-1))
print(h.gradient())
```
Compose Functions:
$$h(x) = (g \circ f)(x) = g(f(x))$$
```
class ComposeFunction(AbstractUnivariate):
def __init__(self, g, f):
if isinstance(f, AbstractUnivariate) and isinstance(g, AbstractUnivariate):
self.f = f
self.g = g
else:
raise AssertionError("must input AbstractUnivariate functions")
def __call__(self, x):
return self.g(self.f(x))
def fmtstr(self, x="x"):
return self.g.fmtstr(x="({})".format(self.f.fmtstr(x)))
# chain rule : g(f(x))' = g'(f(x))*f'(x)
def gradient(self):
return ProdFunction(ComposeFunction(self.g.gradient(), self.f), self.f.gradient())
f = PowerFunction(1,2)
print(f.fmtstr("x"))
g = ComposeFunction(f,f)
print(g)
h = ComposeFunction(g, f)
print(h)
print(h(2)) # 2^(2*2*2) = 2^8 = 256
f = PowerFunction(1,2)
g = ExponentialFunction(0.5, -1)
h = ComposeFunction(g, f)
print(h)
print(h.gradient())
```
## Operator overloading makes everything better
Recall how when we wrote the AbstractUnivariate class, we included some default methods
```python
class AbstractUnivariate:
# ...
# the rest of these methods will be implemented when we write the appropriate functions
def __add__(self, other):
return SumFunction(self, other)
def __mul__(self, other):
return ProdFunction(self, other)
def __rmul__(self, other):
return ScaleFunction(other, self)
def __pow__(self, n):
return ComposeFunction(PowerFunction(1, n), self)
```
If you think it is clunky to keep writing `SumFunction` or `ProdFunction` everywhere, you're not alone. Again, you can use the special methods above to [overload operators](https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types)
```
f = ExponentialFunction(1, 2)
g = AffineFunction(2, 1)
print("f = {}".format(f))
print("g = {}".format(g))
print("f + g = {}".format(f+g))
print("f * g = {}".format(f*g))
print("f^2 = {}".format(f**2))
print("2*g = {}".format(2*g))
f = ExponentialFunction(1, 2)
g = AffineFunction(2, 1)
h = f*g
print(h.gradient())
```
## What's going on?
Because we thought ahead to define addition, multiplication, scaling, and powers in our `AbstractUnivariate` class, every sub-class will implement those methods by default **without needing to write any extra code**.
If we hadn't done this, we would have had to copy and paste the same thing into every class definition to get the same behavior, **but we don't need to**. In fact, if we write a new basic univariate function class, e.g. `LogFunction`, we get addition, multiplication, etc., for free!
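For example, here is what a hypothetical `LogFunction` might look like (a sketch: the class is not part of the code above, and its derivative reuses `PowerFunction` from Exercise 1):
```python
from math import log

class LogFunction(AbstractUnivariate):
    """f(x) = log(x) (natural logarithm)."""
    def __call__(self, x):
        return log(x)
    def fmtstr(self, x="x"):
        return "log({})".format(x)
    def gradient(self):
        # d/dx log(x) = 1/x
        return PowerFunction(1, -1)

f = LogFunction()
g = AffineFunction(2, 1)
print(f + g)               # __add__ comes for free from AbstractUnivariate
print((f * g).gradient())  # so does the product rule via __mul__
```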
## Symbolic Functions
Just for fun, let's create an `AbstractUnivariate` sub-class, which just holds a placeholder symbolic function
```
class SymbolicFunction(AbstractUnivariate):
def __init__(self, name):
if isinstance(name, str):
self.name=name
else:
raise AssertionError("name must be string")
def __call__(self, x):
return "{}({})".format(self.name, x)
def fmtstr(self, x="x"):
return self.name + "({})".format(x)
    # symbolic derivative: just append a prime to the function's name
def gradient(self):
return SymbolicFunction(self.name + "'")
f = SymbolicFunction("f")
print(f)
print(f.gradient())
g = SymbolicFunction("g")
print(g + f)
```
Now we can remind ourselves of the product rule and the chain rule (which we encoded in the `ProdFunction` and `ComposeFunction` classes)
```
f = SymbolicFunction("f")
g = SymbolicFunction("g")
print((f*g).gradient())
h = ComposeFunction(g, f)
print(h.gradient())
```
And we can derive the quotient rule
```
f = SymbolicFunction("f")
g = SymbolicFunction("g")
h = f * g**-1
print(h)
print(h.gradient())
```
You can also add symbolic functions to non-symbolic ones:
```
f = SymbolicFunction("f")
g = AffineFunction(1, 2)
h = f + g
print(h)
print(h.gradient())
```
## Summary
You're now on your way to having your own automatic differentiation library! Or your own symbolic computation library! You can probably see lots of ways to extend and improve what you've seen here:
* Support Multivariate Functions
* Add more "basic functions" such as trig functions, etc.
* Reduce expressions when you are able to
* ...
Yes, there are many libraries that do this very thing. Keywords are "autodifferentiation", "symbolic math". This sort of thing is used extensively in deep learning libraries, as well as optimization libraries.
* [Sympy](https://www.sympy.org/en/index.html) for symbolic computation
* [SciPy linear operators](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.html) do something similar to HW1
* [Sage](https://www.sagemath.org/) does a lot of symbolic math using Python
* [Autodiff tools for Python](http://www.autodiff.org/?module=Tools&language=python)
* [Autograd](https://github.com/HIPS/autograd) package
* Most Deep learning libraries (see below) do some form of automatic differentiation
### How was Object Oriented Programming Useful?
**Class Inheritance** allowed you to get functions like addition and multiplication for free once you defined the class everything inherited from
**Polymorphism** enabled you to use any combination of `AbstractUnivariate` functions and still evaluate them, calculate derivatives, and format equations. Everyone played by the same rules.
**Encapsulation** let you interact with functions without worrying about how they are implemented under the hood.
If you think back to HW1, we implicitly used polymorphism in the power method function (e.g., matrix-vector multiply always uses `dot()` no matter which class we're using)
# Exercise 2
Ignoring our `SymbolicFunction` class, any sub-class of `AbstractUnivariate` is a real function $f:\mathbb{R} \to \mathbb{R}$ that we can evaluate using `f(x)` syntax. One thing that you may wish to do is find roots of your function: $\{x \mid f(x) = 0\}$.
One very classical algorithm for doing this is called [Newton's Method](https://en.wikipedia.org/wiki/Newton%27s_method), and has the basic pseudocode:
```
initialize x_0
while not converged:
x_{k+1} = x_k - f(x_k)/f'(x_k)
```
Write a function that implements Newton's method on any `AbstractUnivariate` function
Hint: use the `gradient()` method to get a function for derivatives
```
def find_root(f, x0=0.0, tol=1e-8):
if isinstance(f, SymbolicFunction):
raise AssertionError("can't handle symbolic input")
elif not isinstance(f, AbstractUnivariate):
raise AssertionError("Input must be AbstractUnivariate")
x = x0
# your code here
return x
```
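One way to complete the skeleton is sketched below (a possible solution, not the official one; a `maxiter` cap is added so the loop always terminates even if the iteration does not converge):
```python
def find_root(f, x0=0.0, tol=1e-8, maxiter=100):
    if isinstance(f, SymbolicFunction):
        raise AssertionError("can't handle symbolic input")
    elif not isinstance(f, AbstractUnivariate):
        raise AssertionError("Input must be AbstractUnivariate")
    fprime = f.gradient()  # derivative, also an AbstractUnivariate
    x = x0
    for _ in range(maxiter):
        fx = f(x)
        if abs(fx) < tol:       # converged: f(x) is (nearly) zero
            break
        x = x - fx / fprime(x)  # Newton update
    return x

# example: the positive root of f(x) = x^2 - 2 is sqrt(2)
f = PowerFunction(1, 2) + ConstantFunction(-2)
print(find_root(f, x0=1.0))
```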
# Deep Learning
After the first part of this lecture, you now have a pretty good idea of how to get started implementing a deep learning library. Recall that above we considered functions of the form
$$f(x; \theta): \mathbb{R} \to \mathbb{R}$$
To get to machine learning, you need to handle multivariate input and output
$$f(x; \theta):\mathbb{R}^p \to \mathbb{R}^k$$
You also need to be able to take the gradient of $f$ with respect to the parameters $\theta$ (which we didn't do in our `AbstractUnivariate` class, but is straightforward), and then you can do things like optimize a loss function using your favorite optimization algorithm.
In deep learning, we have the exact same setup
$$f(x; \theta):\mathbb{R}^p \to \mathbb{R}^k$$
What makes deep learning a "special case" of machine learning is that the function $f$ is the composition of several/many functions
$$f = f_n \circ f_{n-1} \circ \dots \circ f_1$$
This is what we mean by "layers", and you use chain rule to "backpropagate" gradients with respect to the parameters.
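In one dimension we have already built this machinery: composing `AbstractUnivariate` functions and calling `gradient()` applies the chain rule for us, which is the idea backpropagation generalizes to many variables and parameters. A small illustration, reusing the classes defined above:
```python
# a tiny two-"layer" univariate model: f = f2 o f1
f1 = AffineFunction(3, 1)        # layer 1: x -> 3x + 1
f2 = ExponentialFunction(1, -1)  # layer 2: h -> e^{-h}
f = ComposeFunction(f2, f1)      # f(x) = e^{-(3x + 1)}
print(f)
print(f.gradient())              # chain rule applied automatically
```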
**Disclaimer** If you really want to learn to use a deep learning library, you should go through several tutorials and learn about the different functions that are used (and *why* they are used). This is beyond the scope of this course, but there are several courses at Stanford devoted to this.
## Deep Learning Libraries
Some popular libraries for deep learning are [Tensorflow](https://www.tensorflow.org/), [Keras](https://keras.io/), and [PyTorch](https://pytorch.org/). Each has its strengths and weaknesses. All of them do essentially the same thing: you define a function through composition using objects that are in many ways similar to what you just implemented. Then you choose a loss function and start optimizing the parameters in these functions using something like stochastic gradient descent.
We'll do an example in PyTorch, since it is higher-level than Tensorflow, and perhaps the most "Pythonic" of the libraries.
```bash
conda install pytorch pillow
```
## PyTorch
What's a tensor? Conceptually, it is identical to a NumPy array.
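For example (a quick aside, separate from the network described below), a tensor converts to and from a NumPy array directly:
```python
import numpy as np
import torch

a = np.arange(3.0)       # NumPy array: [0., 1., 2.]
t = torch.from_numpy(a)  # tensor backed by the same memory
print(t * 2)             # tensor operations mirror NumPy operations
print(t.numpy())         # back to a NumPy array
```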
We'll consider the following network
$$ x \xrightarrow{w_1} h \to ReLU(h) \xrightarrow{w_2} y$$
where $x$ is a 500-dimensional vector, $h$ is a 100-dimensional "hidden layer", and $y$ is a 10-dimensional vector. $w_1$ and $w_2$ are linear transformations (matrices), and ReLU refers to the function
$$ReLU(x) = \begin{cases}
x & x > 0\\
0 & x \le 0
\end{cases}$$
```
import torch
from torch.autograd import Variable
dtype = torch.FloatTensor
# N - batch size
# D_in - x dimension
# H - h dimension
# D_out - y dimension
N, D_in, H, D_out = 64, 500, 100, 10
# Setting requires_grad=False indicates that we do not need to compute gradients w.r.t var
# during the backward pass.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad = False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad = False)
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Variables during the backward pass.
w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)
learning_rate = 1e-6
for t in range(10000):
# Forward pass: compute predicted y using operations on Variables;
y_pred = x.mm(w1).clamp(min=0).mm(w2) # clamp=ReLU
# Compute and print loss using operations on Variables.
    # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape (1,)
loss = (y_pred - y).pow(2).sum()
# Use autograd to compute the backward pass. This call will compute the
# gradient of loss with respect to all Variables with requires_grad=True.
loss.backward()
# Update weights using gradient descent; w1.data and w2.data are Tensors,
# w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are
# Tensors.
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
# Manually zero the gradients after running the backward pass
w1.grad.data.zero_()
w2.grad.data.zero_()
print("Loss is: {}".format(loss.data.numpy()), end='\r')
print()
print("Final loss is {}".format(loss.data[0]))
```
## That's still fairly cumbersome
- When building neural networks, arrange the computation into layers, some of which have learnable parameters which will be optimized during learning.
- Use the ``` torch.nn ``` package to define your layers
- Create custom networks by subclassing the nn.Module
- Really clean code!
- Just create a class subclassing the nn.Module
- specify layers in the ```__init__```
- define a forward pass by ```forward(self,x)``` method
This is analogous to how we created specific sub-classes of `AbstractUnivariate`, and got a lot for free through class inheritance, polymorphism, abstraction, etc.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
class TwoLayerNet(nn.Module):
def __init__(self, D_in, H, D_out): # this defines the parameters, and stores them
        super(TwoLayerNet, self).__init__() # call the parent nn.Module constructor
self.layer1 = nn.Linear(D_in, H) # initializes weights
self.layer2 = nn.Linear(H, D_out)
def forward(self, x): # this defines the composition of functions
out = F.relu(self.layer1(x))
out = self.layer2(out)
return out
# N is batch size; D_in is input dimension; H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Construct our model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out) # we create our function f:x \to y
# Construct our loss function and an Optimizer.
loss_fn = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(1000):
# Forward pass: Compute predicted y by passing x to the model
y_pred = model(x) # evaluate the f(x)
# Compute and print loss
loss = loss_fn(y_pred, y) # evaluate the loss
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Final Loss is {}".format(loss.data[0]))
```
## Training a CNN for Image Classification
The following example is ported from [PyTorch's Documentation](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py)
The basic task of the network is to classify images in the [CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which has 10 classes:
```'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'```

```
import torch
import torchvision
import torchvision.transforms as transforms
# normalizes images to have pixel values between [-1,1]
# turns image into "tensor" to be fed to network
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# get data
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# Classes in the CIFAR10 dataset
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
```
To visualize images:
```
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
```
$$ x \xrightarrow{p_1 \circ r_1 \circ c_1} h_1 \xrightarrow{p_2 \circ r_2 \circ c_2} h_2 \xrightarrow{r_3 \circ f_1} h_3 \xrightarrow{r_4 \circ f_2} h_4 \xrightarrow{f_3} y$$
where $c$ refers to a convolution (a type of linear transformation), $r$ a ReLU, $p$ a pool, and $f$ a (fully connected) linear transformation. $x$ is an input image, and $y$ is a vector of length 10 which you can think of as "class probabilities".
You might also write the above expression as the following composition of functions:
$$y = f_3(r_4(f_2(r_3(f_1(p_2(r_2(c_2(p_1(r_1(c_1(x)))))))))))$$
How would you like to write out that chain rule by hand?
```
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
# composition of functions
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5) # flattens tensor
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
```
Now, we define a loss function and choose an optimization algorithm
```
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
```
Now, we can train the network
```
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward() # calculate gradient w.r.t. parameters
optimizer.step() # update parameters
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
```
To test the classifier, we'll load a few images from our test set
```
dataiter = iter(testloader)
images, labels = next(dataiter)
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
```
Now we'll make predictions
```
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(4)))
```
To get accuracy over the whole test set (keep in mind, we expect 10% accuracy if we randomly guess a class):
```
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
```
## For more examples...
check out [Pytorch Docs](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html)
## To add your own function to PyTorch's autograd library
If you want to add your own functions to PyTorch's autograd library, see [here](https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html).
You would write a class that inherits from `torch.autograd.Function`, and just need to implement `forward` and `backward` methods (conceptually similar to `eval` and `gradient`).
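As a rough sketch of the pattern (a custom ReLU along the lines of the linked tutorial; the tutorial's exact code may differ):
```python
import torch

class MyReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)  # stash the input for the backward pass
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0     # gradient is zero where the input was negative
        return grad_input

x = torch.randn(5, requires_grad=True)
y = MyReLU.apply(x).sum()
y.backward()
print(x.grad)
```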
# Reminders
* This is the last class
* HW 2 is due - this is the last homework
* After today, office hours will be by appointment
# Course Conclusion
You've now seen the basics of Python, and have now seen some of the standard libraries for scientific computing and data science. Hopefully you may now have some ideas of how you can use Python for whatever problems interest you, and have some templates to get you started.
To continue on your Python journey, the best way to improve your skills and knowledge is to just try using it for whatever it is you're doing.
If you'd like to use Python for a specific task, and don't know how to get started, feel free to send me an email and I'll try to point you in a reasonable direction.
# Additional Resources
## Object Oriented Programming
* Beginner's guide to Object Oriented Programming in Python [here](https://stackabuse.com/object-oriented-programming-in-python/)
## Image Processing
In this class, we've worked a lot with tabular data. Another important type of data to be able to work with is image data.
Some options are
* [scikit-image](https://scikit-image.org/)
* [scipy](http://www.scipy-lectures.org/advanced/image_processing/index.html)
* [Pillow](https://pillow.readthedocs.io)
* [OpenCV](https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_tutorials.html)
For many examples, see the [Scikit-image gallery](http://scikit-image.org/docs/stable/auto_examples/). Other libraries also have examples.
```
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seawater as sw
import cartopy.crs as ccrs # import projections
import cartopy.feature as cf # import features
fig_dir='C:/Users/gentemann/Google Drive/f_drive/docs/projects/misst-arctic/Saildrone/'
icefile='C:/Users/gentemann/Google Drive/f_drive/docs/projects/misst-arctic/Ice Present.xlsx'
data_dir = 'F:/data/cruise_data/saildrone/2019_arctic/post_mission/'
adir_sbe='F:/data/cruise_data/saildrone/2019_arctic/sbe56/sd-'
data_dir_sbe_combined = 'F:/data/cruise_data/saildrone/2019_arctic/post_mission_combined_fluxes/'
ds = xr.open_mfdataset(data_dir_sbe_combined+'*.nc',combine='nested',concat_dim='trajectory').load()
ds
# calculate density at different depth
#import seawater as sw
# tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.TEMP_SBE37_MEAN)
# ds['density_MEAN']=xr.DataArray(tem,dims=('time'),coords={'time':ds.time})
# make diurnal plot
ds2=ds#.isel(trajectory=0)
xlon=ds2.lon
tdif=ds2.TEMP_CTD_RBR_MEAN-ds2.TEMP_SBE37_MEAN
time_offset_to_lmt=(xlon/360.)*24.*60
ds2['tlmt']=ds2.lon
for i in range(2):
ds2['tlmt'][i,:]= ds2.time.data+time_offset_to_lmt[i,:]*np.timedelta64(1,'m')# dt.timedelta(seconds=1)
tdif=ds2.TEMP_CTD_RBR_MEAN-ds2.TEMP_SBE37_MEAN
fig,(ax1,ax2) =plt.subplots(1,2)
for i in range(2):
cs=ax1.scatter(ds2.wspd_MEAN[i,:],tdif[i,:],c=ds2.time.dt.hour,s=.5)
ax1.set(xlabel='Wind Speed (ms$^{-1}$)', ylabel='RBR - SBE4 SST (K)')
ax1.set_xlim(0,15)
cbar = fig.colorbar(cs,orientation='horizontal',ax=ax1)
cbar.set_label('GMT Time (hrs)')
for i in range(2):
cs2=ax2.scatter(ds2.time.dt.hour,tdif[i,:],c=ds2.wspd_MEAN[i,:],s=.5)
ax2.set(xlabel='GMT (hr)')
cbar = fig.colorbar(cs2,orientation='horizontal',ax=ax2)
cbar.set_label('Wind Speed (ms$^{-1}$)')
fig.savefig(fig_dir+'figs/temp_buld_dw_data.png')
tdif=ds2.TEMP_CTD_RBR_MEAN-ds2.TEMP_SBE37_MEAN
fig,(ax1,ax2) =plt.subplots(1,2)
cs=ax1.scatter(ds2.wspd_MEAN[0,:],tdif[0,:],c=ds2.time.dt.hour,s=.5)
ax1.set(xlabel='Wind Speed (ms$^{-1}$)', ylabel='RBR - SBE4 SST (K)')
ax1.set_xlim(0,15)
cbar = fig.colorbar(cs,orientation='horizontal',ax=ax1)
cbar.set_label('GMT Time (hrs)')
cs2=ax2.scatter(ds2.time.dt.hour,tdif[0,:],c=ds2.wspd_MEAN[0,:],s=.5)
ax2.set(xlabel='GMT (hr)')
cbar = fig.colorbar(cs2,orientation='horizontal',ax=ax2)
cbar.set_label('Wind Speed (ms$^{-1}$)')
fig.savefig(fig_dir+'figs/temp_buld_dw_data36.png')
tdif=ds2.TEMP_CTD_RBR_MEAN-ds2.TEMP_SBE37_MEAN
fig,(ax1,ax2) =plt.subplots(1,2)
cs=ax1.scatter(ds2.wspd_MEAN[1,:],tdif[1,:],c=ds2.time.dt.hour,s=.5)
ax1.set(xlabel='Wind Speed (ms$^{-1}$)', ylabel='RBR - SBE4 SST (K)')
ax1.set_xlim(0,15)
cbar = fig.colorbar(cs,orientation='horizontal',ax=ax1)
cbar.set_label('GMT Time (hrs)')
cs2=ax2.scatter(ds2.time.dt.hour,tdif[1,:],c=ds2.wspd_MEAN[1,:],s=.5)
ax2.set(xlabel='GMT (hr)')
cbar = fig.colorbar(cs2,orientation='horizontal',ax=ax2)
cbar.set_label('Wind Speed (ms$^{-1}$)')
fig.savefig(fig_dir+'figs/temp_buld_dw_data37.png')
tdif=ds2.TEMP_CTD_RBR_MEAN-ds2.sea_water_temperature_01_mean
fig,(ax1,ax2) =plt.subplots(1,2)
for i in range(2):
cs=ax1.scatter(ds2.wspd_MEAN[i,:],tdif[i,:],c=ds2.time.dt.hour,s=.5)
ax1.set(xlabel='Wind Speed (ms$^{-1}$)', ylabel='RBR - SBE4 SST (K)')
ax1.set_xlim(0,15)
cbar = fig.colorbar(cs,orientation='horizontal',ax=ax1)
cbar.set_label('GMT Time (hrs)')
for i in range(2):
cs2=ax2.scatter(ds2.time.dt.hour,tdif[i,:],c=ds2.wspd_MEAN[i,:],s=.5)
ax2.set(xlabel='GMT (hr)')
cbar = fig.colorbar(cs2,orientation='horizontal',ax=ax2)
cbar.set_label('Wind Speed (ms$^{-1}$)')
fig.savefig(fig_dir+'figs/temp_rbr-sbe-buld_dw_data36.png')
tdif=ds2.TEMP_SBE37_MEAN-ds2.sea_water_temperature_01_mean
fig,(ax1,ax2) =plt.subplots(1,2)
for i in range(2):
cs=ax1.scatter(ds2.wspd_MEAN[i,:],tdif[i,:],c=ds2.time.dt.hour,s=.5)
ax1.set(xlabel='Wind Speed (ms$^{-1}$)', ylabel='SBE37 - SBE1 SST (K)')
ax1.set_xlim(0,15)
cbar = fig.colorbar(cs,orientation='horizontal',ax=ax1)
cbar.set_label('GMT Time (hrs)')
for i in range(2):
cs2=ax2.scatter(ds2.time.dt.hour,tdif[i,:],c=ds2.wspd_MEAN[i,:],s=.5)
ax2.set(xlabel='GMT (hr)')
cbar = fig.colorbar(cs2,orientation='horizontal',ax=ax2)
cbar.set_label('Wind Speed (ms$^{-1}$)')
fig.savefig(fig_dir+'figs/temp_sbe-sbe-buld_dw_data.png')
plt.scatter(ds2.wspd_MEAN,ds2.sea_water_temperature_01_std)
plt.scatter(ds2.wspd_MEAN,ds2.TEMP_SBE37_STDDEV)
#ICE VERIFIED FROM CAMERA
t1='2019-06-22T14'
t2='2019-06-23T00'
#(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
#(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_01_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
#(ds2.wspd_MEAN[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
plt.legend()
#Differences due to strong gradients in area and maybe shallow fresh layer
#surface is COOLER than at depth
#salinity drops significantly
#deeper temperatures warmer from sbe56 05 as compared to sbe01
t1='2019-07-17T00'
t2='2019-07-18T00'
#(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
#(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe00')
(ds2.sea_water_temperature_05_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe05')
(ds2.SAL_SBE37_MEAN[0,:]-28).sel(time=slice(t1,t2)).plot(label='salinity')
#(ds2.wspd_MEAN[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
plt.legend()
#Differences due to strong gradients in area and maybe shallow fresh layer
#surface is COOLER than at depth
#salinity drops significantly
#deeper temperatures warmer from sbe56 05 as compared to sbe01
import seawater as sw
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_00_mean)
ds['density_MEAN_00']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_01_mean)
ds['density_MEAN_01']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_02_mean)
ds['density_MEAN_02']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_04_mean)
ds['density_MEAN_04']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_05_mean)
ds['density_MEAN_05']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
tem=sw.dens0(ds.SAL_SBE37_MEAN,ds.sea_water_temperature_06_mean)
ds['density_MEAN_06']=xr.DataArray(tem,dims=('trajectory','time'),coords={'trajectory':ds.trajectory,'time':ds.time})
t1='2019-07-17T00'
t2='2019-07-18T00'
#(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
#(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds.density_MEAN[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
(ds.density_MEAN_00[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
(ds.density_MEAN_02[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
(ds.density_MEAN_04[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
(ds.density_MEAN_05[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
#(ds2.wspd_MEAN[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
plt.legend()
t1='2019-10-01'
t2='2019-10-11'
(ds.density_MEAN[0,:]-ds.density_MEAN_06[0,:]).sel(time=slice(t1,t2)).plot(label='den')
t1='2019-07-04T18'
t2='2019-07-05'
(ds.sea_water_temperature_00_mean[0,:]-ds.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='den')
#(ds.sea_water_temperature_05_mean[0,:]-ds.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='den')
(ds.SAL_SBE37_MEAN[0,:]).sel(time=slice(t1,t2)).plot(label='den')
#(ds.sea_water_temperature_05_mean[0,:]-ds.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='den')
#tdif=ds2.TEMP_SBE37_MEAN-ds2.sea_water_temperature_01_mean
t1='2019-07-10T00'
t2='2019-07-12T00'
#(ds2.TEMP_AIR_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='air')
(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
(ds2.sea_water_temperature_02_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s02')
(ds2.sea_water_temperature_04_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s04')
plt.legend()
(ds2.sea_water_temperature_04_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s04')
plt.legend()
#tdif=ds2.TEMP_SBE37_MEAN-ds2.sea_water_temperature_01_mean
t1='2019-07-08T18'
t2='2019-07-10T00'
(ds2.TEMP_AIR_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='air')
(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
(ds2.sea_water_temperature_02_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s02')
(ds2.sea_water_temperature_04_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s04')
plt.legend()
#tdif=ds2.TEMP_SBE37_MEAN-ds2.sea_water_temperature_01_mean
t1='2019-06-28T12'
t2='2019-06-29T12'
(ds2.TEMP_AIR_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='air')
(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
(ds2.sea_water_temperature_02_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s02')
(ds2.sea_water_temperature_04_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s04')
plt.legend()
#tdif=ds2.TEMP_SBE37_MEAN-ds2.sea_water_temperature_01_mean
t1='2019-06-05T18'
t2='2019-06-06T05'
(ds2.TEMP_AIR_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='air')
(ds2.sea_water_temperature_00_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s00')
(ds2.sea_water_temperature_01_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s01')
(ds2.TEMP_SBE37_MEAN[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='sbe')
(ds2.sea_water_temperature_02_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s02')
(ds2.sea_water_temperature_04_mean[0,:]-ds2.sea_water_temperature_06_mean[0,:]).sel(time=slice(t1,t2)).plot(label='s04')
plt.legend()
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_00_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_00_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_00_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_01_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_01_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_01_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_02_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_02_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_02_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_04_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_04_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_04_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_05_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_05_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_05_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_SBE37_MEAN-ds.sea_water_temperature_06_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_CTD_RBR_MEAN-ds.sea_water_temperature_06_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
tdif=ds.TEMP_O2_RBR_MEAN-ds.sea_water_temperature_06_mean
print(tdif.mean('time').data,tdif.std('time').data,(np.isfinite(tdif)).sum('time').data)
```
# PLOT DIURNAL WARMING
```
ds10=ds.isel(trajectory=0).resample(time='10min').mean()
plt.figure(figsize=(12,6))
subset=ds10.sel(time=slice('2019-06-15T08','2019-06-16'))
for i in range(2):
var='sea_water_temperature_'+str(i).zfill(2)+'_mean'
lvar=str(i).zfill(2)
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
var='TEMP_SBE37_MEAN'
lvar='SBE37'
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
for i in range(2,7):
var='sea_water_temperature_'+str(i).zfill(2)+'_mean'
lvar=str(i).zfill(2)
if i==3:
continue
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
plt.legend()
plt.ylabel('$\Delta$ T (K)')
plt.xlabel('Time (GMT)')
plt.savefig(fig_dir+'figs/diurnal36_06-15.png')
plt.figure(figsize=(12,6))
plt.plot(subset.time,subset.TEMP_AIR_MEAN-subset.sea_water_temperature_00_mean,label=lvar)
plt.figure(figsize=(12,6))
subset=ds10.sel(time=slice('2019-07-08T12','2019-07-10T12'))
for i in range(2):
var='sea_water_temperature_'+str(i).zfill(2)+'_mean'
lvar=str(i).zfill(2)
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
var='TEMP_SBE37_MEAN'
lvar='SBE37'
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
for i in range(2,7):
var='sea_water_temperature_'+str(i).zfill(2)+'_mean'
lvar=str(i).zfill(2)
if i==3:
continue
plt.plot(subset.time,subset[var]-subset.sea_water_temperature_06_mean,label=lvar,lw=3)
plt.legend()
plt.ylabel('$\Delta$ T (K)')
plt.xlabel('Time (GMT)')
plt.savefig(fig_dir+'figs/diurnal36_07-08.png')
plt.figure(figsize=(12,6))
plt.plot(subset.time,subset.TEMP_AIR_MEAN-subset.sea_water_temperature_00_mean,label=lvar)
plt.figure(figsize=(12,6))
subset=ds10.sel(time=slice('2019-05-15T12','2019-09-10T12'))
plt.plot(subset.time,subset.TEMP_AIR_MEAN-subset.sea_water_temperature_00_mean,label='$\Delta$T$_{air-sea}$')
plt.plot(subset.time,subset.sea_water_temperature_00_mean-subset.sea_water_temperature_06_mean,label='$\Delta$T$_{dw}$')
plt.legend()
plt.ylabel('$\Delta$ T (K)')
plt.xlabel('Time (GMT)')
plt.savefig(fig_dir+'figs/diurnal36_airseatemp.png')
subset=ds.sel(time=slice('2019-07-07','2019-07-11'))
tdif=subset.sea_water_temperature_00_mean-subset.sea_water_temperature_06_mean
tdif[0,:].plot()
fig = plt.figure(figsize=(8,15))
ax = plt.axes(projection = ccrs.NorthPolarStereo(central_longitude=180.0)) # create a set of axes with Mercator projection
for i in range(1):
ds2 = ds.isel(trajectory=i).sel(time=slice('2019-05-01','2019-09-15'))
im2=ax.quiver(ds2.lon[::200].data,
ds2.lat[::200].data,
ds2.UWND_MEAN[::200].data,
ds2.VWND_MEAN[::200].data,
scale=140,transform=ccrs.PlateCarree())
im=ax.scatter(ds2.lon,ds2.lat,
c=ds2.TEMP_AIR_MEAN-ds2.sea_water_temperature_00_mean,
s=.15,transform=ccrs.PlateCarree(),label=ds.trajectory[i].data,
cmap='seismic',vmin=-2,vmax=2)
ax.coastlines(resolution='10m')
ax.set_extent([-180,-158,68,77])
ax.legend()
cax = fig.add_axes([0.45, 0.17, 0.3, 0.02])
cbar = fig.colorbar(im,cax=cax, orientation='horizontal')
cbar.set_label('SST ($^\circ$C)')
fig.savefig(fig_dir+'figs/map_nasa_data_air-sbe5600.png')
fig = plt.figure(figsize=(8,15))
ax = plt.axes(projection = ccrs.NorthPolarStereo(central_longitude=180.0)) # create a set of axes with Mercator projection
for i in range(1):
ds2 = ds.isel(trajectory=i).sel(time=slice('2019-06-15','2019-06-16'))
im2=ax.quiver(ds2.lon[::100].data,
ds2.lat[::100].data,
ds2.UWND_MEAN[::100].data,
ds2.VWND_MEAN[::100].data,
scale=20,transform=ccrs.PlateCarree())
im=ax.scatter(ds2.lon,ds2.lat,
c=ds2.TEMP_AIR_MEAN-ds2.sea_water_temperature_00_mean,
s=.15,transform=ccrs.PlateCarree(),label=ds.trajectory[i].data,
cmap='seismic',vmin=-2,vmax=2)
ax.coastlines(resolution='10m')
ax.set_extent([-175,-158,68,72])
ax.legend()
cax = fig.add_axes([0.45, 0.17, 0.3, 0.02])
cbar = fig.colorbar(im,cax=cax, orientation='horizontal')
cbar.set_label('SST ($^\circ$C)')
fig.savefig(fig_dir+'figs/map_nasa_data_air-sbe5600-06-15.png')
fig = plt.figure(figsize=(8,15))
ax = plt.axes(projection = ccrs.NorthPolarStereo(central_longitude=180.0)) # create a set of axes with Mercator projection
for i in range(1):
ds2 = ds.isel(trajectory=i).sel(time=slice('2019-07-08','2019-07-10'))
im2=ax.quiver(ds2.lon[::100].data,
ds2.lat[::100].data,
ds2.UWND_MEAN[::100].data,
ds2.VWND_MEAN[::100].data,
scale=100,transform=ccrs.PlateCarree())
im=ax.scatter(ds2.lon,ds2.lat,
c=ds2.sea_water_temperature_00_mean-ds2.sea_water_temperature_06_mean,
s=.15,transform=ccrs.PlateCarree(),label=ds.trajectory[i].data,
cmap='seismic',vmin=-2,vmax=2)
ax.coastlines(resolution='10m')
ax.set_extent([-173,-160,70,71])
ax.legend()
cax = fig.add_axes([0.45, 0.17, 0.3, 0.02])
cbar = fig.colorbar(im,cax=cax, orientation='horizontal')
cbar.set_label('SST ($^\circ$C)')
fig.savefig(fig_dir+'figs/map_nasa_data_air-sbe5600-07-10.png')
plt.quiver(ds2.lon[::100].data,
ds2.lat[::100].data,
ds2.UWND_MEAN[::100].data,
ds2.VWND_MEAN[::100].data,
scale=50)
plt.scatter(ds2.lon,ds2.lat,
c=ds2.sea_water_temperature_00_mean-ds2.sea_water_temperature_06_mean,
s=.15,
cmap='seismic',vmin=-2,vmax=2)
%matplotlib inline
import sys
sys.path.append('./../../flux/')
from coare3 import coare3
coare3
```
# Understanding Data Actions
blocktorch streamlines the creation and implementation of machine learning models for tabular data. One of the many features it offers is [data checks](https://blocktorch.alteryx.com/en/stable/user_guide/data_checks.html), which are geared towards determining the health of the data before we train a model on it. These data checks have associated actions with them and will be shown in this notebook. In our default data checks, we have the following checks:
- `HighlyNullDataCheck`: Checks whether the rows or columns are highly null
- `IDColumnsDataCheck`: Checks for columns that could be ID columns
- `TargetLeakageDataCheck`: Checks if any of the input features have high association with the targets
- `InvalidTargetDataCheck`: Checks if there are null or other invalid values in the target
- `NoVarianceDataCheck`: Checks if either the target or any features have no variance
- `NaturalLanguageNaNDataCheck`: Checks if any natural language columns have missing data
- `DateTimeNaNDataCheck`: Checks if any datetime columns have missing data
blocktorch has additional data checks that can be seen [here](https://blocktorch.alteryx.com/en/stable/api_index.html#data-checks), with usage examples [here](https://blocktorch.alteryx.com/en/stable/user_guide/data_checks.html). Below, we will walk through usage of blocktorch's default data checks and actions.
First, we import the necessary requirements to demonstrate these checks.
```
import woodwork as ww
import pandas as pd
from blocktorch import AutoMLSearch
from blocktorch.demos import load_fraud
from blocktorch.preprocessing import split_data
```
Let's look at the input feature data. blocktorch uses the [Woodwork](https://woodwork.alteryx.com/en/stable/) library to represent this data. The demo data that blocktorch returns is a Woodwork DataTable and DataColumn.
```
X, y = load_fraud(n_rows=1500)
X
```
## Adding noise and unclean data
This data is already clean and compatible with blocktorch's ``AutoMLSearch``. In order to demonstrate blocktorch default data checks, we will add the following:
- A column of mostly null values (<0.5% non-null)
- A column with low/no variance
- A row of null values
- A missing target value
We will add the first two columns to the whole dataset and we will only add the last two to the training data. Note: these only represent some of the scenarios that blocktorch default data checks can catch.
```
# add a column with no variance in the data
X['no_variance'] = [1 for _ in range(X.shape[0])]
# add a column with >99.5% null values
X['mostly_nulls'] = [None] * (X.shape[0] - 5) + [i for i in range(5)]
# since we changed the data, let's reinitialize the woodwork datatable
X.ww.init()
# let's split some training and validation data
X_train, X_valid, y_train, y_valid = split_data(X, y, problem_type='binary')
# let's copy the datetime at row 1 for future use
date = X_train.iloc[1]['datetime']
# make row 1 all nan values
X_train.iloc[1] = [None] * X_train.shape[1]
# make one of the target values null
y_train[990] = None
X_train.ww.init()
y_train = ww.init_series(y_train)
# Let's take another look at the new X_train data
X_train
```
If we call `AutoMLSearch.search()` on this data, the search will fail due to the columns and issues we've added above. Note: we use a try/except here to catch the resulting ValueError that AutoMLSearch raises.
```
automl = AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary')
try:
automl.search()
except ValueError as e:
# to make the error message more distinct
print("=" * 80, "\n")
print("Search errored out! Message received is: {}".format(e))
print("=" * 80, "\n")
```
We can use the `search_iterative()` function provided in blocktorch to determine what potential health issues our data has. We can see that this [search_iterative](https://blocktorch.alteryx.com/en/latest/autoapi/blocktorch/automl/index.html#blocktorch.automl.search_iterative) function is a public method available through `blocktorch.automl` and is different from the [search](https://blocktorch.alteryx.com/en/stable/autoapi/blocktorch/automl/index.html#blocktorch.automl.AutoMLSearch) function of the `AutoMLSearch` class in blocktorch. This `search_iterative()` function allows us to run the default data checks on the data, and, if there are no errors, automatically runs `AutoMLSearch.search()`.
```
from blocktorch.automl import search_iterative
results = search_iterative(X_train, y_train, problem_type='binary')
results
```
The return value of the `search_iterative` function above is a tuple. The first element is the `AutoMLSearch` object if it runs (and `None` otherwise), and the second element is a dictionary of potential warnings and errors that the default data checks find on the passed-in `X` and `y` data. In this dictionary, warnings are suggestions from the data checks that can be useful to address to improve the search, but they will not break AutoMLSearch. On the flip side, errors will break AutoMLSearch and must be addressed by the user.
## Addressing DataCheck errors
We will show that we can address errors to allow AutoMLSearch to run. However, ignoring warnings will come at the expense of performance.
We can print out the errors first to make it easier to read, and then we'll create new features and targets from the original training data.
```
results[1]['errors']
# copy the DataTables to new variables
X_train_no_errors = X_train.copy()
y_train_no_errors = y_train.copy()
# We address the errors by looking at the resulting dictionary errors listed
# first, let's address the `TARGET_HAS_NULL` error
y_train_no_errors.fillna(False, inplace=True)
# here, we address the `NO_VARIANCE` error
X_train_no_errors.drop("no_variance", axis=1, inplace=True)
# lastly, we address the `DATETIME_HAS_NAN` error with the date we had saved earlier
X_train_no_errors.iloc[1, 2] = date
# let's reinitialize the Woodwork DataTable
X_train_no_errors.ww.init()
X_train_no_errors.head()
```
We can now run search on `X_train_no_errors` and `y_train_no_errors`. Note that the search here doesn't fail since we addressed the errors, but there will still exist warnings in the returned tuple. This search allows the `mostly_nulls` column to remain in the features during search.
```
results_no_errors = search_iterative(X_train_no_errors, y_train_no_errors, problem_type='binary')
results_no_errors
```
## Addressing all warnings and errors
We can look at the `actions` key of the dictionary in order to see how we can fix and clean all of the data. This will help us clean both the warnings and errors from the data and provide us with a better model.
```
results[1]['actions']
```
We note that there are four action tasks that we can take to clean the data. Three of the tasks ask us to drop a row or column in the features, while one task asks us to impute the target value.
```
# The first action states to drop the row given by the action code
X_train.drop(1477, axis=0, inplace=True)
# we must also drop this for y since we are removing its associated feature input
y_train.drop(index=1477, inplace=True)
print("The new length of X_train is {} and y_train is {}".format(len(X_train),len(y_train)))
# Remove the 'mostly_nulls' column from X_train, which is the second action item
X_train.drop('mostly_nulls', axis=1, inplace=True)
X_train.head()
# Address the null in targets, which is the third action item
y_train.fillna(False, inplace=True)
y_train.isna().any()
# Finally, we can drop the 'no_variance' column, which is the final action item
X_train.drop('no_variance', axis=1, inplace=True)
X_train.head()
# let's reinitialize the dataframe using Woodwork and try the search again
X_train.ww.init()
results_cleaned = search_iterative(X_train, y_train, problem_type='binary')
```
Note that this time, we do get an `AutoMLSearch` object returned to us, as well as an empty dictionary of warnings and errors. We can use the `AutoMLSearch` object as needed, and we can see that the resulting warning dictionary is empty.
```
aml = results_cleaned[0]
aml.rankings
data_check_results = results_cleaned[1]
data_check_results
```
## Comparing removing only errors versus removing both warnings and errors
Let's see the differences in model performance when we remove only errors versus remove both warnings and errors. To do this, we compare the performance of the best pipelines on the validation data. Remember that in the search where we only address errors, we still have the `mostly_nulls` column present in the data, so we leave that column in the validation data for its respective search. We drop the other `no_variance` column from both searches.
Additionally, we do some logical type setting since we had added additional noise to just the training data. This allows the data to be of the same types in both training and validation.
```
# drop the no_variance column
X_valid.drop("no_variance", axis=1, inplace=True)
# logical type management
X_valid.ww.init(logical_types={"customer_present": "Categorical"})
y_valid = ww.init_series(y_valid, logical_type="Categorical")
best_pipeline_no_errors = results_no_errors[0].best_pipeline
print("Only dropping errors:", best_pipeline_no_errors.score(X_valid, y_valid, ["Log Loss Binary"]), "\n")
# drop the mostly_nulls column and reinitialize the DataTable
X_valid.drop("mostly_nulls", axis=1, inplace=True)
X_valid.ww.init()
best_pipeline_clean = results_cleaned[0].best_pipeline
print("Addressing all actions:", best_pipeline_clean.score(X_valid, y_valid, ["Log Loss Binary"]), "\n")
```
We can compare the differences in model performance when we address all action items (warnings and errors) in comparison to when we only address errors. While it isn't guaranteed that addressing all actions will always have better performance, we do recommend doing so since we only raise these issues when we believe the features have problems that could negatively impact or not benefit the search.
In the future, we aim to provide a helper function to allow users to quickly clean the data by taking in the list of actions and creating an appropriate pipeline of transformers to alter the data.
```
import numpy as np
from bokeh.plotting import figure, show, output_notebook
from bokeh.layouts import gridplot
output_notebook()
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
xpts = np.array([-.09, -.12, .0, .12, .09])
ypts = np.array([-.1, .02, .1, .02, -.1])
figures = []
p = figure(title="annular_wedge")
p.annular_wedge(x, y, 10, 20, 0.6, 4.1, color="#8888ee",
inner_radius_units="screen", outer_radius_units="screen")
figures.append(p)
p = figure(title="annulus")
p.annulus(x, y, 10, 20, color="#7FC97F",
inner_radius_units="screen", outer_radius_units = "screen")
figures.append(p)
p = figure(title="arc")
p.arc(x, y, 20, 0.6, 4.1,
radius_units="screen", color="#BEAED4", line_width=3)
figures.append(p)
p = figure(title="bezier")
p.bezier(x, y, x+0.2, y, x+0.1, y+0.1, x-0.1, y-0.1,
color="#D95F02", line_width=2)
figures.append(p)
p = figure(title="circle")
p.circle(x, y, radius=0.1, color="#3288BD")
figures.append(p)
p = figure(title="ellipse")
p.ellipse(x, y, 15, 25, angle=-0.7, color="#1D91C0",
width_units="screen", height_units="screen")
figures.append(p)
p = figure(title="line")
p.line(x, y, color="#F46D43")
figures.append(p)
p = figure(title="multi_line")
p.multi_line([xpts+xx for xx in x], [ypts+yy for yy in y], color="#8073AC", line_width=2)
figures.append(p)
p = figure(title="multi_polygons")
p.multi_polygons([[[xpts*2+xx, xpts+xx]] for xx in x], [[[ypts*3+yy, ypts+yy]] for yy in y], color="#FB9A99")
figures.append(p)
p = figure(title="oval")
p.oval(x, y, 15, 25, angle=-0.7, color="#1D91C0",
width_units="screen", height_units="screen")
figures.append(p)
p = figure(title="patch")
p.patch(x, y, color="#A6CEE3")
figures.append(p)
p = figure(title="patches")
p.patches([xpts+xx for xx in x], [ypts+yy for yy in y], color="#FB9A99")
figures.append(p)
p = figure(title="quad")
p.quad(x, x-0.1, y, y-0.1, color="#B3DE69")
figures.append(p)
p = figure(title="quadratic")
p.quadratic(x, y, x+0.2, y, x+0.1, y+0.1, color="#4DAF4A", line_width=3)
figures.append(p)
p = figure(title="ray")
p.ray(x, y, 45, -0.7, color="#FB8072", line_width=2)
figures.append(p)
p = figure(title="rect")
p.rect(x, y, 10, 20, color="#CAB2D6", width_units="screen", height_units="screen")
figures.append(p)
p = figure(title="segment")
p.segment(x, y, x-0.1, y-0.1, color="#F4A582", line_width=3)
figures.append(p)
p = figure(title="square")
p.square(x, y, size=sizes, color="#74ADD1")
figures.append(p)
p = figure(title="wedge")
p.wedge(x, y, 15, 0.6, 4.1, radius_units="screen", color="#B3DE69")
figures.append(p)
p = figure(title="circle_x")
p.scatter(x, y, marker="circle_x", size=sizes, color="#DD1C77", fill_color=None)
figures.append(p)
p = figure(title="triangle")
p.scatter(x, y, marker="triangle", size=sizes, color="#99D594", line_width=2)
figures.append(p)
p = figure(title="circle")
p.scatter(x, y, marker="o", size=sizes, color="#80B1D3", line_width=3)
figures.append(p)
p = figure(title="cross")
p.scatter(x, y, marker="cross", size=sizes, color="#E6550D", line_width=2)
figures.append(p)
p = figure(title="diamond")
p.scatter(x, y, marker="diamond", size=sizes, color="#1C9099", line_width=2)
figures.append(p)
p = figure(title="inverted_triangle")
p.scatter(x, y, marker="inverted_triangle", size=sizes, color="#DE2D26")
figures.append(p)
p = figure(title="square_x")
p.scatter(x, y, marker="square_x", size=sizes, color="#FDAE6B",
fill_color=None, line_width=2)
figures.append(p)
p = figure(title="asterisk")
p.scatter(x, y, marker="asterisk", size=sizes, color="#F0027F",
line_width=2)
figures.append(p)
p = figure(title="square_cross")
p.scatter(x, y, marker="square_cross", size=sizes, color="#7FC97F",
fill_color=None, line_width=2)
figures.append(p)
p = figure(title="diamond_cross")
p.scatter(x, y, marker="diamond_cross", size=sizes, color="#386CB0",
fill_color=None, line_width=2)
figures.append(p)
p = figure(title="circle_cross")
p.scatter(x, y, marker="circle_cross", size=sizes, color="#FB8072",
fill_color=None, line_width=2)
figures.append(p)
show(gridplot(figures, ncols=3, plot_width=200, plot_height=200))
```
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Estimators
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/estimator"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This document introduces `tf.estimator`, a high-level TensorFlow API. Estimators encapsulate the following actions:
* training
* evaluation
* prediction
* export for serving
TensorFlow implements several pre-made Estimators. Custom estimators are still supported, but mainly as a backwards compatibility measure. **Custom estimators should not be used for new code**. All Estimators--whether pre-made or custom--are classes based on the `tf.estimator.Estimator` class.
For a quick example, try the [Estimator tutorials](../tutorials/estimator/linear.ipynb). For an overview of the API design, see the [white paper](https://arxiv.org/abs/1708.02637).
## Setup
```
! pip install -U tensorflow_datasets
import tempfile
import os
import tensorflow as tf
import tensorflow_datasets as tfds
```
## Advantages
Similar to a `tf.keras.Model`, an `estimator` is a model-level abstraction. The `tf.estimator` API provides some capabilities that are still under development for `tf.keras`. These are:
* Parameter server based training
* Full [TFX](http://tensorflow.org/tfx) integration.
## Estimators Capabilities
Estimators provide the following benefits:
* You can run Estimator-based models on a local host or on a distributed multi-server environment without changing your model. Furthermore, you can run Estimator-based models on CPUs, GPUs, or TPUs without recoding your model.
* Estimators provide a safe distributed training loop that controls how and when to:
* load data
* handle exceptions
* create checkpoint files and recover from failures
* save summaries for TensorBoard
When writing an application with Estimators, you must separate the data input
pipeline from the model. This separation simplifies experiments with
different data sets.
## Using pre-made Estimators
Pre-made Estimators enable you to work at a much higher conceptual level than the base TensorFlow APIs. You no longer have to worry about creating the computational graph or sessions since Estimators handle all the "plumbing" for you. Furthermore, pre-made Estimators let you experiment with different model architectures by making only minimal code changes. `tf.estimator.DNNClassifier`, for example, is a pre-made Estimator class that trains classification models based on dense, feed-forward neural networks.
A TensorFlow program relying on a pre-made Estimator typically consists of the following four steps:
### 1. Write input functions
For example, you might create one function to import the training set and another function to import the test set. Estimators expect their inputs to be formatted as a pair of objects:
* A dictionary in which the keys are feature names and the values are Tensors (or SparseTensors) containing the corresponding feature data
* A Tensor containing one or more labels
The `input_fn` should return a `tf.data.Dataset` that yields pairs in that format.
For example, the following code builds a `tf.data.Dataset` from the Titanic dataset's `train.csv` file:
```
def train_input_fn():
titanic_file = tf.keras.utils.get_file("train.csv", "https://storage.googleapis.com/tf-datasets/titanic/train.csv")
titanic = tf.data.experimental.make_csv_dataset(
titanic_file, batch_size=32,
label_name="survived")
titanic_batches = (
titanic.cache().repeat().shuffle(500)
.prefetch(tf.data.experimental.AUTOTUNE))
return titanic_batches
```
The `input_fn` is executed in a `tf.Graph` and can also directly return a `(features_dict, labels)` pair containing graph tensors, but this is error-prone outside of simple cases like returning constants.
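For illustration, here is a minimal, hypothetical sketch (the feature names and values are placeholders, not taken from the dataset above) of an `input_fn` that returns such a pair of constant tensors directly:
```
def const_input_fn():
    # Return a (features_dict, labels) pair of constant tensors directly.
    features = {"age": tf.constant([28.0, 35.0]),
                "alone": tf.constant([0, 1])}
    labels = tf.constant([1, 0])
    return features, labels
```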
### 2. Define the feature columns.
Each `tf.feature_column` identifies a feature name, its type, and any input pre-processing.
For example, the following snippet creates three feature columns.
- The first uses the `age` feature directly as a floating-point input.
- The second uses the `class` feature as a categorical input.
- The third uses `embark_town` as a categorical input, but uses the `hashing trick` to avoid enumerating the options up front, at the cost of choosing the number of hash buckets.
For further information, see the [feature columns tutorial](https://www.tensorflow.org/tutorials/keras/feature_columns).
```
age = tf.feature_column.numeric_column('age')
cls = tf.feature_column.categorical_column_with_vocabulary_list('class', ['First', 'Second', 'Third'])
embark = tf.feature_column.categorical_column_with_hash_bucket('embark_town', 32)
```
### 3. Instantiate the relevant pre-made Estimator.
For example, here's a sample instantiation of a pre-made Estimator named `LinearClassifier`:
```
model_dir = tempfile.mkdtemp()
model = tf.estimator.LinearClassifier(
model_dir=model_dir,
feature_columns=[embark, cls, age],
n_classes=2
)
```
For further information, see the [linear classifier tutorial](https://www.tensorflow.org/tutorials/estimator/linear).
### 4. Call a training, evaluation, or inference method.
All Estimators provide `train`, `evaluate`, and `predict` methods.
```
model = model.train(input_fn=train_input_fn, steps=100)
result = model.evaluate(train_input_fn, steps=10)
for key, value in result.items():
print(key, ":", value)
for pred in model.predict(train_input_fn):
for key, value in pred.items():
print(key, ":", value)
break
```
### Benefits of pre-made Estimators
Pre-made Estimators encode best practices, providing the following benefits:
* Best practices for determining where different parts of the computational graph should run, implementing strategies on a single machine or on a
cluster.
* Best practices for event (summary) writing and universally useful
summaries.
If you don't use pre-made Estimators, you must implement the preceding features yourself.
## Custom Estimators
The heart of every Estimator, whether pre-made or custom, is its *model function*, `model_fn`, which is a method that builds graphs for training, evaluation, and prediction. When you are using a pre-made Estimator, someone else has already implemented the model function. When relying on a custom Estimator, you must write the model function yourself.
> Note: A custom `model_fn` will still run in 1.x-style graph mode. This means there is no eager execution and no automatic control dependencies. You should plan to migrate away from `tf.estimator` with custom `model_fn`. The alternative APIs are `tf.keras` and `tf.distribute`. If you still need an `Estimator` for some part of your training you can use the `tf.keras.estimator.model_to_estimator` converter to create an `Estimator` from a `keras.Model`.
## Create an Estimator from a Keras model
You can convert existing Keras models to Estimators with `tf.keras.estimator.model_to_estimator`. This is helpful if you want to modernize your model code, but your training pipeline still requires Estimators.
Instantiate a Keras MobileNet V2 model and compile the model with the optimizer, loss, and metrics to train with:
```
import tensorflow as tf
import tensorflow_datasets as tfds
keras_mobilenet_v2 = tf.keras.applications.MobileNetV2(
input_shape=(160, 160, 3), include_top=False)
keras_mobilenet_v2.trainable = False
estimator_model = tf.keras.Sequential([
keras_mobilenet_v2,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(1)
])
# Compile the model
estimator_model.compile(
optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
```
Create an `Estimator` from the compiled Keras model. The initial model state of the Keras model is preserved in the created `Estimator`:
```
est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model)
```
Treat the derived `Estimator` as you would with any other `Estimator`.
```
IMG_SIZE = 160 # All images will be resized to 160x160
def preprocess(image, label):
image = tf.cast(image, tf.float32)
image = (image/127.5) - 1
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
def train_input_fn(batch_size):
data = tfds.load('cats_vs_dogs', as_supervised=True)
train_data = data['train']
train_data = train_data.map(preprocess).shuffle(500).batch(batch_size)
return train_data
```
To train, call the Estimator's train function:
```
est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=50)
```
Similarly, to evaluate, call the Estimator's evaluate function:
```
est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10)
```
For more details, please refer to the documentation for `tf.keras.estimator.model_to_estimator`.
## Saving object-based checkpoints with Estimator
Estimators by default save checkpoints with variable names rather than the object graph described in the [Checkpoint guide](checkpoint.ipynb). `tf.train.Checkpoint` will read name-based checkpoints, but variable names may change when moving parts of a model outside of the Estimator's `model_fn`. For forward compatibility, saving object-based checkpoints makes it easier to train a model inside an Estimator and then use it outside of one.
```
import tensorflow.compat.v1 as tf_compat
def toy_dataset():
inputs = tf.range(10.)[:, None]
labels = inputs * 5. + tf.range(5.)[None, :]
return tf.data.Dataset.from_tensor_slices(
dict(x=inputs, y=labels)).repeat().batch(2)
class Net(tf.keras.Model):
"""A simple linear model."""
def __init__(self):
super(Net, self).__init__()
self.l1 = tf.keras.layers.Dense(5)
def call(self, x):
return self.l1(x)
def model_fn(features, labels, mode):
net = Net()
opt = tf.keras.optimizers.Adam(0.1)
ckpt = tf.train.Checkpoint(step=tf_compat.train.get_global_step(),
optimizer=opt, net=net)
with tf.GradientTape() as tape:
output = net(features['x'])
loss = tf.reduce_mean(tf.abs(output - features['y']))
variables = net.trainable_variables
gradients = tape.gradient(loss, variables)
return tf.estimator.EstimatorSpec(
mode,
loss=loss,
train_op=tf.group(opt.apply_gradients(zip(gradients, variables)),
ckpt.step.assign_add(1)),
# Tell the Estimator to save "ckpt" in an object-based format.
scaffold=tf_compat.train.Scaffold(saver=ckpt))
tf.keras.backend.clear_session()
est = tf.estimator.Estimator(model_fn, './tf_estimator_example/')
est.train(toy_dataset, steps=10)
```
`tf.train.Checkpoint` can then load the Estimator's checkpoints from its `model_dir`.
```
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
ckpt = tf.train.Checkpoint(
step=tf.Variable(1, dtype=tf.int64), optimizer=opt, net=net)
ckpt.restore(tf.train.latest_checkpoint('./tf_estimator_example/'))
ckpt.step.numpy() # From est.train(..., steps=10)
```
## SavedModels from Estimators
Estimators export SavedModels through [`tf.Estimator.export_saved_model`](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#export_saved_model).
```
input_column = tf.feature_column.numeric_column("x")
estimator = tf.estimator.LinearClassifier(feature_columns=[input_column])
def input_fn():
return tf.data.Dataset.from_tensor_slices(
({"x": [1., 2., 3., 4.]}, [1, 1, 0, 0])).repeat(200).shuffle(64).batch(16)
estimator.train(input_fn)
```
To save an `Estimator` you need to create a `serving_input_receiver`. This function builds a part of a `tf.Graph` that parses the raw data received by the SavedModel.
The `tf.estimator.export` module contains functions to help build these `receivers`.
The following code builds a receiver, based on the `feature_columns`, that accepts serialized `tf.Example` protocol buffers, which are often used with [tf-serving](https://tensorflow.org/serving).
```
tmpdir = tempfile.mkdtemp()
serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
tf.feature_column.make_parse_example_spec([input_column]))
estimator_base_path = os.path.join(tmpdir, 'from_estimator')
estimator_path = estimator.export_saved_model(estimator_base_path, serving_input_fn)
```
You can also load and run that model from Python:
```
imported = tf.saved_model.load(estimator_path)
def predict(x):
example = tf.train.Example()
example.features.feature["x"].float_list.value.extend([x])
return imported.signatures["predict"](
examples=tf.constant([example.SerializeToString()]))
print(predict(1.5))
print(predict(3.5))
```
`tf.estimator.export.build_raw_serving_input_receiver_fn` allows you to create input functions which take raw tensors rather than `tf.train.Example`s.
## Using `tf.distribute.Strategy` with Estimator (Limited support)
See the [Distributed training guide](guide/distributed_training.ipynb) for more info.
`tf.estimator` is a distributed training TensorFlow API that originally supported the async parameter server approach. `tf.estimator` now supports `tf.distribute.Strategy`. If you're using `tf.estimator`, you can change to distributed training with very few changes to your code. With this, Estimator users can now do synchronous distributed training on multiple GPUs and multiple workers, as well as use TPUs. This support in Estimator is, however, limited. See [What's supported now](#estimator_support) section below for more details.
The usage of `tf.distribute.Strategy` with Estimator is slightly different than the Keras case. Instead of using `strategy.scope`, now we pass the strategy object into the [`RunConfig`](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig) for the Estimator.
Here is a snippet of code that shows this with a premade Estimator `LinearRegressor` and `MirroredStrategy`:
```
mirrored_strategy = tf.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(
train_distribute=mirrored_strategy, eval_distribute=mirrored_strategy)
regressor = tf.estimator.LinearRegressor(
feature_columns=[tf.feature_column.numeric_column('feats')],
optimizer='SGD',
config=config)
```
We use a premade Estimator here, but the same code works with a custom Estimator as well. `train_distribute` determines how training will be distributed, and `eval_distribute` determines how evaluation will be distributed. This is another difference from Keras where we use the same strategy for both training and eval.
Now we can train and evaluate this Estimator with an input function:
```
def input_fn():
dataset = tf.data.Dataset.from_tensors(({"feats":[1.]}, [1.]))
return dataset.repeat(1000).batch(10)
regressor.train(input_fn=input_fn, steps=10)
regressor.evaluate(input_fn=input_fn, steps=10)
```
Another difference to highlight here between Estimator and Keras is the input handling. In Keras, each batch of the dataset is split automatically across the multiple replicas. In Estimator, however, batches are not split automatically, nor is the data automatically sharded across different workers. You have full control over how you want your data to be distributed across workers and devices, and you must provide an `input_fn` to specify how to distribute your data.
Your `input_fn` is called once per worker, thus giving one dataset per worker. Then one batch from that dataset is fed to one replica on that worker, thereby consuming N batches for N replicas on 1 worker. In other words, the dataset returned by the `input_fn` should provide batches of size `PER_REPLICA_BATCH_SIZE`. And the global batch size for a step can be obtained as `PER_REPLICA_BATCH_SIZE * strategy.num_replicas_in_sync`.
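As a minimal sketch (reusing the toy dataset and `mirrored_strategy` from above; the per-replica batch size is an arbitrary choice), the relationship looks like this:
```
PER_REPLICA_BATCH_SIZE = 10

def per_replica_input_fn():
    # The dataset returned by input_fn is batched per replica, not globally.
    dataset = tf.data.Dataset.from_tensors(({"feats": [1.]}, [1.]))
    return dataset.repeat(1000).batch(PER_REPLICA_BATCH_SIZE)

global_batch_size = PER_REPLICA_BATCH_SIZE * mirrored_strategy.num_replicas_in_sync
```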
When doing multi-worker training, you should either split your data across the workers or shuffle with a random seed on each. You can see an example of how to do this in the [Multi-worker Training with Estimator](../tutorials/distribute/multi_worker_with_estimator.ipynb) tutorial.
Similarly, you can use multi-worker and parameter server strategies as well. The code remains the same, but you need to use `tf.estimator.train_and_evaluate` and set `TF_CONFIG` environment variables for each binary running in your cluster.
<a name="estimator_support"></a>
### What's supported now?
There is limited support for training with Estimator using all strategies except `TPUStrategy`. Basic training and evaluation should work, but a number of advanced features such as `v1.train.Scaffold` do not. There may also be a number of bugs in this integration. At this time, we do not plan to actively improve this support, and instead are focused on Keras and custom training loop support. If at all possible, you should prefer to use `tf.distribute` with those APIs instead.
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy |
|:--------------- |:------------------ |:------------- |:----------------------------- |:------------------------ |:------------------------- |
| Estimator API | Limited Support | Not supported | Limited Support | Limited Support | Limited Support |
### Examples and Tutorials
Here are some examples that show end-to-end usage of various strategies with Estimator:
1. [Multi-worker Training with Estimator](../tutorials/distribute/multi_worker_with_estimator.ipynb) to train MNIST with multiple workers using `MultiWorkerMirroredStrategy`.
2. [End to end example](https://github.com/tensorflow/ecosystem/tree/master/distribution_strategy) for multi worker training in tensorflow/ecosystem using Kubernetes templates. This example starts with a Keras model and converts it to an Estimator using the `tf.keras.estimator.model_to_estimator` API.
3. Official [ResNet50](https://github.com/tensorflow/models/blob/master/official/vision/image_classification/resnet_imagenet_main.py) model, which can be trained using either `MirroredStrategy` or `MultiWorkerMirroredStrategy`.
<div class="alert alert-block alert-info">
<center><font size="5"><b>Section 5</b></font></center>
<br>
<center><font size="5"><b>Recurrent Neural Network in PyTorch with an Introduction to Natural Language Processing</b></font></center>
</div>
Credit: This example is obtained from the following book:
Subramanian, Vishnu. 2018. "*Deep Learning with PyTorch: A Practical Approach to Building Neural Network Models Using PyTorch.*" Birmingham, U.K., Packt Publishing.
# Simple Text Processing
## Typical Data Preprocessing Steps before Model Training for NLP Applications
* Read the data from disk
* Tokenize the text
* Create a mapping from word to a unique integer
* Convert the text into lists of integers
* Load the data in whatever format your deep learning framework requires
* Pad the text so that all the sequences are the same length and can be processed in batches (a minimal sketch of these steps follows this list)
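The following is a minimal, framework-free sketch (with made-up example sentences) of the tokenize, map-to-integers, and pad steps listed above:
```
texts = ["the movie was great", "terrible plot"]
tokenized = [t.split() for t in texts]                    # tokenize
# Build a word -> integer mapping; reserve 0 for padding.
vocab = {w: i + 1 for i, w in enumerate(sorted({w for s in tokenized for w in s}))}
encoded = [[vocab[w] for w in s] for s in tokenized]      # convert text to lists of integers
max_len = max(len(s) for s in encoded)
padded = [s + [0] * (max_len - len(s)) for s in encoded]  # pad to a common length
print(padded)
```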
## Word Embedding
Word embedding is a very popular way of representing text data in problems that are solved by deep learning algorithms.
Word embedding provides a dense representation of a word as a vector of floating-point numbers.
It drastically reduces the dimensionality compared with a one-hot encoding over the dictionary.
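As a minimal sketch (the vocabulary and embedding sizes here are arbitrary), an embedding layer maps integer word ids to dense vectors:
```
import torch
import torch.nn as nn

embedding = nn.Embedding(num_embeddings=10000, embedding_dim=300)  # vocab size, vector size
word_ids = torch.tensor([[1, 5, 42]])   # one tokenized sentence as integer ids
vectors = embedding(word_ids)           # shape: (1, 3, 300)
print(vectors.shape)
```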
### `Torchtext` and Training word embedding by building a sentiment classifier
Torchtext takes a declarative approach to loading its data:
* you tell torchtext what you want the data to look like, and torchtext handles it for you
* Declaring a Field: The Field specifies how you want a certain field to be processed
The `Field` class is a fundamental component of torchtext and is what makes preprocessing very easy
### Load `torchtext.datasets`
# Use LSTM for Sentiment Classification
1. Preparing the data
2. Creating the batches
3. Creating the network
4. Training the model
```
from torchtext import data, datasets
from torchtext.vocab import GloVe,FastText,CharNGram
TEXT = data.Field(lower=True, fix_length=100,batch_first=False)
LABEL = data.Field(sequential=False,)
train, test = datasets.imdb.IMDB.splits(TEXT, LABEL)
TEXT.build_vocab(train, vectors=GloVe(name='6B', dim=300),max_size=10000,min_freq=10)
LABEL.build_vocab(train,)
len(TEXT.vocab.vectors)
train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=32, device=-1)
train_iter.repeat = False
test_iter.repeat = False
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
class IMDBRnn(nn.Module):
def __init__(self,vocab,hidden_size,n_cat,bs=1,nl=2):
super().__init__()
self.hidden_size = hidden_size
self.bs = bs
self.nl = nl
        self.e = nn.Embedding(vocab,hidden_size)  # use the constructor argument rather than the global n_vocab
self.rnn = nn.LSTM(hidden_size,hidden_size,nl)
self.fc2 = nn.Linear(hidden_size,n_cat)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self,inp):
bs = inp.size()[1]
if bs != self.bs:
self.bs = bs
e_out = self.e(inp)
h0 = c0 = Variable(e_out.data.new(*(self.nl,self.bs,self.hidden_size)).zero_())
rnn_o,_ = self.rnn(e_out,(h0,c0))
rnn_o = rnn_o[-1]
        fc = F.dropout(self.fc2(rnn_o),p=0.8,training=self.training)  # apply dropout only in training mode
return self.softmax(fc)
n_vocab = len(TEXT.vocab)
n_hidden = 100
model = IMDBRnn(n_vocab,n_hidden,n_cat=3,bs=32)
#model = model.cuda()
optimizer = optim.Adam(model.parameters(),lr=1e-3)
def fit(epoch,model,data_loader,phase='training',volatile=False):
if phase == 'training':
model.train()
if phase == 'validation':
model.eval()
volatile=True
running_loss = 0.0
running_correct = 0
for batch_idx , batch in enumerate(data_loader):
text , target = batch.text , batch.label
# if is_cuda:
# text,target = text.cuda(),target.cuda()
if phase == 'training':
optimizer.zero_grad()
output = model(text)
loss = F.nll_loss(output,target)
#running_loss += F.nll_loss(output,target,size_average=False).data[0]
running_loss += F.nll_loss(output,target,size_average=False).data
preds = output.data.max(dim=1,keepdim=True)[1]
running_correct += preds.eq(target.data.view_as(preds)).cpu().sum()
if phase == 'training':
loss.backward()
optimizer.step()
loss = running_loss/len(data_loader.dataset)
accuracy = 100. * running_correct/len(data_loader.dataset)
print("epoch: ", epoch, "loss: ", loss, "accuracy: ", accuracy)
#print(f'{phase} loss is {loss:{5}.{2}} and {phase} accuracy is {running_correct}/{len(data_loader.dataset)}{accuracy:{10}.{4}}')
return loss,accuracy
import time
start = time.time()
train_losses , train_accuracy = [],[]
val_losses , val_accuracy = [],[]
for epoch in range(1,20):
epoch_loss, epoch_accuracy = fit(epoch,model,train_iter,phase='training')
val_epoch_loss , val_epoch_accuracy = fit(epoch,model,test_iter,phase='validation')
train_losses.append(epoch_loss)
train_accuracy.append(epoch_accuracy)
val_losses.append(val_epoch_loss)
val_accuracy.append(val_epoch_accuracy)
end = time.time()
print((end-start)/60)
print("Execution Time: ", round(((end-start)/60),1), "minutes")
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(range(1,len(train_losses)+1),train_losses,'bo',label = 'training loss')
plt.plot(range(1,len(val_losses)+1),val_losses,'r',label = 'validation loss')
plt.legend()
plt.plot(range(1,len(train_accuracy)+1),train_accuracy,'bo',label = 'train accuracy')
plt.plot(range(1,len(val_accuracy)+1),val_accuracy,'r',label = 'val accuracy')
plt.legend()
```
```
import numpy as np
import pandas as pd
import holoviews as hv
import networkx as nx
from holoviews import opts
hv.extension('bokeh')
defaults = dict(width=400, height=400)
hv.opts.defaults(
opts.EdgePaths(**defaults), opts.Graph(**defaults), opts.Nodes(**defaults))
```
Visualizing and working with network graphs is a common problem in many different disciplines. HoloViews provides the ability to represent and visualize graphs very simply and easily with facilities for interactively exploring the nodes and edges of the graph, especially using the bokeh plotting interface.
The ``Graph`` ``Element`` differs from other elements in HoloViews in that it consists of multiple sub-elements. The data of the ``Graph`` element itself are the abstract edges between the nodes. By default the element will automatically compute concrete ``x`` and ``y`` positions for the nodes and represent them using a ``Nodes`` element, which is stored on the Graph. The abstract edges and concrete node positions are sufficient to render the ``Graph`` by drawing straight-line edges between the nodes. In order to supply explicit edge paths we can also declare ``EdgePaths``, providing explicit coordinates for each edge to follow.
To summarize a ``Graph`` consists of three different components:
* The ``Graph`` itself holds the abstract edges stored as a table of node indices.
* The ``Nodes`` hold the concrete ``x`` and ``y`` positions of each node along with a node ``index``. The ``Nodes`` may also define any number of value dimensions, which can be revealed when hovering over the nodes or to color the nodes by.
* The ``EdgePaths`` can optionally be supplied to declare explicit node paths.
#### A simple Graph
Let's start by declaring a very simple graph connecting one node to all others. If we simply supply the abstract connectivity of the ``Graph``, it will automatically compute a layout for the nodes using the ``layout_nodes`` operation, which defaults to a circular layout:
```
# Declare abstract edges
N = 8
node_indices = np.arange(N, dtype=np.int32)
source = np.zeros(N, dtype=np.int32)
target = node_indices
simple_graph = hv.Graph(((source, target),))
simple_graph
```
#### Accessing the nodes and edges
We can easily access the ``Nodes`` and ``EdgePaths`` on the ``Graph`` element using the corresponding properties:
```
simple_graph.nodes + simple_graph.edgepaths
```
#### Displaying directed graphs
When specifying the graph edges, the source and target nodes are listed in order; if the graph is actually a directed graph, this may be used to indicate the directionality of the graph. By setting ``directed=True`` as a plot option it is possible to indicate the directionality of each edge using an arrow:
```
simple_graph.relabel('Directed Graph').opts(directed=True, node_size=5, arrowhead_length=0.05)
```
The length of the arrows can be set as a fraction of the overall graph extent using the ``arrowhead_length`` option.
#### Supplying explicit paths
Next we will extend this example by supplying explicit edges:
```
def bezier(start, end, control, steps=np.linspace(0, 1, 100)):
return (1-steps)**2*start + 2*(1-steps)*steps*control+steps**2*end
x, y = simple_graph.nodes.array([0, 1]).T
paths = []
for node_index in node_indices:
ex, ey = x[node_index], y[node_index]
paths.append(np.column_stack([bezier(x[0], ex, 0), bezier(y[0], ey, 0)]))
bezier_graph = hv.Graph(((source, target), (x, y, node_indices), paths))
bezier_graph
```
## Interactive features
#### Hover and selection policies
Thanks to Bokeh we can reveal more about the graph by hovering over the nodes and edges. The ``Graph`` element provides an ``inspection_policy`` and a ``selection_policy``, which define whether hovering and selection highlight edges associated with the selected node or nodes associated with the selected edge. These policies can be toggled by setting the policy to ``'nodes'`` (the default) or ``'edges'``.
```
bezier_graph.relabel('Edge Inspection').opts(inspection_policy='edges')
```
In addition to changing the policy we can also change the colors used when hovering and selecting nodes:
```
bezier_graph.opts(
opts.Graph(inspection_policy='nodes', tools=['hover', 'box_select'],
edge_hover_line_color='green', node_hover_fill_color='red'))
```
#### Additional information
We can also associate additional information with the nodes and edges of a graph. By constructing the ``Nodes`` explicitly we can declare additional value dimensions, which are revealed when hovering and/or can be mapped to the color by setting the ``color`` to the dimension name ('Weight'). We can also associate additional information with each edge by supplying a value dimension to the ``Graph`` itself, which we can map to various style options, e.g. by setting the ``edge_color`` and ``edge_line_width``.
```
node_labels = ['Output']+['Input']*(N-1)
np.random.seed(7)
edge_labels = np.random.rand(8)
nodes = hv.Nodes((x, y, node_indices, node_labels), vdims='Type')
graph = hv.Graph(((source, target, edge_labels), nodes, paths), vdims='Weight')
(graph + graph.opts(inspection_policy='edges', clone=True)).opts(
opts.Graph(node_color='Type', edge_color='Weight', cmap='Set1',
edge_cmap='viridis', edge_line_width=hv.dim('Weight')*10))
```
If you want to supply additional node information without specifying explicit node positions, you may pass in a ``Dataset`` object consisting of various value dimensions.
```
node_info = hv.Dataset(node_labels, vdims='Label')
hv.Graph(((source, target), node_info)).opts(node_color='Label', cmap='Set1')
```
## Working with NetworkX
NetworkX is a very useful library when working with network graphs and the Graph Element provides ways of importing a NetworkX Graph directly. Here we will load the Karate Club graph and use the ``circular_layout`` function provided by NetworkX to lay it out:
```
G = nx.karate_club_graph()
hv.Graph.from_networkx(G, nx.layout.circular_layout).opts(tools=['hover'])
```
It is also possible to pass arguments to the NetworkX layout function as keywords to ``hv.Graph.from_networkx``, e.g. we can override the k-value of the Fruchterman-Reingold layout:
```
hv.Graph.from_networkx(G, nx.layout.fruchterman_reingold_layout, k=1)
```
Finally, if we want to lay out a Graph after it has already been constructed, the ``layout_nodes`` operation may be used, which also allows applying the ``weight`` argument to graphs which have not been constructed with networkx:
```
from holoviews.element.graphs import layout_nodes
graph = hv.Graph([
('a', 'b', 3),
('a', 'c', 0.2),
('c', 'd', 0.1),
('c', 'e', 0.7),
('c', 'f', 5),
('a', 'd', 0.3)
], vdims='weight')
layout_nodes(graph, layout=nx.layout.fruchterman_reingold_layout, kwargs={'weight': 'weight'})
```
## Adding labels
If the ``Graph`` we have constructed has additional metadata, we can easily use it for labels: we simply get a handle on the nodes, cast them to ``hv.Labels``, and then overlay them:
```
graph = hv.Graph.from_networkx(G, nx.layout.fruchterman_reingold_layout)
labels = hv.Labels(graph.nodes, ['x', 'y'], 'club')
(graph * labels.opts(text_font_size='8pt', text_color='white', bgcolor='gray'))
```
## Animating graphs
Like all other elements ``Graph`` can be updated in a ``HoloMap`` or ``DynamicMap``. Here we animate how the Fruchterman-Reingold force-directed algorithm lays out the nodes in real time.
```
hv.HoloMap({i: hv.Graph.from_networkx(G, nx.spring_layout, iterations=i, seed=10) for i in range(5, 30, 5)},
kdims='Iterations')
```
## Real world graphs
As a final example, let's look at a slightly larger graph. We will load a dataset of a Facebook network consisting of a number of friendship groups identified by their ``'circle'``. We will load the edge and node data using pandas and then color each node by its friendship group using many of the things we learned above.
```
kwargs = dict(width=800, height=800, xaxis=None, yaxis=None)
opts.defaults(opts.Nodes(**kwargs), opts.Graph(**kwargs))
colors = ['#000000']+hv.Cycle('Category20').values
edges_df = pd.read_csv('../assets/fb_edges.csv')
fb_nodes = hv.Nodes(pd.read_csv('../assets/fb_nodes.csv')).sort()
fb_graph = hv.Graph((edges_df, fb_nodes), label='Facebook Circles')
fb_graph.opts(cmap=colors, node_size=10, edge_line_width=1,
node_line_color='gray', node_color='circle')
```
## Bundling graphs
The datashader library provides algorithms for bundling the edges of a graph, and HoloViews provides convenient wrappers around them. Note that these operations need ``scikit-image``, which you can install using:
```
conda install scikit-image
```
or
```
pip install scikit-image
```
```
from holoviews.operation.datashader import datashade, bundle_graph
bundled = bundle_graph(fb_graph)
bundled
```
## Datashading graphs
For graphs with a large number of edges we can datashade the paths and display the nodes separately. This loses some of the interactive features but will let you visualize quite large graphs:
```
(datashade(bundled, normalization='linear', width=800, height=800) * bundled.nodes).opts(
opts.Nodes(color='circle', size=10, width=1000, cmap=colors, legend_position='right'))
```
### Applying selections
Alternatively we can select the nodes and edges by an attribute that resides on either. In this case we will select the nodes and edges for a particular circle and then overlay just the selected part of the graph on the datashaded plot. Note that selections on the ``Graph`` itself will select all nodes that connect to one of the selected nodes. In this way a smaller subgraph can be highlighted and the larger graph can be datashaded.
```
datashade(bundle_graph(fb_graph), normalization='linear', width=800, height=800) *\
bundled.select(circle='circle15').opts(node_fill_color='white')
```
To select just the nodes that are in 'circle15', set ``selection_mode='nodes'``, overriding the default of 'edges':
```
bundled.select(circle='circle15', selection_mode='nodes')
```
# Data Analysis of Bitcoin and Where it is Heading
# Graphing the whole Graph
```
#### Importing Pandas and others and Reading csv file
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import plotly.express as px
##Remodified .CSV data to make managing data easier.
##Some data cleaning.
Bitcoin = pd.read_csv('HistoricalData4.csv')
##Created A daily Average for each day to work around with the data.
Bitcoin['Daily Average'] = Bitcoin.iloc[:, 2:4].sum(axis=1)/2
##Just to let the viewer see the data that is coming out, truncated
##because there are 2843 rows to show... too much data to print in full
print(Bitcoin[['Date', 'Low', 'High', 'Daily Average']])
print(Bitcoin[['Date', 'Open', 'Close']])
print(Bitcoin[['Date', 'Volume', 'Market Cap']])
#Line graph plot to show, low, high, and Average price
Bitcoin.plot(x="Date", y=["Low", "High", "Daily Average"], figsize=(15, 20), title ="Bitcoin Low, High, and Daily Average Prices.", ylabel="Price in $")
plt.show()
#Line graph to show traditional Open and Close (although buying and selling never sleep for cryptocurrency)
Bitcoin.plot(x="Date", y=["Open", "Close"], figsize=(15, 30), title ="Open and Close Prices.", ylabel="Price in $")
plt.show()
#Line graph to Show Volume and Market Cap
#These two indicators are important to understand how healthy or not healthy a particular stock or cryptocurrency is.
#High volume but a decreasing price could mean people are selling because they see the price falling, or are cashing out
#High Volume and higher price means that people are still buying the currency because there is value.
Bitcoin.plot(x="Date", y=["Volume", "Market Cap"], figsize=(15, 30), title ="Volume and Market Cap.", ylabel="Price in $")
plt.show()
```
## Thoughts on the Line Graph
This graph covers the entire period from when Bitcoin first started being sold and used up to the day I stopped collecting data. As you can see, it isn't really useful for extracting specific values: there is just so much data, and the extremes (prices near 0 early on, and Bitcoin recently pushing past 50k) make it hard to read anything specific unless the figure were perhaps ten times bigger. You can't even see the minute differences between the three data sets.
## Cryptocurrencies are said to be volatile, but are they though?
Or is it only considered volatile if:
1: You invest in the wrong ones
2: You continue to invest in a project that isn't ongoing (like Dogecoin), has an extremely high supply cap, or is known to have problems (the people and company aren't trustworthy, or the company or currency was hacked)
3: You invest in unknown cryptocurrencies that are just derivatives of Bitcoin
4: There is very little popular news around the cryptocurrency.
## There are 3 points in the graph that prove to be interesting because those are 3 points where Bitcoin skyrocketed
The last one needs no introduction. Elon Musk decided to invest more than $1 billion into Bitcoin. That alone has caused the cryptocurrency to skyrocket. [CNBC Link to Article About it!](https://www.cnbc.com/2021/02/08/tesla-buys-1point5-billion-in-bitcoin.html) While this graph doesn't reflect the current day (the price is at 50k right now), it's still fairly accurate.
Other than that, let's see if we can grab some data between the two other points: what caused it to go up close to 20k and then start dropping again to around ~16k in the line graph? Let's create a closer line graph for that.
# Bulls and Bear #1
The first point of the graph is interesting: it went from close to about 1,000 to nearly 20,000 in half a year. I sold my bitcoin for a measly 700 back in the day and stopped reading news about Bitcoin, so I am not too sure what caused it to jump so significantly, other than knowing that Bitcoin gets increasingly harder to mine as it solves ever-harder math problems that take up a lot of energy. The perception among miners and non-miners alike is that cryptocurrency mining is energy intensive, and it is, if the cryptocurrency is a first- or second-generation one that uses mining to issue currency.
[Bitcoin uses as much energy as Argentina](https://www.iflscience.com/technology/bitcoin-mining-now-uses-more-electricity-than-argentina/)
```
#### Importing Pandas and others and Reading csv file
import os
import matplotlib.pyplot as plt
import numpy as np
import math
import seaborn as sns
import pandas as pd
import plotly.express as px
Bitcoin = pd.read_csv('HistoricalData4.csv')
##Created A daily Average for each day to work around with the data.
Bitcoin['Daily Average'] = Bitcoin.iloc[:, 2:4].sum(axis=1)/2
#Condensed line graph plot to show, low, high, and Average price
Bitcoin.plot(x="Date", y=["Low", "High", "Daily Average"], figsize=(15, 20), title ="Bitcoin Low, High, and Daily Average Prices.", ylabel="Price in $")
plt.ylim([1000, 20000])
plt.xlim([1500, 2000])
plt.xticks(visible = True)
plt.show()
```
Searching the period between June 16, 2017 and August 21, 2018, [we get various articles through Google](https://www.google.com/search?q=bitcoin&client=firefox-b-1-d&tbs=cdr:1,cd_min:6/8/2017,cd_max:10/21/2018,sbd:1&tbm=nws&ei=ys0tYMT8EIXU-gSIh7WICA&start=0&sa=N&ved=0ahUKEwjEie_KrvLuAhUFqp4KHYhDDYE4ChDy0wMIhQE&biw=1280&bih=818&dpr=1)
[Bitcoin Hits a New Record High, But Stops Short of $20,000](https://fortune.com/2017/12/17/bitcoin-record-high-short-of-20000/)
[Why is bitcoin's price so high?](https://techcrunch.com/2017/12/08/why-is-bitcoins-price-so-high/)
[Bitcoin tops $16,000, and it's $271B market value passes Home Depot's](https://www.usatoday.com/story/money/2017/12/07/bitcoin-tops-15-000-and-its-259-b-market-value-tops-home-depot/929962001/)
## Bear Market of Bitcoin, and the Rise of Bitcoin Cash Derivative
Later articles in 2018 point to [Bitcoin falling off its all-time high of close to 20k and dropping down to around 8k](https://www.reuters.com/article/us-global-markets-bitcoin-idUSKBN1FM11M) due to a possible regulatory clampdown, similar to what is happening to another cryptocurrency called [Ripple](https://www.sec.gov/news/press-release/2020-338).
2017 was also the year that [Bitcoin Cash](https://www.marketwatch.com/story/meet-bitcoin-cashthe-new-digital-currency-that-surged-122-in-less-than-a-day-2017-08-02) made its debut. In technical terms, it is known as a fork/derivative: it followed the example of the original Bitcoin and made changes to it. Even so, [Bitcoin Cash as of this article is 700+](https://www.coindesk.com/price/bitcoin-cash)
# What is the future of Cryptocurrency?
With the stock market and cryptocurrencies, we get times of lows and highs. For most cryptocurrencies, in typical stock market terms, we are in quite bullish territory right now.
But the old adage applies here: buy low, sell high. Right now might not be a good time to buy BTC. If news is an indicator, it could go either way right now. A few things to look at:
Elon Musk spent $1B+ on Bitcoin
[SEC is looking into regulating cryptocurrency](https://www.financemagnates.com/cryptocurrency/regulation/sec-commissioner-demands-clear-cryptocurrency-regulations/)
[Visa is planning to include Cryptocurrency in its list of currencies allowed to be transacted.](https://www.forbes.com/sites/billybambrough/2021/02/03/visa-reveals-bitcoin-and-crypto-banking-roadmap-amid-race-to-reach-network-of-70-million/?sh=39b269b401cd)
The original type of cryptocurrency is energy intensive; Bitcoin and Ethereum are the worst offenders when it comes to energy consumption. Newer cryptocurrencies, like Cardano, do away with mining and instead reward people who invest with more coins, rather than having thousands of computers consuming the world's energy.
**A linear regression** shows that the data has far exceeded its predicted values. From day 1 of the collected data to recently, the current price of Bitcoin is an extreme outlier. The linear regression expects the Bitcoin price to be ~10k, but it has pushed up to 50k recently.
If the pattern continues, a refit regression will eventually move its prediction closer to 50k to reflect what could be its potential price; as of right now, the prediction isn't accurate.
```
import os
import matplotlib.pyplot as plt
import math
import numpy as np
import seaborn as sns
import pandas as pd
import plotly.express as px
from sklearn.linear_model import LinearRegression
##Remodified .CSV data to make managing data easier.
##Some data cleaning.
Bitcoin = pd.read_csv('HistoricalData4.csv')
##Created A daily Average for each day to work around with the data.
Bitcoin['Daily Average'] = Bitcoin.iloc[:, 2:4].sum(axis=1)/2
Bitcoin['Date'] = pd.to_datetime(Bitcoin['Date']).apply(lambda date: date.toordinal())
X = Bitcoin[["Date"]]
y = Bitcoin[["Daily Average"]]
regressor = LinearRegression()
regressor.fit(X, y)
y_pred = regressor.predict(X)
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Simple Bitcoin Regression')
plt.xlabel('Ordinal Date')
plt.ylabel('Daily Average Price in $')
plt.gcf().set_size_inches(15, 30)  # plt.figsize is not a valid call; set the figure size explicitly
plt.show()
```
## In the next installment, I'll be doing a few different cryptocurrencies to see if they follow a similar pattern.
My thought is to do one on Cardano/ADA, as that is where I see the future of cryptocurrency going beyond just being a currency (like Bitcoin).
<a href="https://colab.research.google.com/github/google/jax-md/blob/main/notebooks/athermal_linear_elasticity.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Imports and utility code
!pip install jax-md
import numpy as onp
import jax.numpy as jnp
from jax.config import config
config.update('jax_enable_x64', True)
from jax import random
from jax import jit, lax, grad, vmap
import jax.scipy as jsp
from jax_md import space, energy, smap, minimize, util, elasticity, quantity
from jax_md.colab_tools import renderer
f32 = jnp.float32
f64 = jnp.float64
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
def format_plot(x, y):
plt.grid(True)
plt.xlabel(x, fontsize=20)
plt.ylabel(y, fontsize=20)
def finalize_plot(shape=(1, 0.7)):
plt.gcf().set_size_inches(
shape[0] * 1.5 * plt.gcf().get_size_inches()[1],
shape[1] * 1.5 * plt.gcf().get_size_inches()[1])
def run_minimization_while(energy_fn, R_init, shift, max_grad_thresh = 1e-12, max_num_steps=1000000, **kwargs):
init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs)
apply = jit(apply)
@jit
def get_maxgrad(state):
return jnp.amax(jnp.abs(state.force))
@jit
def cond_fn(val):
state, i = val
return jnp.logical_and(get_maxgrad(state) > max_grad_thresh, i<max_num_steps)
@jit
def body_fn(val):
state, i = val
return apply(state), i+1
state = init(R_init)
state, num_iterations = lax.while_loop(cond_fn, body_fn, (state, 0))
return state.position, get_maxgrad(state), num_iterations
def run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift,
max_grad_thresh = 1e-12, max_num_steps = 1000000,
step_inc = 1000, verbose = False, **kwargs):
nbrs = neighbor_fn.allocate(R_init)
init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs)
apply = jit(apply)
@jit
def get_maxgrad(state):
return jnp.amax(jnp.abs(state.force))
@jit
def body_fn(state_nbrs, t):
state, nbrs = state_nbrs
nbrs = neighbor_fn.update(state.position, nbrs)
state = apply(state, neighbor=nbrs)
return (state, nbrs), 0
state = init(R_init, neighbor=nbrs)
step = 0
while step < max_num_steps:
if verbose:
print('minimization step {}'.format(step))
rtn_state, _ = lax.scan(body_fn, (state, nbrs), step + jnp.arange(step_inc))
new_state, nbrs = rtn_state
# If the neighbor list overflowed, rebuild it and repeat part of
# the simulation.
if nbrs.did_buffer_overflow:
print('Buffer overflow.')
nbrs = neighbor_fn.allocate(state.position)
else:
state = new_state
step += step_inc
if get_maxgrad(state) <= max_grad_thresh:
break
if verbose:
print('successfully finished {} steps.'.format(step*step_inc))
return state.position, get_maxgrad(state), nbrs, step
def run_minimization_scan(energy_fn, R_init, shift, num_steps=5000, **kwargs):
init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs)
apply = jit(apply)
@jit
def scan_fn(state, i):
return apply(state), 0.
state = init(R_init)
state, _ = lax.scan(scan_fn,state,jnp.arange(num_steps))
return state.position, jnp.amax(jnp.abs(state.force))
key = random.PRNGKey(0)
```
#Linear elasticity in athermal systems
## The elastic modulus tensor
A global affine deformation is given to lowest order by a symmetric strain tensor $\epsilon$, which transforms any vector $r$ according to
\begin{equation}
r \rightarrow (1 + \epsilon) \cdot r.
\end{equation}
Note that in $d$ dimensions, the strain tensor has $d(d + 1)/2$ independent elements. Now, when a mechanically stable system (i.e. a system at a local energy minimum where there is zero net force on every particle) is subject to an affine deformation, it usually does not remain in mechanical equilibrium. Therefore, there is a secondary, nonaffine response that returns the system to mechanical equilibrium, though usually at a different energy than the undeformed state.
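As a minimal numerical sketch (using a made-up small strain tensor and a few toy 2d positions, not a configuration from this notebook), the affine transformation above acts on every position as follows:
```
# Apply r -> (1 + epsilon) . r to each row of a toy set of 2d positions.
epsilon_toy = jnp.array([[0.01 , 0.002],
                         [0.002, -0.005]])
R_toy = jnp.array([[0.0, 0.0], [1.0, 0.0], [0.3, 0.7]])
R_deformed = R_toy @ (jnp.eye(2) + epsilon_toy).T
```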
The change of energy can be written to quadratic order as
\begin{equation}
\frac{ \Delta U}{V^0} = \sigma^0_{ij}\epsilon_{ji} + \frac 12 C_{ijkl} \epsilon_{ij} \epsilon_{kl} + O\left( \epsilon^3 \right)
\end{equation}
where $C_{ijkl}$ is the $d \times d \times d \times d$ elastic modulus tensor, $\sigma^0$ is the $d \times d$ symmetric stress tensor describing residual stresses in the initial state, and $V^0$ is the volume of the initial state. The symmetries of $\epsilon_{ij}$ imply the following:
\begin{equation}
C_{ijkl} = C_{jikl} = C_{ijlk} = C_{klij}
\end{equation}
When no further symmetries are assumed, the number of independent elastic constants becomes $\frac 18 d(d + 1)(d^2 + d + 2)$, which is 6 in two dimensions and 21 in three dimensions.
##Linear response to an external force
Consider a set of $N$ particles in $d$ dimensions with positions $R_0$. Using $u \equiv R - R_0$ and assuming fixed boundary conditions, we can expand the energy about $R_0$:
\begin{equation}
U = U^0 - F^0 u + \frac 12 u H^0 u + O(u^3),
\end{equation}
where $U^0$ is the energy at $R_0$, $F^0$ is the force, $F^0_\mu \equiv \left. \frac {\partial U}{\partial u_\mu} \right |_{u=0}$, and $H^0$ is the Hessian, $H^0 \equiv \left. \frac{ \partial^2 U}{\partial u_\mu \partial u_\nu}\right|_{u=0}$.
Note that here we are expanding in terms of the particle positions, whereas above we were expanding in the global strain degrees of freedom.
If we assume that $R_0$ corresponds to a local energy minimum, then $F^0=0$. Dropping higher order terms, we have a system of coupled harmonic oscillators given by
\begin{equation}
\Delta U \equiv U - U^0 = \frac 12 u H^0 u.
\end{equation}
This is independent of the form or details of $U$.
Hooke's law for this system gives the net force $f$ as a result of displacing the particles by $u$:
\begin{equation}
f = -H^0 u.
\end{equation}
Thus, if an *external* force $f_\mathrm{ext}$ is applied, the particles will respond so that the total force is zero, i.e. $f = -f_\mathrm{ext}$. This response is obtained by solving for $u$:
\begin{equation}
u = (H^0)^{-1} f_\mathrm{ext}.
\end{equation}
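As a minimal numerical sketch (with a made-up 2×2 Hessian rather than one computed from a real system), this linear response is just a linear solve:
```
H0_toy = jnp.array([[ 2.0, -1.0],
                    [-1.0,  2.0]])     # toy positive-definite Hessian
f_ext = jnp.array([1.0, 0.0])
u = jnp.linalg.solve(H0_toy, f_ext)    # u = (H0)^{-1} f_ext
```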
## Response to an affine strain
Now consider a strain tensor $\epsilon = \tilde \epsilon \gamma$, where $\gamma$ is a scalar and will be used to explicitly take the limit of small strain for fixed $\tilde \epsilon$. Importantly, the strain tensor represents a deformation of the underlying space that the particles live in and thus is a degree of freedom that is independent of the $Nd$ particle degrees of freedom. Therefore, knowing the particle positions $R$ is not sufficient to describe the energy; we also need to know $\gamma$ to specify the correct boundary conditions:
\begin{equation}
U = U(R, \gamma).
\end{equation}
We now have a system with $Nd+1$ variables $\{R, \gamma\}$ that, like before, form a set of coupled harmonic oscillators. We can describe this using the so-called "generalized Hessian" matrix of second derivatives of the energy with respect to both $R$ and $\gamma$. Specifically, Hooke's law reads
\begin{equation}
\left( \begin{array}{ ccccc|c}
&&&&&\\
&&H^0 &&& -\Xi \\
&&&&& \\ \hline
&&-\Xi^T &&&\frac{\partial ^2U}{\partial \gamma^2}
\end{array}\right)
\left( \begin{array}{ c}
\\
u \\
\\ \hline
\gamma
\end{array}\right)
=
\left( \begin{array}{ c}
\\
0 \\
\\ \hline
\tilde \sigma
\end{array}\right),
\end{equation}
where $u = R - R_0$ is the displacement of every particle, $\Xi = -\frac{ \partial^2 U}{\partial R \partial \gamma}$, and $\tilde \sigma$ is the induced stress caused by the deformation. (If there is prestress in the system, i.e. $\sigma^0 = \frac{\partial U}{\partial \gamma} \neq 0$, the total stress is $\sigma = \sigma^0 + \tilde \sigma$.) In this equation, $\gamma$ is held fixed and the zero in the top of the right-hand-side imposes force balance after the deformation and resulting non-affine displacement of every particle. The non-affine displacement itself, $u$, and the induced stress $\sigma$, are both unknown but can be solved for. First, the non-affine response is
\begin{equation}
u = (H^0)^{-1} \Xi \; \gamma,
\end{equation}
where we note that in the limit of small $\gamma$, the force induced on every particle due to the affine deformation is $\Xi \; \gamma$. Second, the induced stress is
\begin{equation}
\tilde \sigma = \frac{\partial ^2U}{\partial \gamma^2} \gamma - \Xi^T u = \left(\frac{\partial ^2U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right) \gamma.
\end{equation}
Similarly, the change in energy is
\begin{equation}
\frac{\Delta U}{V^0} = \sigma^0 \gamma + \frac 1{2V^0} \left(\frac{\partial ^2U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right) \gamma^2,
\end{equation}
where $\sigma^0$ is the prestress in the system per unit volume. Comparing this to the above definition of the elastic modulus tensor, we see that the elastic constant associated with the deformation $\tilde \epsilon$ is
\begin{equation}
C(\tilde \epsilon) = \frac 1{V^0} \left( \frac{\partial^2 U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right).
\end{equation}
$C(\tilde \epsilon)$ is related to $C_{ijkl}$ by summing $C(\tilde \epsilon) = C_{ijkl}\tilde \epsilon_{ij} \tilde \epsilon_{kl}$. So, if $\tilde \epsilon_{ij} = \delta_{0i}\delta_{0j}$, then $C_{0000} = C(\tilde \epsilon)$.
The internal code in `jax_md.elasticity` repeats this calculation for different $\tilde \epsilon$ to back out the different independent elastic constants.
#First example
As a first example, let's consider a 3d system of 128 soft spheres. The elastic modulus tensor is only defined for systems that are at a local energy minimum, so we start by minimizing the energy.
```
N = 128
dimension = 3
box_size = quantity.box_size_at_number_density(N, 1.4, dimension)
displacement, shift = space.periodic(box_size)
energy_fn = energy.soft_sphere_pair(displacement)
key, split = random.split(key)
R_init = random.uniform(split, (N,dimension), minval=0.0, maxval=box_size, dtype=f64)
R, max_grad, niters = run_minimization_while(energy_fn, R_init, shift)
print('Minimized the energy in {} minimization steps and reached a final \
maximum gradient of {}'.format(niters, max_grad))
```
We can now calculate the elastic modulus tensor
```
emt_fn = jit(elasticity.athermal_moduli(energy_fn, check_convergence=True))
C, converged = emt_fn(R,box_size)
print(converged)
```
The elastic modulus tensor gives a quantitative prediction for how the energy should change if we deform the system according to a strain tensor
\begin{equation}
\frac{ \Delta U}{V^0} = \sigma^0\epsilon + \frac 12 \epsilon C \epsilon + O\left(\epsilon^3\right)
\end{equation}
To test this, we define $\epsilon = \tilde \epsilon \gamma$ for a randomly chosen strain tensor $\tilde \epsilon$ and for $\gamma \ll 1$. Ignoring terms of order $\gamma^3$ and higher, we have
\begin{equation}
\frac{ \Delta U}{V^0} - \sigma^0\epsilon = \left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2
\end{equation}
Thus, we can test our calculation of $C$ by plotting $\frac{ \Delta U}{V^0} - \sigma^0\epsilon$ as a function of $\gamma$ for our randomly chosen $\tilde \epsilon$ and comparing it to the line $\left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2$.
First, generate a random $\tilde \epsilon$ and calculate $U$ for different $\gamma$.
```
key, split = random.split(key)
#Pick a random (symmetric) strain tensor
strain_tensor = random.uniform(split, (dimension,dimension), minval=-1, maxval=1, dtype=f64)
strain_tensor = (strain_tensor + strain_tensor.T) / 2.0
#Define a function to calculate the energy at a given strain
def get_energy_at_strain(gamma, strain_tensor, R_init, box):
R_init = space.transform(space.inverse(box),R_init)
new_box = jnp.matmul(jnp.eye(strain_tensor.shape[0]) + gamma * strain_tensor, box)
displacement, shift = space.periodic_general(new_box, fractional_coordinates=True)
energy_fn = energy.soft_sphere_pair(displacement, sigma=1.0)
R_final, _, _ = run_minimization_while(energy_fn, R_init, shift)
return energy_fn(R_final)
gammas = jnp.logspace(-7,-4,50)
Us = vmap(get_energy_at_strain, in_axes=(0,None,None,None))(gammas, strain_tensor, R, box_size * jnp.eye(dimension))
```
Plot $\frac{ \Delta U}{V^0} - \sigma^0\epsilon$ and $\left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2$ as functions of $\gamma$. While there may be disagreements for very small $\gamma$ due to numerical precision or at large $\gamma$ due to higher-order terms becoming relevant, there should be a region of quantitative agreement.
```
U_0 = energy_fn(R)
stress_0 = -quantity.stress(energy_fn, R, box_size)
V_0 = quantity.volume(dimension, box_size)
#Plot \Delta E/V - sigma*epsilon
y1 = (Us - U_0)/V_0 - gammas * jnp.einsum('ij,ji->',stress_0,strain_tensor)
plt.plot(jnp.abs(gammas), y1, lw=3, label=r'$\Delta U/V^0 - \sigma^0 \epsilon$')
#Plot 0.5 * epsilon*C*epsilon
y2 = 0.5 * jnp.einsum('ij,ijkl,kl->',strain_tensor, C, strain_tensor) * gammas**2
plt.plot(jnp.abs(gammas), y2, ls='--', lw=3, label=r'$(1/2) \epsilon C \epsilon$')
plt.xscale('log')
plt.yscale('log')
plt.legend()
format_plot('$\gamma$','')
finalize_plot()
```
To test the accuracy of this agreement, we first define:
\begin{equation}
T(\gamma) = \frac{ \Delta U}{V^0} - \sigma^0\epsilon - \frac 12 \epsilon C \epsilon \sim O\left(\gamma^3\right)
\end{equation}
which should be proportional to $\gamma^3$ for small $\gamma$ (note that this expected scaling should break down when the y-axis approaches machine precision). This is a prediction of scaling only, so we plot a line proportional to $\gamma^3$ to compare the slopes.
```
#Plot the difference, which should scale as gamma**3
plt.plot(jnp.abs(gammas), jnp.abs(y1-y2), label=r'$T(\gamma)$')
#Plot gamma**3 for reference
plt.plot(jnp.abs(gammas), jnp.abs(gammas**3), 'black', label=r'slope = $\gamma^3$ (for reference)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
format_plot('$\gamma$','')
finalize_plot()
```
Save `C` for later testing.
```
C_3d = C
```
#Example with neighbor lists
As a second example, consider a much larger system that is implemented using neighbor lists.
```
N = 5000
dimension = 2
box_size = quantity.box_size_at_number_density(N, 1.3, dimension)
box = box_size * jnp.eye(dimension)
displacement, shift = space.periodic_general(box, fractional_coordinates=True)
sigma = jnp.array([[1.0, 1.2], [1.2, 1.4]])
N_2 = int(N / 2)
species = jnp.where(jnp.arange(N) < N_2, 0, 1)
neighbor_fn, energy_fn = energy.soft_sphere_neighbor_list(
displacement, box_size, species=species, sigma=sigma, dr_threshold = 0.1,
fractional_coordinates = True)
key, split = random.split(key)
R_init = random.uniform(split, (N,dimension), minval=0.0, maxval=1.0, dtype=f64)
R, max_grad, nbrs, niters = run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift)
print('Minimized the energy in {} minimization steps and reached a final \
maximum gradient of {}'.format(niters, max_grad))
```
We have to pass the neighbor list to `emt_fn`.
```
emt_fn = jit(elasticity.athermal_moduli(energy_fn, check_convergence=True))
C, converged = emt_fn(R,box,neighbor=nbrs)
print(converged)
```
We can time the calculation of the compiled function.
```
%timeit emt_fn(R,box,neighbor=nbrs)
```
Repeat the same tests as above. NOTE: this may take a few minutes.
```
key, split = random.split(key)
#Pick a random (symmetric) strain tensor
strain_tensor = random.uniform(split, (dimension,dimension), minval=-1, maxval=1, dtype=f64)
strain_tensor = (strain_tensor + strain_tensor.T) / 2.0
def get_energy_at_strain(gamma, strain_tensor, R_init, box):
new_box = jnp.matmul(jnp.eye(strain_tensor.shape[0]) + gamma * strain_tensor, box)
displacement, shift = space.periodic_general(new_box, fractional_coordinates=True)
neighbor_fn, energy_fn = energy.soft_sphere_neighbor_list(
displacement, box_size, species=species, sigma=sigma, dr_threshold = 0.1,
fractional_coordinates = True, capacity_multiplier = 1.5)
R_final, _, nbrs, _ = run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift)
return energy_fn(R_final, neighbor=nbrs)
gammas = jnp.logspace(-7,-3,20)
Us = jnp.array([ get_energy_at_strain(gamma, strain_tensor, R, box) for gamma in gammas])
U_0 = energy_fn(R, neighbor=nbrs)
stress_0 = -quantity.stress(energy_fn, R, box, neighbor=nbrs)
V_0 = quantity.volume(dimension, box)
#Plot \Delta E/V - sigma*epsilon
y1 = (Us - U_0)/V_0 - gammas * jnp.einsum('ij,ji->',stress_0,strain_tensor)
plt.plot(jnp.abs(gammas), y1, lw=3, label=r'$\Delta U/V^0 - \sigma^0 \epsilon$')
#Plot 0.5 * epsilon*C*epsilon
y2 = 0.5 * jnp.einsum('ij,ijkl,kl->',strain_tensor, C, strain_tensor) * gammas**2
plt.plot(jnp.abs(gammas), y2, ls='--', lw=3, label=r'$(1/2) \epsilon C \epsilon$')
plt.xscale('log')
plt.yscale('log')
plt.legend()
format_plot('$\gamma$','')
finalize_plot()
#Plot the difference, which should scale as gamma**3
plt.plot(jnp.abs(gammas), jnp.abs(y1-y2), label=r'$T(\gamma)$')
#Plot gamma**3 for reference
plt.plot(jnp.abs(gammas), jnp.abs(gammas**3), 'black', label=r'slope = $\gamma^3$ (for reference)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
format_plot('$\gamma$','')
finalize_plot()
```
Save `C` for later testing.
```
C_2d = C
```
# Mandel notation
Mandel notation is a way to represent symmetric second-rank tensors and fourth-rank tensors with so-called "minor symmetries", i.e. $T_{ijkl} = T_{ijlk} = T_{jilk}$. The idea is to map pairs of indices so that $(i,i) \rightarrow i$ and $(i,j) \rightarrow K - i - j$ for $i\neq j$, where $K = d(d+1)/2$ is the number of independent pairs $(i,j)$ for tensors with $d$ elements along each axis. Thus, second-rank tensors become first-rank tensors, and fourth-rank tensors become second-rank tensors, according to:
\begin{align}
M_{m(i,j)} &= T_{ij} w(i,j) \\
M_{m(i,j),m(k,l)} &= T_{ijkl} w(i,j) w(k,l).
\end{align}
Here, $m(i,j)$ is the mapping function described above, and $w(i,j)$ is a weight that preserves summation rules and is given by
\begin{align}
w(i,j) = \delta_{ij} + \sqrt{2}\,(1-\delta_{ij}),
\end{align}
i.e. $w=1$ on the diagonal and $w=\sqrt{2}$ off the diagonal.
We can convert strain tensors, stress tensors, and elastic modulus tensors to and from Mandel notation using the functions `elasticity.tensor_to_mandel` and `elasticity.mandel_to_tensor`.
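To make the index bookkeeping concrete, here is a small standalone sketch of the mapping $m(i,j)$ and the weight $w(i,j)$ for a symmetric second-rank tensor. It is written directly from the definitions above and is independent of the library, so the ordering of the off-diagonal entries may differ from what `elasticity.tensor_to_mandel` produces; treat it as an illustration only.
```
import jax.numpy as jnp

def mandel_index(i, j, d):
  # m(i,i) = i on the diagonal, m(i,j) = K - i - j off the diagonal,
  # with K = d(d+1)/2 independent index pairs.
  K = d * (d + 1) // 2
  return i if i == j else K - i - j

def mandel_weight(i, j):
  # w = 1 on the diagonal, sqrt(2) off the diagonal.
  return 1.0 if i == j else jnp.sqrt(2.0)

def to_mandel_2nd(T):
  # Convert a symmetric (d, d) tensor into its length-K Mandel vector.
  d = T.shape[0]
  K = d * (d + 1) // 2
  M = jnp.zeros(K)
  for i in range(d):
    for j in range(i, d):
      M = M.at[mandel_index(i, j, d)].set(T[i, j] * mandel_weight(i, j))
  return M

# Example: for this symmetric 3x3 tensor the result is
# [1, 2, 3, 6*sqrt(2), 5*sqrt(2), 4*sqrt(2)].
T = jnp.array([[1., 4., 5.],
               [4., 2., 6.],
               [5., 6., 3.]])
print(to_mandel_2nd(T))
```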
First, let's copy one of the previously calculated elastic modulus tensors and define a random strain tensor.
```
#This can be 2 or 3 depending on which of the above solutions has been calculated
dimension = 3
if dimension == 2:
C = C_2d
else:
C = C_3d
key, split = random.split(key)
e = random.uniform(key, (dimension,dimension), minval=-1, maxval=1, dtype=f64)
e = (e + e.T)/2.
```
Convert `e` and `C` to Mandel notation.
```
e_m = jit(elasticity.tensor_to_mandel)(e)
C_m = jit(elasticity.tensor_to_mandel)(C)
print(e_m)
print(C_m)
```
Using "bar" notation to represent Mandel vectors and matrices, we have
\begin{equation}
\frac{ \Delta U}{V^0} = \bar \sigma_i^0 \bar\epsilon_i + \frac 12 \bar \epsilon_i \bar C_{ij} \bar\epsilon_j + O\left(\bar \epsilon^3\right)
\end{equation}
We can explicitly test that these sums are equivalent to the sums involving the original tensors:
```
sum_m = jnp.einsum('i,ij,j->',e_m, C_m, e_m)
sum_t = jnp.einsum('ij,ijkl,kl->',e, C, e)
print('Relative error is {}, which should be very close to 0'.format((sum_t-sum_m)/sum_t))
```
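The same equivalence holds for the linear term: the Mandel dot product of two symmetric second-rank tensors reproduces their full double contraction. A minimal sketch of this check, reusing `e` and `e_m` from above and drawing a second random symmetric tensor (the variable names here are just for illustration):
```
key, split = random.split(key)
s = random.uniform(split, (dimension,dimension), minval=-1, maxval=1, dtype=f64)
s = (s + s.T) / 2.
s_m = jit(elasticity.tensor_to_mandel)(s)
#Both contractions should agree to machine precision
print(jnp.einsum('i,i->', s_m, e_m) - jnp.einsum('ij,ij->', s, e))
```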
Finally, we can convert back to the full tensors and check that they are unchanged.
```
C_new = jit(elasticity.mandel_to_tensor)(C_m)
print('Max error in C is {}, which should be very close to 0.'.format(jnp.max(jnp.abs(C-C_new))))
e_new = jit(elasticity.mandel_to_tensor)(e_m)
print('Max error in e is {}, which should be very close to 0.'.format(jnp.max(jnp.abs(e-e_new))))
```
# Isotropic elastic constants
The calculation of the elastic modulus tensor does not make any assumptions about the underlying symmetries in the material. However, for isotropic systems, only two constants are needed to completely describe the elastic behavior. These are often taken to be the bulk modulus, $B$, and the shear modulus, $G$, or Young's modulus, $E$, and Poisson's ratio, $\nu$. The function `elasticity.extract_isotropic_moduli` extracts these values, as well as the longitudinal modulus, $M$, from an elastic modulus tensor.
Importantly, since there is no guarantee that `C` is calculated from a truly isotropic system, these are "orientation-averaged" values. For example, there are many directions in which you can shear a system, and the shear modulus that is returned represents an average over all these orientations. This can be an effective way to average over small fluctuations in an "almost isotropic" system, but the values lose their typical meaning when the system is highly anisotropic.
```
elasticity.extract_isotropic_moduli(C)
```
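To make the idea of orientation averaging concrete, the sketch below computes Voigt-style averages directly from contractions of `C`. For an isotropic tensor $C_{ijkl} = \lambda \delta_{ij}\delta_{kl} + \mu(\delta_{ik}\delta_{jl} + \delta_{il}\delta_{jk})$ one has $C_{iijj} = \lambda d^2 + 2\mu d$ and $C_{ijij} = \lambda d + \mu(d^2+d)$, so $B = \lambda + 2\mu/d$ and $G = \mu$ can be recovered from these two contractions. This is an independent cross-check under the Voigt-averaging assumption, not the library's implementation, so the numbers need not match `extract_isotropic_moduli` exactly for an anisotropic `C`.
```
#Voigt-style orientation averages computed directly from C
d = C.shape[0]
C_iijj = jnp.einsum('iijj->', C)
C_ijij = jnp.einsum('ijij->', C)
B_voigt = C_iijj / d**2
G_voigt = (C_ijij - C_iijj / d) / (d**2 + d - 2)
print(B_voigt, G_voigt)
```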
# Gradients
The calculation of the elastic modulus tensor is fully differentiable:
```
def setup(N,dimension,key):
box_size = quantity.box_size_at_number_density(N, 1.4, dimension)
box = box_size * jnp.eye(dimension)
displacement, shift = space.periodic_general(box, fractional_coordinates=True)
R_init = random.uniform(key, (N,dimension), minval=0.0, maxval=1.0, dtype=f64)
def run(sigma):
energy_fn = energy.soft_sphere_pair(displacement, sigma=sigma)
R, max_grad = run_minimization_scan(energy_fn, R_init, shift, num_steps=1000)
emt_fn = jit(elasticity.athermal_moduli(energy_fn))
C = emt_fn(R,box)
return elasticity.extract_isotropic_moduli(C)['G']
return run
key, split = random.split(key)
N = 50
dimension = 2
run = setup(N, dimension, split)
sigma = jnp.linspace(1.0,1.4,N)
print(run(sigma))
print(grad(run)(sigma))
```
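As a hypothetical illustration of what this enables, the gradient can drive a simple design loop, for example nudging the particle diameters to increase the shear modulus. Each step re-runs the minimization inside `run`, so this is slow and purely illustrative; the step size and number of steps below are arbitrary choices.
```
from jax import value_and_grad

#Plain gradient ascent on G with respect to the particle diameters
step_size = 1e-2
sigma_opt = sigma
for step in range(5):
  G, dG = value_and_grad(run)(sigma_opt)
  sigma_opt = sigma_opt + step_size * dG
  print('step {}: G = {}'.format(step, G))
```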
|
github_jupyter
|
```
# noexport
import os
os.system('export_notebook identify_domain_training_data.ipynb')
from tmilib import *
import csv
import sys
num_prev_enabled = int(sys.argv[1])
num_labels_enabled = 2 + num_prev_enabled
data_version = 4 + num_prev_enabled
print 'num_prev_enabled', num_prev_enabled
print 'data_version', data_version
twenty_letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t"]
#domain_to_letter = {x:twenty_letters[i] for i,x in enumerate(top_domains)}
domain_id_to_letter = {domain_to_id(x):twenty_letters[i] for i,x in enumerate(top_n_domains_by_visits(20))}
#print domain_id_to_letter
#print domain_to_letter
productivity_letters = {-2: 'v', -1: 'w', 0: 'x', 1: 'y', 2: 'z'}
domain_id_to_productivity_letter = [productivity_letters[x] for x in get_domain_id_to_productivity()]
#print domain_id_to_productivity[:10]
#print domain_id_to_productivity_letter[:10]
def get_row_names():
output_row_names = [
'label',
'spanlen',
'since_cur',
'cur_domain_letter',
'cur_domain_productivity',
'to_next',
'next_domain_letter',
'next_domain_productivity',
'n_eq_c',
]
for idx_p_zeroidx in range(num_prev_enabled):
sp = str(idx_p_zeroidx + 1)
new_feature_names_for_p = [
'since_prev' + sp,
'prev' + sp +'_domain_letter',
'prev' + sp + '_domain_productivity',
'n_eq_p' + sp,
]
output_row_names.extend(new_feature_names_for_p)
return tuple(output_row_names)
row_names = get_row_names()
print row_names
def get_rows_for_user(user):
output = []
#ordered_visits = get_history_ordered_visits_corrected_for_user(user)
ordered_visits = get_history_ordered_visits_corrected_for_user(user)
ordered_visits = exclude_bad_visits(ordered_visits)
#active_domain_at_time = get_active_domain_at_time_for_user(user)
active_seconds_set = set(get_active_insession_seconds_for_user(user))
active_second_to_domain_id = {int(k):v for k,v in get_active_second_to_domain_id_for_user(user).viewitems()}
prev_domain_ids = [-1]*8
domain_id_to_most_recent_visit = {}
total_items = 0
skipped_items = 0
for idx,visit in enumerate(ordered_visits):
if idx+1 >= len(ordered_visits):
break
next_visit = ordered_visits[idx+1]
cur_domain = url_to_domain(visit['url'])
cur_domain_id = domain_to_id(cur_domain)
next_domain = url_to_domain(next_visit['url'])
next_domain_id = domain_to_id(next_domain)
cur_time_sec = int(round(visit['visitTime'] / 1000.0))
next_time_sec = int(round(next_visit['visitTime'] / 1000.0))
domain_id_to_most_recent_visit[cur_domain_id] = cur_time_sec
if prev_domain_ids[0] != cur_domain_id:
#prev_domain_ids = ([cur_domain_id] + [x for x in prev_domain_ids if x != cur_domain_id])[:4]
if cur_domain_id in prev_domain_ids:
prev_domain_ids.remove(cur_domain_id)
prev_domain_ids.insert(0, cur_domain_id)
while len(prev_domain_ids) > 8:
prev_domain_ids.pop()
# prev_domain_ids includes the current one
if cur_time_sec > next_time_sec:
continue
prev1_domain_id = prev_domain_ids[1]
prev2_domain_id = prev_domain_ids[2]
prev3_domain_id = prev_domain_ids[3]
prev4_domain_id = prev_domain_ids[4]
prev5_domain_id = prev_domain_ids[5]
prev6_domain_id = prev_domain_ids[6]
prev7_domain_id = prev_domain_ids[7]
n_eq_c = 'T' if (next_domain_id == cur_domain_id) else 'F'
n_eq_p1 = 'T' if (next_domain_id == prev1_domain_id) else 'F'
n_eq_p2 = 'T' if (next_domain_id == prev2_domain_id) else 'F'
n_eq_p3 = 'T' if (next_domain_id == prev3_domain_id) else 'F'
n_eq_p4 = 'T' if (next_domain_id == prev4_domain_id) else 'F'
n_eq_p5 = 'T' if (next_domain_id == prev5_domain_id) else 'F'
n_eq_p6 = 'T' if (next_domain_id == prev6_domain_id) else 'F'
n_eq_p7 = 'T' if (next_domain_id == prev7_domain_id) else 'F'
for time_sec in xrange(cur_time_sec+1, next_time_sec):
if time_sec not in active_seconds_set:
continue
ref_domain_id = active_second_to_domain_id[time_sec]
total_items += 1
label = None
available_labels = (
(cur_domain_id, 'c'),
(next_domain_id, 'n'),
(prev1_domain_id, 'p1'),
(prev2_domain_id, 'p2'),
(prev3_domain_id, 'p3'),
(prev4_domain_id, 'p4'),
(prev5_domain_id, 'p5'),
(prev6_domain_id, 'p6'),
(prev7_domain_id, 'p7'),
)[:num_labels_enabled]
# c p n p q r s t
for label_value,label_name in available_labels:
if ref_domain_id == label_value:
label = label_name
break
if label == None:
skipped_items += 1
continue
next_domain_letter = domain_id_to_letter.get(next_domain_id, 'u')
cur_domain_letter = domain_id_to_letter.get(cur_domain_id, 'u')
prev1_domain_letter = domain_id_to_letter.get(prev1_domain_id, 'u')
prev2_domain_letter = domain_id_to_letter.get(prev2_domain_id, 'u')
prev3_domain_letter = domain_id_to_letter.get(prev3_domain_id, 'u')
prev4_domain_letter = domain_id_to_letter.get(prev4_domain_id, 'u')
prev5_domain_letter = domain_id_to_letter.get(prev5_domain_id, 'u')
prev6_domain_letter = domain_id_to_letter.get(prev6_domain_id, 'u')
prev7_domain_letter = domain_id_to_letter.get(prev7_domain_id, 'u')
next_domain_productivity = domain_id_to_productivity_letter[next_domain_id]
cur_domain_productivity = domain_id_to_productivity_letter[cur_domain_id]
prev1_domain_productivity = domain_id_to_productivity_letter[prev1_domain_id]
prev2_domain_productivity = domain_id_to_productivity_letter[prev2_domain_id]
prev3_domain_productivity = domain_id_to_productivity_letter[prev3_domain_id]
prev4_domain_productivity = domain_id_to_productivity_letter[prev4_domain_id]
prev5_domain_productivity = domain_id_to_productivity_letter[prev5_domain_id]
prev6_domain_productivity = domain_id_to_productivity_letter[prev6_domain_id]
prev7_domain_productivity = domain_id_to_productivity_letter[prev7_domain_id]
since_cur = time_sec - cur_time_sec
to_next = next_time_sec - time_sec
spanlen = since_cur + to_next
prev1_domain_last_visit = domain_id_to_most_recent_visit.get(prev1_domain_id, 0)
prev2_domain_last_visit = domain_id_to_most_recent_visit.get(prev2_domain_id, 0)
prev3_domain_last_visit = domain_id_to_most_recent_visit.get(prev3_domain_id, 0)
prev4_domain_last_visit = domain_id_to_most_recent_visit.get(prev4_domain_id, 0)
prev5_domain_last_visit = domain_id_to_most_recent_visit.get(prev5_domain_id, 0)
prev6_domain_last_visit = domain_id_to_most_recent_visit.get(prev6_domain_id, 0)
prev7_domain_last_visit = domain_id_to_most_recent_visit.get(prev7_domain_id, 0)
since_prev1 = time_sec - prev1_domain_last_visit
since_prev2 = time_sec - prev2_domain_last_visit
since_prev3 = time_sec - prev3_domain_last_visit
since_prev4 = time_sec - prev4_domain_last_visit
since_prev5 = time_sec - prev5_domain_last_visit
since_prev6 = time_sec - prev6_domain_last_visit
since_prev7 = time_sec - prev7_domain_last_visit
since_cur = log(since_cur)
to_next = log(to_next)
spanlen = log(spanlen)
since_prev1 = log(since_prev1)
since_prev2 = log(since_prev2)
since_prev3 = log(since_prev3)
since_prev4 = log(since_prev4)
since_prev5 = log(since_prev5)
since_prev6 = log(since_prev6)
since_prev7 = log(since_prev7)
cached_locals = locals()
output.append([cached_locals[row_name] for row_name in row_names])
#print 'user', user, 'guaranteed error', float(skipped_items)/total_items, 'skipped', skipped_items, 'total', total_items
return {
'rows': output,
'skipped_items': skipped_items,
'total_items': total_items,
}
def create_domainclass_data_for_users(users, filename):
if sdir_exists(filename):
print 'already exists', filename
return
outfile = csv.writer(open(sdir_path(filename), 'w'))
outfile.writerow(row_names)
total_items = 0
skipped_items = 0
for user in users:
data = get_rows_for_user(user)
total_items += data['total_items']
if data['total_items'] == 0:
print user, 'no items'
continue
skipped_items += data['skipped_items']
print user, 'skipped', float(data['skipped_items'])/data['total_items'], 'skipped', data['skipped_items'], 'total', data['total_items']
outfile.writerows(data['rows'])
print 'guaranteed error', float(skipped_items) / total_items, 'skipped', skipped_items, 'total', total_items
create_domainclass_data_for_users(get_training_users(), 'domainclass_cpn_train_v' + str(data_version) +'.csv')
create_domainclass_data_for_users(get_test_users(), 'domainclass_cpn_test_v' + str(data_version) + '.csv')
```
|
github_jupyter
|
# Module 3 Required Coding Activity
Introduction to Python (Unit 2) Fundamentals
All course .ipynb Jupyter Notebooks are available from the project files download topic in Module 1, Section 1.
This is an activity from the Jupyter Notebook **`Practice_MOD03_IntroPy.ipynb`** which you may have already completed.
| Assignment Requirements |
|:-------------------------------|
| **NOTE:** This program requires **`print`** output and using code syntax used in module 3: **`if`**, **`input`**, **`def`**, **`return`**, **`for`**/**`in`** keywords, **`.lower()`** and **`.upper()`** method, **`.append`**, **`.pop`**, **`.split`** methods, **`range`** and **`len`** functions |
## Program: poem mixer
This program takes string input and then prints out a mixed order version of the string
**Program Parts**
- **program flow** gathers the word list, modifies the case and order, and prints
- get string input, input like a poem, verse or saying
- split the string into a list of individual words
- determine the length of the list
- Loop the length of the list by index number and for each list index:
- if a word is short (3 letters or less) make the word in the list lowercase
- if a word is long (7 letters or more) make the word in the list uppercase
- **call the word_mixer** function with the modified list
- print the return value from the word_mixer function
- **word_mixer** function has 1 argument: an original list of string words containing more than 5 words; the function returns a new list.
- sort the original list
- create a new list
- Loop while the list is longer than 5 words:
- *in each loop pop a word from the sorted original list and append to the new list*
- pop the word 5th from the end of the list and append to the new list
- pop the first word in the list and append to the new list
- pop the last word in the list and append to the new list
- **return** the new list on exiting the loop
**input example** *(beginning of William Blake poem, "The Fly")*
>enter a saying or poem: `Little fly, Thy summer's play My thoughtless hand Has brushed away. Am not I A fly like thee? Or art not thou A man like me?`
**output example**
>`or BRUSHED thy not Little thou me? SUMMER'S thee? like THOUGHTLESS play i a not hand a my fly am man`
**alternative output**: in each loop in the function that creates the new list, add a "\\n" to the list
```
or BRUSHED thy
not Little thou
me? SUMMER'S thee?
like THOUGHTLESS play
i a not
hand a my
fly am man
```
```
# [] create poem mixer
# [] copy and paste in edX assignment page
```
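Below is one possible sketch that follows the steps described above; it is not the official solution, and the variable names are just illustrative.
```
# [] create poem mixer - one possible sketch following the steps above

def word_mixer(words):
    # sort the original list, then build a new list by repeatedly popping words
    words.sort()
    mixed = []
    while len(words) > 5:
        mixed.append(words.pop(-5))  # word 5th from the end
        mixed.append(words.pop(0))   # first word
        mixed.append(words.pop())    # last word
    return mixed

poem = input("enter a saying or poem: ")
word_list = poem.split()

# short words (3 letters or less) -> lowercase; long words (7 or more) -> uppercase
for i in range(len(word_list)):
    if len(word_list[i]) <= 3:
        word_list[i] = word_list[i].lower()
    elif len(word_list[i]) >= 7:
        word_list[i] = word_list[i].upper()

print(" ".join(word_mixer(word_list)))
```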
Submit this by creating a python file (.py) and submitting it in D2L. Be sure to test that it works.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/2_elemental_features_of_monk/5)%20Feature%20-%20Switch%20modes%20without%20reloading%20experiment%20-%20train%2C%20eval%2C%20infer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### 1. Understand how continuously and manually switching between training and external validation helps improve training
### 2. Experiment and learn how switching between train and val modes can help choose the right hyper-parameters
### 3. Steps
- You will use mxnet gluon backend for this example
- You will first train a classifier using default params
- You will switch mode from train to val for the first time here
- You will then validate to check accuracy
- You will switch mode from val to train here
- You will reduce the learning rate (Need not focus on how it is done for now)
- You will retrain again using this new lr
- You will switch mode from train to val for the second time here
- You will then validate to check accuracy
- You will again switch mode from val to train here
- You will further change the learning rate (Need not focus on how it is done for now)
- You will retrain again using this newest lr
- You will switch mode from train to val for the final time here
- You will then validate to check accuracy
# Table of Contents
## [0. Install](#0)
## [1. Train a classifier using default settings](#1)
## [2. Switch mode from train to eval and validate](#2)
## [3. Switch back mode, reduce lr, retrain](#3)
## [4. Switch mode from train to eval and re-validate](#4)
## [5. Switch back mode, change lr further, retrain](#5)
## [6. Switch mode from train to eval and re-validate](#6)
<a id='0'></a>
# Install Monk
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
- cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
- (Select the requirements file as per OS and CUDA version)
```
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# If using Colab install using the commands below
!cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
#!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
#!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
```
## Dataset - Malarial cell images
- Credits: https://www.kaggle.com/iarunava/cell-images-for-detecting-malaria
```
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1mMEtGIK8UZNCrErXRJR-kutNTaN1zxjC' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1mMEtGIK8UZNCrErXRJR-kutNTaN1zxjC" -O malaria_cell.zip && rm -rf /tmp/cookies.txt
! unzip -qq malaria_cell.zip
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1WHpd7M-E_EiXmdjOr48BfvlUtMRPV6PM' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1WHpd7M-E_EiXmdjOr48BfvlUtMRPV6PM" -O malaria_cell_val.zip && rm -rf /tmp/cookies.txt
! unzip -qq malaria_cell_val.zip
```
# Imports
- Using single mxnet-gluoncv backend for this tutorial
```
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
```
<a id='1'></a>
# Train a classifier using default settings
### Creating and managing experiments
- Provide project name
- Provide experiment name
```
gtf = prototype(verbose=1);
gtf.Prototype("Malaria-Cell", "exp-switch-modes");
```
### This creates files and directories as per the following structure
    workspace
        |
        |--------Malaria-Cell
                        |
                        |
                        |-----exp-switch-modes
                                    |
                                    |-----experiment-state.json
                                    |
                                    |-----output
                                            |
                                            |------logs (All training logs and graphs saved here)
                                            |
                                            |------models (all trained models saved here)
### Load Dataset
```
gtf.Default(dataset_path="malaria_cell",
model_name="resnet18_v1",
num_epochs=5);
#Read the summary generated once you run this cell.
```
### From the summary, the current learning rate is 0.01
```
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
<a id='2'></a>
# Switch mode from train to eval and validate
```
gtf.Switch_Mode(eval_infer=True)
```
### Load the validation dataset
```
gtf.Dataset_Params(dataset_path="malaria_cell_val");
gtf.Dataset();
```
### Run validation
```
accuracy, class_based_accuracy = gtf.Evaluate();
```
### Accuracy is now 65.08% with learning rate 0.01
(This can change when you run the experiment)
<a id='3'></a>
# Switch back mode, reduce lr, retrain
```
gtf.Switch_Mode(train=True)
```
## Reduce learning rate from 0.01 to 0.001
```
# This part of code will be taken up again in upcoming sections
gtf.update_learning_rate(0.001);
gtf.Reload();
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
<a id='4'></a>
# Switch mode from train to eval and re-validate
```
gtf.Switch_Mode(eval_infer=True)
```
### Load the validation dataset
```
gtf.Dataset_Params(dataset_path="malaria_cell_val");
gtf.Dataset();
```
### Run validation
```
accuracy, class_based_accuracy = gtf.Evaluate();
```
### Accuracy is now 58.85% with learning rate 0.001
(This can change when you run the experiment)
- Thus, reducing the learning rate didn't help our case
<a id='5'></a>
# Switch back mode, change lr, retrain
```
gtf.Switch_Mode(train=True)
```
## Update the learning rate again
```
# This part of code will be taken up again in upcoming sections
gtf.update_learning_rate(0.1);
gtf.Reload();
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
```
<a id='6'></a>
# Switch mode from train to eval and re-validate
```
gtf.Switch_Mode(eval_infer=True)
```
### Load the validation dataset
```
gtf.Dataset_Params(dataset_path="malaria_cell_val");
gtf.Dataset();
```
### Run validation
```
accuracy, class_based_accuracy = gtf.Evaluate();
```
### Accuracy is now 49.85% with learning rate 0.1, even lower
(This can change when you run the experiment)
- Thus, increasing the learning rate didn't help our case
### LR 0.01 worked best for us
- That's how manual hyper-parameter tuning can be done using switch modes
|
github_jupyter
|