Chapter_9_EatNoEat_Training.ipynb | ###Markdown
###Code
import tensorflow as tf
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import math
import glob
import pickle
import io
import os
import datetime
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
from google.colab import drive
drive.mount('/content/gdrive')
use_NAS = False
if use_NAS:
IMG_SIZE = 224 # 299 for Inception, 224 for NASNetMobile
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
else:
IMG_SIZE = 299 # 299 for Inception, 224 for NASNetMobile
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (IMG_SIZE, IMG_SIZE))
if use_NAS:
img = tf.keras.applications.nasnet.preprocess_input(img)
else:
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
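# Usage sketch (assumption: 'images' is a list of local .jpg paths, defined below):
# img_tensor, path = load_image(images[0])
# batch = img_tensor[np.newaxis, ...]  # add a batch dimension before model.predict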
def create_model(image_batch):
tf.keras.backend.clear_session()
if use_NAS:
# Create the base model from the pre-trained model
base_model = tf.keras.applications.NASNetMobile(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
else:
# Create the base model from the pre-trained model
base_model = tf.keras.applications.InceptionResNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
feature_batch = base_model(image_batch)
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
prediction_layer = tf.keras.layers.Dense(3)
prediction_batch = prediction_layer(feature_batch_average)
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer])
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Nadam(lr=base_learning_rate),
loss=tf.keras.losses.MeanAbsoluteError(),
metrics=['mae', 'mse', 'accuracy'])
return model
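# Note: this setup leaves the pretrained base model trainable, so fit() would
# fine-tune every layer. A common transfer-learning variant (an assumption, not
# shown in this notebook) freezes the base first:
# base_model.trainable = False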
import os
from os import listdir
my_drive = '/content/gdrive/My Drive/'
image_folder = my_drive + 'TestImages/'
models = my_drive + 'Models'
training_folder = my_drive + "Training/"
def get_test_images(directory):
images = []
for file in listdir(directory):
if file.endswith(".jpg"):
images.append(directory + file)
return images
images = get_test_images(image_folder)
print(images)
if len(images) == 0:
raise Exception('Test images need to be loaded!')
else:
x, _ = load_image(images[0])
img = x[np.newaxis, ...]
food_model = create_model(img)
food_model.summary()
latest = tf.train.latest_checkpoint(models)
latest
if latest is not None:
food_model.load_weights(latest)
def observe_image(image, model):
x, _ = load_image(image)
img = x[np.newaxis, ...]
return model.predict(img)
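# Usage sketch: observe_image(images[0], food_model) returns a (1, 3) prediction array.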
import ipywidgets as widgets
from IPython.display import display
from IPython.display import Javascript
test_states = []
#@title Eat/No Eat Training { run: "auto", vertical-output: true, display-mode: "form" }
image_idx = 19 #@param {type:"slider", min:0, max:100, step:1}
val = f"Images Trained {len(test_states)}"
label = widgets.Label(
value= val,
disabled=False
)
display(label)
cnt = len(images)
image_idx = image_idx if image_idx < cnt else cnt - 1
image = images[image_idx]
x, _ = load_image(image)
img = x[np.newaxis, ...]
predict = food_model.predict(img)
print(predict+5)
print(image_idx,image)
plt.imshow((x+1)/2)
toggle = widgets.ToggleButtons(
options=['Eat', 'No Eat'],
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
# icon='check'
)
display(toggle)
button = widgets.Button(description="Train!")
output = widgets.Output()
def button_clicked(b):
# Display the message within the output widget.
with output:
test = (predict,toggle.index,image)
test_states.append(test)
button.on_click(button_clicked)
display(button, output)
if len(test_states) > 0:
if os.path.isdir(training_folder) == False:
os.makedirs(training_folder)
pickle.dump( test_states, open( training_folder + "food_test.p", "wb" ) )
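# Reloading the saved labels later (sketch, same path as above):
# test_states = pickle.load(open(training_folder + "food_test.p", "rb"))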
###Output
_____no_output_____ |
Homeworks/.ipynb_checkpoints/TitanicHomeWork-checkpoint.ipynb | ###Markdown
Download Dataset Download the data set from this link + we will be using full.csv + it requires that you login to download, so either create a login or use an existing one + https://www.kaggle.com/pavlofesenko/titanic-extended/downloads/titanic-extended.zip/2 Deliverable + jupyter notebook with code answering the following questions + The answers to the questions must be printed as output using the code needed to derive them + Each question should be in its own code markdown block, with the answer code and output below Questions + How many unique passengers were on the Titanic? + How many First Class Male Passengers survived? + How many Second or Third class passengers survived? + What was the survival rate (num survived/num total) for first class women? + What was the survival rate (num survived/num total) for third class men? + What was the most common home town of the passengers? + What was the average ticket price?
###Code
# example loading data into python
import pandas as pd
path = 'data/titanic/full.csv'
df =pd.read_csv(path)
df.head()
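# Sketch of one answer, assuming the Kaggle full.csv uses the standard
# 'Pclass', 'Sex' and 'Survived' (1 = survived) columns:
# first_class_male_survivors = df[(df['Pclass'] == 1) &
#                                 (df['Sex'] == 'male') &
#                                 (df['Survived'] == 1)]
# print(len(first_class_male_survivors))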
###Output
_____no_output_____ |
examples/reference/elements/plotly/Raster.ipynb | ###Markdown
Title Raster Element Dependencies Plotly Backends Bokeh Matplotlib Plotly
###Code
import numpy as np
import holoviews as hv
hv.extension('plotly')
###Output
_____no_output_____
###Markdown
A ``Raster`` is the base class for image-like elements (namely [``Image``](./Image.ipynb), [``RGB``](./RGB.ipynb) and [``HSV``](./HSV.ipynb)), but may be used directly to visualize 2D arrays using a color map:
###Code
xvals = np.linspace(0,4,202)
ys,xs = np.meshgrid(xvals, -xvals[::-1])
hv.Raster(np.sin(((ys)**3)*xs))
###Output
_____no_output_____ |
code/hot_star_rotation.ipynb | ###Markdown
Calculate the rotation distribution for hot stars
###Code
import numpy as np
import matplotlib.pyplot as plt
from plotstuff import colours
cols = colours()
%matplotlib inline
plotpar = {'axes.labelsize': 20,
'font.size': 20,
'legend.fontsize': 15,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'text.usetex': True}
plt.rcParams.update(plotpar)
KID, Teff, logg, Mass, Prot, Prot_err, Rper, LPH, w, DC, Flag = \
np.genfromtxt("Table_1_Periodic.txt", delimiter=",", skip_header=1).T
m = Teff > 6250
Prot, Rper, Teff = Prot[m], Rper[m], Teff[m]
plt.scatter(Prot, np.log(Rper), c=Teff)
plt.colorbar()
plt.hist(Prot, 50)
N, P_bins = np.histogram(Prot, 50)
m = N == max(N)
ind = int(np.arange(len(P_bins))[m][0] + 1)
plt.axvline((P_bins[m] + P_bins[ind])/2, color="r")
print((P_bins[m] + P_bins[ind])/2)
###Output
[ 2.10053]
###Markdown
Fit a Gaussian
###Code
def Gaussian(par, x):
A, mu, sig = par
return A * np.exp(-.5*(x-mu)**2/sig**2)
def chi2(par, x, y):
return sum((y - Gaussian(par, x))**2)
import scipy.optimize as sco
par_init = 300, 2.10053, 5.
x, y = P_bins[1:], N
result1 = sco.minimize(chi2, par_init, args=(x, y))
A, mu, sig = result1.x
print(A, mu, sig)
plt.hist(Prot, 50)
xs = np.linspace(0, 70, 1000)
ys = Gaussian(result1.x, xs)
plt.plot(xs, ys, "r")
###Output
_____no_output_____
###Markdown
Fit two Gaussians
###Code
def Double_Gaussian(par, x):
A1, A2, mu1, mu2, sig1, sig2 = par
return A1 * np.exp(-.5*(x-mu1)**2/sig1**2) + A2 * np.exp(-.5*(x-mu2)**2/sig2**2)
def Double_chi2(par, x, y):
return sum((y - Double_Gaussian(par, x))**2)
double_par_init = A, mu, sig, 12, 5, 3
result2 = sco.minimize(Double_chi2, double_par_init, args=(x, y))
A1, A2, mu1, mu2, sig1, sig2 = result2.x
print(result2.x)
print(mu1, mu2)
print(sig1, sig2)
plt.hist(Prot, 50, color="w", histtype="stepfilled",
label="$P_{\mathrm{rot}}~(T_{\mathrm{eff}} > 6250)$") # ,~\mathrm{McQuillan~et~al.~(2013)}$")
ys = Double_Gaussian(result2.x, xs)
ys1 = Gaussian([A1, mu1, sig1], xs)
ys2 = Gaussian([A2, mu2, sig2], xs)
plt.plot(xs, ys, color=cols.blue, lw=2, label="$G1 + G2$")
plt.plot(xs, ys1, color=cols.orange, lw=2, label="$G1:\mu={0:.1f}, \sigma={1:.1f}$".format(mu1, sig1))
plt.plot(xs, ys2, color=cols.pink, lw=2, label="$G2:\mu={0:.1f}, \sigma={1:.1f}$".format(mu2, sig2))
plt.xlim(0, 30)
plt.legend()
plt.xlabel("$P_{\mathrm{rot}}~\mathrm{(Days)}$")
plt.ylabel("$\mathrm{Number~of~stars}$")
plt.subplots_adjust(bottom=.25, left=.25)
plt.savefig("hot_star_hist.pdf")
print(chi2(result1.x, x, y)/(len(x)-3-1), Double_chi2(result2.x, x, y)/(len(x)-6-1))
###Output
107.320483706 18.0529947306
|
evaluation-participant-e.ipynb | ###Markdown
E-CEO Challenge 3 Evaluation Weights Define the weight of each wavelength
###Code
w_412 = 0.56
w_443 = 0.73
w_490 = 0.71
w_510 = 0.36
w_560 = 0.01
###Output
_____no_output_____
###Markdown
Run Provide the run information: * run id * run metalink containing the 3 by 3 kernel extractions * participant
###Code
run_id = '0000006-150701000046181-oozie-oozi-W'
run_meta = 'http://sb-10-16-10-55.dev.terradue.int:50075/streamFile/ciop/run/participant-e/0000006-150701000046181-oozie-oozi-W/results.metalink?'
participant = 'participant-e'
###Output
_____no_output_____
###Markdown
Define all imports in a single cell
###Code
import glob
import pandas as pd
from scipy.stats.stats import pearsonr
import numpy
import math
###Output
_____no_output_____
###Markdown
Manage run results Download the results and aggregate them in a single Pandas dataframe
###Code
!curl $run_meta | aria2c -d $participant -M -
path = participant # use your path
allFiles = glob.glob(path + "/*.txt")
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None, header=0)
list_.append(df)
frame = pd.concat(list_)
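# Note: pd.concat keeps each file's original row index; for a clean sequential
# index the call would be (sketch): frame = pd.concat(list_, ignore_index=True)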
###Output
_____no_output_____
###Markdown
Number of points extracted from MERIS level 2 products
###Code
len(frame.index)
###Output
_____no_output_____
###Markdown
Calculate Pearson For all three sites, AAOT, BOUSSOLE and MOBY, calculate the Pearson factor for each band. > Note AAOT does not have measurements for band @510 AAOT site
###Code
insitu_path = './insitu/AAOT.csv'
insitu = pd.read_csv(insitu_path)
frame_full = pd.DataFrame.merge(frame.query('Name == "AAOT"'), insitu, how='inner', on = ['Date', 'ORBIT'])
frame_xxx= frame_full[['Rrs_413_mean', 'rho_wn_IS_412']].dropna()
r_aaot_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
frame_xxx= frame_full[['Rrs_443_mean', 'rho_wn_IS_443']].dropna()
r_aaot_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
frame_xxx= frame_full[['Rrs_490_mean', 'rho_wn_IS_490']].dropna()
r_aaot_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
r_aaot_510 = 0
print("0 observations for band @510")
frame_xxx= frame_full[['Rrs_560_mean', 'rho_wn_IS_560']].dropna()
r_aaot_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
insitu_path = './insitu/BOUSS.csv'
insitu = pd.read_csv(insitu_path)
frame_full = pd.DataFrame.merge(frame.query('Name == "BOUS"'), insitu, how='inner', on = ['Date', 'ORBIT'])
frame_xxx= frame_full[['Rrs_413_mean', 'rho_wn_IS_412']].dropna()
r_bous_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
frame_xxx= frame_full[['Rrs_443_mean', 'rho_wn_IS_443']].dropna()
r_bous_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
frame_xxx= frame_full[['Rrs_490_mean', 'rho_wn_IS_490']].dropna()
r_bous_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
frame_xxx= frame_full[['Rrs_510_mean', 'rho_wn_IS_510']].dropna()
r_bous_510 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @510")
frame_xxx= frame_full[['Rrs_560_mean', 'rho_wn_IS_560']].dropna()
r_bous_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
insitu_path = './insitu/MOBY.csv'
insitu = pd.read_csv(insitu_path)
frame_full = pd.DataFrame.merge(frame.query('Name == "MOBY"'), insitu, how='inner', on = ['Date', 'ORBIT'])
frame_xxx= frame_full[['Rrs_413_mean', 'rho_wn_IS_412']].dropna()
r_moby_412 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @412")
frame_xxx= frame_full[['Rrs_443_mean', 'rho_wn_IS_443']].dropna()
r_moby_443 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @443")
frame_xxx= frame_full[['Rrs_490_mean', 'rho_wn_IS_490']].dropna()
r_moby_490 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @490")
frame_xxx= frame_full[['Rrs_510_mean', 'rho_wn_IS_510']].dropna()
r_moby_510 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @510")
frame_xxx= frame_full[['Rrs_560_mean', 'rho_wn_IS_560']].dropna()
r_moby_560 = pearsonr(frame_xxx.ix[:,0], frame_xxx.ix[:,1])[0]
print(str(len(frame_xxx.index)) + " observations for band @560")
[r_aaot_412, r_aaot_443, r_aaot_490, r_aaot_510, r_aaot_560]
[r_bous_412, r_bous_443, r_bous_490, r_bous_510, r_bous_560]
[r_moby_412, r_moby_443, r_moby_490, r_moby_510, r_moby_560]
r_final = (numpy.mean([r_bous_412, r_moby_412, r_aaot_412]) * w_412 \
+ numpy.mean([r_bous_443, r_moby_443, r_aaot_443]) * w_443 \
+ numpy.mean([r_bous_490, r_moby_490, r_aaot_490]) * w_490 \
+ numpy.mean([r_bous_510, r_moby_510, r_aaot_510]) * w_510 \
+ numpy.mean([r_bous_560, r_moby_560, r_aaot_560]) * w_560) \
/ (w_412 + w_443 + w_490 + w_510 + w_560)
r_final
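# The same weighted mean, more compactly, via numpy.average (sketch):
# band_means = [numpy.mean([r_bous_412, r_moby_412, r_aaot_412]),
#               numpy.mean([r_bous_443, r_moby_443, r_aaot_443]),
#               numpy.mean([r_bous_490, r_moby_490, r_aaot_490]),
#               numpy.mean([r_bous_510, r_moby_510, r_aaot_510]),
#               numpy.mean([r_bous_560, r_moby_560, r_aaot_560])]
# r_final_check = numpy.average(band_means, weights=[w_412, w_443, w_490, w_510, w_560])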
###Output
_____no_output_____ |
notebooks/session9.ipynb | ###Markdown
Import packages You'll first need to install either the ```spaCy``` medium or large model! -> terminal
```
cd cds-language
source ./lang101/bin/activate
python -m spacy download en_core_web_md
deactivate
```
###Code
# preprocessing
import os
import pandas as pd
from tqdm import tqdm
# nlp
import spacy
nlp = spacy.load("en_core_web_md")
# gensim
from gensim.models import Word2Vec
import gensim.downloader
###Output
_____no_output_____
###Markdown
Using pretrained vectors in ```spaCy```
###Code
doc = nlp("denmark")
print(len(doc.vector))
doc.vector[0:50]
###Output
300
###Markdown
__Comparing individual words__
###Code
banana = nlp("banana")
apple = nlp("apple")
scotland = nlp("scotland")
denmark = nlp("denmark")
###Output
_____no_output_____
###Markdown
__Inspect word similarities__
###Code
banana.similarity(apple)
banana.similarity(scotland)
denmark.similarity(scotland)
###Output
_____no_output_____
###Markdown
__Document similarities__
###Code
doc1 = nlp("I like bananas")
doc2 = nlp("I like apples")
doc3 = nlp("I come from Scotland")
doc4 = nlp("I live in Denmark")
doc1.similarity(doc3)
doc3.similarity(doc4)
###Output
_____no_output_____
###Markdown
Working with ```gensim``` __Download pretrained models__
###Code
list(gensim.downloader.info()['models'].keys())
###Output
_____no_output_____
###Markdown
__Download a pretrained model__
###Code
pretrained_vectors = gensim.downloader.load('glove-wiki-gigaword-100')
###Output
_____no_output_____
###Markdown
__Inspect vector for specific word__
###Code
pretrained_vectors['denmark']
###Output
_____no_output_____
###Markdown
__Find most similar words to target__
###Code
pretrained_vectors.most_similar('denmark')
###Output
_____no_output_____
###Markdown
__Compare specific words__
###Code
pretrained_vectors.similarity('denmark', 'scotland')
pretrained_vectors.similarity('denmark', 'sweden')
###Output
_____no_output_____
###Markdown
__Vector algebra__*Man* is to *woman* as *dog* is to ...
###Code
pretrained_vectors.most_similar(positive=['woman', 'dog'],
negative=['man'],
topn=1)
pretrained_vectors.most_similar(positive=['walk', 'swim'],
negative=['walked'],
topn=1)
pretrained_vectors.most_similar(positive=['berlin', 'denmark'],
negative=['germany'],
topn=1)
###Output
_____no_output_____
###Markdown
__Odd one out!__
###Code
pretrained_vectors.doesnt_match(["france", "germany", "dog", "japan"])
###Output
_____no_output_____
###Markdown
Train your own models __Load data with pandas__
###Code
filename = os.path.join("..", "data", "labelled_data", "fake_or_real_news.csv")
data = pd.read_csv(filename)
data.head()
###Output
_____no_output_____
###Markdown
__Tokenize with ```spaCy```__
###Code
sentences = []
for post in tqdm(data["text"]):
# create a temporary list
tmp_list = []
# create spaCy doc object
doc = nlp(post.lower())
# loop over
for token in doc:
tmp_list.append(token.text)
# append tmp_list to sentences
sentences.append(tmp_list)
###Output
_____no_output_____
###Markdown
__Train model with ```gensim```__
###Code
model = Word2Vec(sentences=sentences, # input data
size=50, # embedding size
window=5, # context window
sg=1, # cbow or skip-gram (cbow=0, sg=1)
negative=5, # number of negative samples
min_count=3, # remove rare words
workers=6) # number of CPU processes
###Output
_____no_output_____
###Markdown
__Inspect most similar word__
###Code
model.wv.most_similar('faith', topn=10)
###Output
_____no_output_____
###Markdown
__Compare words__
###Code
model.wv.similarity('jesus', 'god')
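# Persisting the trained embeddings (sketch; filenames are placeholders):
# model.save("fake_news_w2v.model")   # full model, can resume training later
# model.wv.save("fake_news_w2v.kv")   # just the word vectors (KeyedVectors)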
###Output
_____no_output_____
###Markdown
Load libraries
###Code
# data tools
import os
import numpy as np
import matplotlib.pyplot as plt
# sklearn tools
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
# tf tools
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D,
MaxPooling2D,
Activation,
Flatten,
Dense)
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
def plot_history(H, epochs):
# visualize performance
plt.style.use("fivethirtyeight")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.tight_layout()
plt.show()
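# Called after training, e.g. plot_history(H, 40) for the 40-epoch run below.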
###Output
_____no_output_____
###Markdown
Load cifar10 data
###Code
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float") / 255.
testX = testX.astype("float") / 255.
# integers to one-hot vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.fit_transform(testY)
# initialize label names for CIFAR-10 dataset
labelNames = ["airplane", "automobile", "bird",
"cat", "deer", "dog", "frog", "horse",
"ship", "truck"]
###Output
_____no_output_____
###Markdown
ShallowNet ShallowNet architecture: >INPUT => CONV => ReLU => FC
###Code
# initialise model
model = Sequential()
# define CONV => RELU layer
model.add(Conv2D(32, (3, 3),
padding="same",
input_shape=(32, 32, 3)))
model.add(Activation("relu"))
# softmax classifier
model.add(Flatten())
model.add(Dense(10))
model.add(Activation("softmax"))
###Output
_____no_output_____
###Markdown
__Compile model__
###Code
opt = SGD(lr=0.01)
model.compile(loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
__Model summary__
###Code
model.summary()
###Output
_____no_output_____
###Markdown
__Plot model__
###Code
plot_model(model, show_shapes=True, show_layer_names=True)
###Output
_____no_output_____
###Markdown
__Train model__
###Code
H = model.fit(trainX, trainY,
validation_data=(testX, testY),
batch_size=32,
epochs=40,
verbose=1)
###Output
_____no_output_____
###Markdown
__Evaluate model__
###Code
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1),
target_names=labelNames))
###Output
_____no_output_____
###Markdown
LeNet LeNet architecture:>INPUT => CONV => ReLU => MAXPOOL => CONV => ReLU => MAXPOOL => FC => ReLU => FC __Define model__
###Code
# define model
model = Sequential()
# first set of CONV => RELU => POOL
model.add(Conv2D(32, (3, 3),
padding="same",
input_shape=(32, 32, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2),
strides=(2, 2)))
# second set of CONV => RELU => POOL
model.add(Conv2D(50, (5, 5),
padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2),
strides=(2, 2)))
# FC => RELU
model.add(Flatten())
model.add(Dense(500))
model.add(Activation("relu"))
# softmax classifier
model.add(Dense(10))
model.add(Activation("softmax"))
###Output
_____no_output_____
###Markdown
__Compile model__
###Code
opt = SGD(lr=0.01)
model.compile(loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
__Model summary__
###Code
model.summary()
###Output
_____no_output_____
###Markdown
__Plot model__
###Code
plot_model(model, show_shapes=True, show_layer_names=True)
###Output
_____no_output_____
###Markdown
__Train model__
###Code
# train model
H = model.fit(trainX, trainY,
validation_data=(testX, testY),
batch_size=32,
epochs=20,
verbose=1)
plot_history(H, 20)
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1),
target_names=labelNames))
###Output
_____no_output_____ |
Competitor-Products.ipynb | ###Markdown
Product Sentiment Data Data (public domain): https://data.world/crowdflower/brands-and-product-emotions Notebook code based on the IMDB notebook from bert-sklearn/other_examples
###Code
import numpy as np
import pandas as pd
import os
import sys
import csv
import re
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from ftfy import fix_text
from bert_sklearn import BertClassifier
from bert_sklearn import load_model
print(os.getcwd())
DATAFILE = "./data/judge-cleaned-up.csv"
# Prep Data
def cleanup(txt):
return fix_text(txt)
converters = {'tweet_text': cleanup}
raw_data = pd.read_csv(DATAFILE, converters=converters, encoding='unicode_escape')
raw_data.head(10)
## Transform columns
## ONLY RUN THIS CELL ONCE!!!
# Add columns to make the labels usable by the model
# tweet_text => text
# Positive / No emotion / Negative => 1, 0, -1
# Product: Apple stuff, Google stuff, NaN => Apple, Google, ''
def clean_text(txt):
return txt
raw_data.insert(1, "text", np.vectorize(clean_text)(raw_data['tweet_text']))
def create_labels(sentiment):
if sentiment.startswith('Positive'):
return 1
if sentiment.startswith('Negative'):
return -1
return 0
raw_data.insert(3, 'label', np.vectorize(create_labels)(raw_data['is_there_an_emotion_directed_at_a_brand_or_product']))
def get_company(product):
if pd.isnull(product):
return ''
if 'iPad' in product or 'iPhone' in product or 'Apple' in product:
return 'Apple'
if 'Google' in product or 'Android' in product:
return 'Google'
return ''
raw_data.insert(2, 'company', np.vectorize(get_company)(raw_data['emotion_in_tweet_is_directed_at']))
raw_data.head(10)
# Last Data Preparation Step
# Clean up characters and pull out columns of interest
def clean(text):
text = re.sub(r'<.*?>', '', text)
text = re.sub(r"\"", "", text)
return text
data = raw_data.filter(['text', 'company', 'label'], axis=1)
data['text'] = data['text'].transform(clean)
# Split into training and test data
msk = np.random.rand(len(data)) < 0.8
train = data[msk]
test = data[~msk]
print('Training data size: ' + str(train.shape))
print('Test data size: ' + str(test.shape))
train[:1].values
###Output
_____no_output_____
###Markdown
Unlike the IMDB reviews this notebook was based on, each tweet here is short, but the Google AI BERT models were trained on sequences of max length 512. Let's look at the performance for max_seq_length equal to 128, 256, and 512. max_seq_length = 128
###Code
## Set up data for the classifier
train = train.sample(800)
test = test.sample(500)
print("Train data size: %d "%(len(train)))
print("Test data size: %d "%(len(test)))
X_train = train['text']
y_train = train['label']
X_test = test['text']
y_test = test['label']
## Create the model
model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1])
model.max_seq_length = 128
model.learning_rate = 2e-05
model.epochs = 4
print(model)
%%time
## Train the model using our data (this could take a while)
model.fit(X_train, y_train)
accy = model.score(X_test, y_test)
%%time
## Test out the model with our own invented examples!
examples = [
'This Android product is not very good',
'I could not get that iPhone to work, so I sent it back. I\'m really upset!',
'Another great product from the folks at Google! We really liked it a lot',
'My iPad is essential - of course I would buy another one!',
'When in the course of human events it becomes necessary to dissolve those ties...',
'We the people, in order to form a more perfect union, establish justice, insure domestic tranquility, ...'
]
print(model.predict_proba(examples))
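# predict_proba returns one column per label in label_list=[-1, 0, 1];
# hard labels come from the scikit-style predict method (sketch):
# print(model.predict(examples))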
model.save('models/model1_128_bb_uncased.mdl')
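# Reload later with the load_model imported above (sketch):
# model = load_model('models/model1_128_bb_uncased.mdl')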
###Output
_____no_output_____
###Markdown
max_seq_length = 256
###Code
%%time
## Don't use this one - it will take a very long time!
model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1])
model.max_seq_length = 256
model.train_batch_size = 32
model.learning_rate = 2e-05
model.epochs = 4
print(model)
model.fit(X_train, y_train)
accy = model.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
max_seq_length = 512
###Code
%%time
## Don't use this one - it will take the longest of all!
model = BertClassifier(bert_model='bert-base-uncased', label_list=[-1,0,1])
model.max_seq_length = 512
# max_seq_length=512 will use a lot more GPU mem, so I am turning down batch size
# and adding gradient accumulation steps
model.train_batch_size = 16
model.gradient_accumulation_steps = 4
model.learning_rate = 2e-05
model.epochs = 4
print(model)
model.fit(X_train, y_train)
accy = model.score(X_test, y_test)
###Output
_____no_output_____ |
2021-07-10-q-metrics-plots-APx.ipynb | ###Markdown
Overall AP, mAP.5, mAP.75
###Code
s = GrandSummary('~/reval_50/')
fig, axes = get_figure_axes()
s.plot_ap_summaries(axes, order=DEFAULT_ORDER, legend=False)
finish_plot(fig, axes)
fig.savefig('all_50_AP_Q.pdf')
s = GrandSummary('~/reval_05/')
fig, axes = get_figure_axes()
s.plot_ap_summaries(axes, order=DEFAULT_ORDER, legend=False)
finish_plot(fig, axes)
fig.savefig('all_05_AP_Q.pdf')
###Output
_____no_output_____
###Markdown
By size
###Code
s = GrandSummary('~/reval_50/')
fig, axes = get_figure_axes()
s.plot_ap_summaries(axes, order=DEFAULT_ORDER, legend=False, by_size=True)
finish_plot(fig, axes)
fig.savefig('all_50_APsize_Q.pdf')
s = GrandSummary('~/reval_05/')
fig, axes = get_figure_axes()
s.plot_ap_summaries(axes, order=DEFAULT_ORDER, legend=False, by_size=True)
finish_plot(fig, axes)
fig.savefig('all_05_APsize_Q.pdf')
###Output
_____no_output_____
###Markdown
Looking at derivative
###Code
fig, axes = get_figure_axes(sharey=True)
s.plot_ap_derivatives(axes, order=DEFAULT_ORDER)
finish_plot(fig, axes)
fig.savefig('all_05_APderiv_Q.pdf')
###Output
_____no_output_____
###Markdown
Prior research
###Code
df = next(s.ap_summaries())
for df in s.ap_summaries():
df.AP.rolling(5).mean().diff().plot()
for df in s.ap_summaries():
df.AP.diff().rolling(5).mean().plot()
###Output
_____no_output_____
###Markdown
Commutative?
###Code
for df in s.ap_summaries():
df.APs.diff().rolling(5).mean().plot()
for df in s.ap_summaries():
df.APs.rolling(5).mean().diff().plot()
###Output
_____no_output_____ |
iteration_1_PDF data extractor.ipynb | ###Markdown
Global Variables
###Code
import print_metrics
file_path = "ITIL Books/ITIL 3/ITIL3 Service Operation chapter 4.pdf"
# extracted_text_file_path = "ITIL Books/ITIL 3/Continual service improvement chapter from notebook.txt"
# extracted_text_file_path = "ITIL Books/ITIL 3/Service operation chapter 4/Service operation chapter 4 - 4.txt"
# extracted_text_file_path = "ITIL Books\ITIL 3\Service operation chapter 4\Automated concepts extracted\\4.2\Service operation chapter 4 - 4.2 to 4.2.4 .txt"
output_file_path = "output/ITIL3 Continual Service Improvement.txt"
# Uncomment a single extracted_text_file_path and a single manual_concepts_file_path to run metric tests on that
# specific chapter subsection, e.g. 4.1 is uncommented now so metrics will be calculated for it within the last cell
# of the notebook
lemmatize = False
# lemmatize = True
### Chapter 4 start ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4)
### Chapter 4 - 4.1 to 4.1.4 ###
extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.1)
manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.1)
### Chapter 4 - 4.2 to 4.2.4 ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.2)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.2)
# ### Chapter 4 - 4.3 to 4.3.4 ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.3)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.3)
# ### Chapter 4 - 4.4 to 4.4.4 ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.4)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.4)
# ### Chapter 4 - 4.5 to 4.5.4 ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.5)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.5)
# ### Chapter 4 - 4.6 to 4.6.4 ###
# extracted_text_file_path = print_metrics.get_extracted_text_file_path(4.6)
# manual_concepts_file_path = print_metrics.get_manual_concepts_file_path(4.6)
###Output
_____no_output_____
###Markdown
Imports
###Code
import pdfminer
import nltk
import re
###Output
_____no_output_____
###Markdown
Global Functions
###Code
with open(extracted_text_file_path, 'r') as file:
extracted_text = file.read()
# extracted_text = "Particle dynamics involves the study of physics and chemistry"
# tokens = nltk.word_tokenize(extracted_text)
# print(tokens)
# ### Part of speech tagging ###
# part_of_speech_array = nltk.pos_tag(tokens)
# print(part_of_speech_array)
###Output
_____no_output_____
###Markdown
Text sanitization and word tokenizing
###Code
### Grab sections from text ###
# print(re.findall("^\d(\.|\d)*(\s|\w)*$", extracted_text))
# title_pattern = re.compile(r"^\d(\.|\d)*(\s|\w)*$", re.MULTILINE)
title_pattern = re.compile(r"^\d+.*$", re.MULTILINE)
sections = title_pattern.findall(extracted_text)
### Drop matched 'titles' that contain '%' or ')' (figure/percentage residue) ###
sections = [section for section in sections
            if section.find("%") == -1 and section.find(")") == -1]
### Sanitise extracted text ###
extracted_text_sanitised = extracted_text
extracted_text_sanitised = extracted_text.replace("¦", "")
extracted_text_sanitised = extracted_text_sanitised.replace("–", "")
### Tokenise extracted text ###
tokens = nltk.word_tokenize(extracted_text_sanitised)
# print(tokens)
###Output
_____no_output_____
###Markdown
Part to speech tagging
###Code
### Part of speech tagging ###
part_of_speech_array = nltk.pos_tag(tokens)
# print(part_of_speech_array)
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def lemmatize_part_of_speech_array(part_of_speech_array):
part_of_speech_array_lemmatized = []
for part_of_speech in part_of_speech_array:
part_of_speech_array_lemmatized.append(
(lemmatizer.lemmatize(part_of_speech[0]), part_of_speech[1])
)
return part_of_speech_array_lemmatized
# print(part_of_speech_array_lemmatized)
# Uncomment this if lemmatization is to be used
if lemmatize:
part_of_speech_array = lemmatize_part_of_speech_array(part_of_speech_array)
###Output
_____no_output_____
###Markdown
Term extraction
###Code
### Term Extraction (NNP next to each other) ###
def extract_terms(part_of_speech_array_array, tags_to_use):
terms_array = []
term_phrase = []
start_new_term = True
for index, part in enumerate(part_of_speech_array_array):
if(part[1] in tags_to_use):
term_phrase.append(part[0])
start_new_term = False if part_of_speech_array_array[index + 1][1] in tags_to_use else True
if start_new_term == True:
terms_array.append(" ".join(term_phrase))
term_phrase = []
return terms_array
### Term extraction including adjectives (body identical to extract_terms; adjectives enter via the tags_to_use argument) ###
def extract_terms_with_adj(part_of_speech_array_array, tags_to_use):
terms_array = []
term_phrase = []
start_new_term = True
for index, part in enumerate(part_of_speech_array_array):
if(part[1] in tags_to_use):
term_phrase.append(part[0])
start_new_term = False if part_of_speech_array_array[index + 1][1] in tags_to_use else True
if start_new_term == True:
terms_array.append(" ".join(term_phrase))
term_phrase = []
return terms_array
major_named_concepts = extract_terms(part_of_speech_array, {"NNP", "NNPS"})
other_concepts = extract_terms(part_of_speech_array, {"NN", "NNS"})
all_noun_phrases = extract_terms(part_of_speech_array, {"NNP", "NNPS", "NN", "NNS"})
all_noun_phrases_with_adj = extract_terms_with_adj(part_of_speech_array, {"NNP", "NNPS", "NN", "NNS", "JJ"})
print(f"major named concepts: \n{major_named_concepts}")
print(f"\nother concepts: \n{other_concepts}")
print(f"\nall noun phrases: \n{all_noun_phrases}")
print(f"\nall noun phrases with adj: \n{all_noun_phrases_with_adj}")
###Output
major named concepts:
['OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 'Change Management Change Management', 'Service Transition', 'Change Management', 'Service Operation', 'Raising', 'RFCs', 'Service Operation', 'CAB', 'CAB/EC', 'Service Operation', '� Implementing', 'Change Management', 'Service Operation', 'Change Management', 'Service Operation', 'Helping', 'Service Operation', 'Service Operation', 'Change Management', 'Configuration Management Configuration Management', 'Service Transition', 'Configuration Management', 'Service Operation', 'Informing Configuration Management', 'CIs', 'CMS � Making', 'Configuration Management', 'Service Operation', 'Responsibility', 'CMS', 'Configuration Management', 'Operations', 'Configuration Management', 'CIs', 'CIs', 'CMS', 'Operations', 'Release', 'Deployment Management Release', 'Deployment Management', 'Service Transition', 'Service Operation', 'Actual', 'Release', 'Deployment Management', 'Service Operation', 'Participation', 'Service Operation', 'CIs', 'DML', 'Release', 'Deployment Management']
other concepts:
['publication', 'aspect', 'staff', 'basis', 'issue', 'meeting', 'risk', 'issue', 'view', 'account', 'change', 'component', 'service', 'change', 'component', 'service', 'define', 'maintain change model', 'component', 'service', 'change schedule', 'staff', 'change', 'process', 'standard', 'change', 'publication', 'aspect', 'staff', 'basis', 'discrepancy', 'amendment', 'discrepancy', 'authority', 'component', 'service', 'case', 'staff', 'direction', 'relationship', 'mark', '�disposed�', 'update', 'activity', 'staff', 'publication', 'aspect', 'process', 'staff', 'basis', 'implementation action', 'deployment', 'release', 'direction', 'component', 'service', 'planning stage', 'release', 'issue', 'handling', 'role', 'procedure', 'item']
all noun phrases:
['OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 'Change Management Change Management', 'Service Transition publication', 'aspect', 'Change Management', 'Service Operation staff', 'basis', 'Raising', 'RFCs', 'Service Operation issue', 'CAB', 'CAB/EC meeting', 'Service Operation risk', 'issue', 'view', 'account � Implementing change', 'Change Management', 'Service Operation component', 'service', 'change', 'Change Management', 'Service Operation component', 'service', 'Helping define', 'maintain change model', 'Service Operation component', 'service', 'change schedule', 'Service Operation staff', 'change', 'Change Management process', 'standard', 'change', 'Configuration Management Configuration Management', 'Service Transition publication', 'aspect', 'Configuration Management', 'Service Operation staff', 'basis', 'Informing Configuration Management', 'discrepancy', 'CIs', 'CMS � Making', 'amendment', 'discrepancy', 'authority', 'Configuration Management', 'Service Operation component', 'service', 'Responsibility', 'CMS', 'Configuration Management', 'case Operations staff', 'direction', 'Configuration Management', 'relationship', 'CIs', 'mark CIs', '�disposed�', 'CMS', 'update', 'activity', 'Operations staff', 'Release', 'Deployment Management Release', 'Deployment Management', 'Service Transition publication', 'aspect', 'process', 'Service Operation staff', 'basis', 'Actual implementation action', 'deployment', 'release', 'direction', 'Release', 'Deployment Management', 'Service Operation component', 'service', 'Participation', 'planning stage', 'release', 'Service Operation issue', 'handling', 'CIs', 'DML', 'role', 'Release', 'Deployment Management procedure', 'item']
all noun phrases with adj:
['OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 'Change Management Change Management', 'Service Transition publication', 'aspect', 'Change Management', 'Service Operation staff', 'day-to-day basis', '� Raising', 'RFCs', 'Service Operation issue', 'CAB', 'CAB/EC meeting', 'Service Operation risk', 'issue', 'view', 'account � Implementing change', 'Change Management', 'Service Operation component', 'service', 'change', 'Change Management', 'Service Operation component', 'service', 'Helping define', 'maintain change model', 'Service Operation component', 'service', 'change schedule', 'Service Operation staff', 'aware', 'relevant change', 'Change Management process', 'standard', 'operational-type change', 'Configuration Management Configuration Management', 'Service Transition publication', 'aspect', 'Configuration Management', 'Service Operation staff', 'day-to-day basis', '� Informing Configuration Management', 'discrepancy', 'CIs', 'CMS � Making', 'amendment necessary', 'discrepancy', 'authority', 'Configuration Management', 'Service Operation component', 'service', 'Responsibility', 'CMS', 'Configuration Management', 'case Operations staff', 'direction', 'Configuration Management', 'relationship', 'new CIs', 'mark CIs', '�disposed�', 'CMS', 'update', 'operational activity', 'Operations staff', 'Release', 'Deployment Management Release', 'Deployment Management', 'Service Transition publication', 'aspect', 'process', 'Service Operation staff', 'day-to-day basis', '� Actual implementation action', 'deployment', 'new release', 'direction', 'Release', 'Deployment Management', 'Service Operation component', 'service � Participation', 'planning stage', 'major new release', 'Service Operation issue', 'physical handling', 'CIs', 'DML', 'operational role', 'Release', 'Deployment Management procedure', 'such', 'item']
###Markdown
Major/common concept extraction
###Code
# concept_relationships = extract_terms(part_of_speech_array, {"VP"})
# print(concept_relationships)
# print(all_noun_phrases)
### Perform frequency analysis ###
### Concept Extraction Frequency analysis ###
major_named_concept_frequency_distribution = nltk.FreqDist(major_named_concepts)
other_concept_frequency_distribution = nltk.FreqDist(other_concepts)
all_noun_phrases_frequency_distribution = nltk.FreqDist(all_noun_phrases)
print(f"major named concepts: \n{major_named_concept_frequency_distribution.most_common(50)}")
print(f"\nother concepts: \n{other_concept_frequency_distribution.most_common(50)}")
print(f"\nall noun phrases: \n{all_noun_phrases_frequency_distribution.most_common(50)}")
###Output
major named concepts:
[('Service Operation', 12), ('Change Management', 4), ('Configuration Management', 4), ('CIs', 4), ('Service Transition', 3), ('Release', 3), ('Deployment Management', 3), ('CMS', 2), ('Operations', 2), ('OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 1), ('Change Management Change Management', 1), ('Raising', 1), ('RFCs', 1), ('CAB', 1), ('CAB/EC', 1), ('� Implementing', 1), ('Helping', 1), ('Configuration Management Configuration Management', 1), ('Informing Configuration Management', 1), ('CMS � Making', 1), ('Responsibility', 1), ('Deployment Management Release', 1), ('Actual', 1), ('Participation', 1), ('DML', 1)]
other concepts:
[('staff', 6), ('component', 5), ('service', 5), ('change', 4), ('publication', 3), ('aspect', 3), ('basis', 3), ('issue', 3), ('process', 2), ('discrepancy', 2), ('direction', 2), ('release', 2), ('meeting', 1), ('risk', 1), ('view', 1), ('account', 1), ('define', 1), ('maintain change model', 1), ('change schedule', 1), ('standard', 1), ('amendment', 1), ('authority', 1), ('case', 1), ('relationship', 1), ('mark', 1), ('�disposed�', 1), ('update', 1), ('activity', 1), ('implementation action', 1), ('deployment', 1), ('planning stage', 1), ('handling', 1), ('role', 1), ('procedure', 1), ('item', 1)]
all noun phrases:
[('Service Operation component', 5), ('service', 5), ('Service Operation staff', 4), ('Configuration Management', 4), ('Service Transition publication', 3), ('aspect', 3), ('Change Management', 3), ('basis', 3), ('change', 3), ('CIs', 3), ('Release', 3), ('Service Operation issue', 2), ('discrepancy', 2), ('CMS', 2), ('direction', 2), ('Deployment Management', 2), ('release', 2), ('OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 1), ('Change Management Change Management', 1), ('Raising', 1), ('RFCs', 1), ('CAB', 1), ('CAB/EC meeting', 1), ('Service Operation risk', 1), ('issue', 1), ('view', 1), ('account � Implementing change', 1), ('Helping define', 1), ('maintain change model', 1), ('change schedule', 1), ('Change Management process', 1), ('standard', 1), ('Configuration Management Configuration Management', 1), ('Informing Configuration Management', 1), ('CMS � Making', 1), ('amendment', 1), ('authority', 1), ('Responsibility', 1), ('case Operations staff', 1), ('relationship', 1), ('mark CIs', 1), ('�disposed�', 1), ('update', 1), ('activity', 1), ('Operations staff', 1), ('Deployment Management Release', 1), ('process', 1), ('Actual implementation action', 1), ('deployment', 1), ('Participation', 1)]
###Markdown
Concept relationship extraction
###Code
def get_sentence_at_index(part_of_speech_array, index):
sentence_starting_index = 0
sentence_end_index = len(part_of_speech_array)
### Get sentence start index ###
for i in range(0, index):
# print(part_of_speech_array[index])
if part_of_speech_array[index - i][1] == ".":
sentence_starting_index = index - i
break
### Get sentence end index ###
for i in range(0, len(part_of_speech_array) - index):
if part_of_speech_array[index + i][1] == ".":
sentence_end_index = index + i
break
return (sentence_starting_index, sentence_end_index + 1)
### Term Extraction (NNP next to each other) ###
def extract_terms(part_of_speech_array_array, tags_to_use):
part_of_speech_array_with_terms = []
terms_array = []
term_phrase = []
start_new_term = True
for index, part in enumerate(part_of_speech_array_array):
if(part[1] in tags_to_use):
term_phrase.append(part[0])
start_new_term = False if part_of_speech_array_array[index + 1][1] in tags_to_use else True
if start_new_term == True:
if len(term_phrase) > 1:
# part_of_speech_array_with_terms.append((" ".join(term_phrase), f"NPhrase-{part[1]}"))
part_of_speech_array_with_terms.append((" ".join(term_phrase), "NPhrase"))
else:
part_of_speech_array_with_terms.append((" ".join(term_phrase), part[1]))
term_phrase = []
else:
part_of_speech_array_with_terms.append((part[0], part[1]))
return part_of_speech_array_with_terms
part_of_speech_array_with_terms = extract_terms(part_of_speech_array, {"NNP", "NNPS", "NN", "NNS"})
# print(part_of_speech_array_with_terms)
sentences = []
temp_sentence = []
for word in part_of_speech_array_with_terms:
if word[1] is ".":
temp_sentence.append(word)
sentences.append(temp_sentence)
temp_sentence = []
else:
temp_sentence.append(word)
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
# for index, sentence in enumerate(sentences):
# sentence_with_no_stop_words = [word_pos for word_pos in sentence if not word_pos[0] in stop_words]
#
# if index > 3:
# break
#
# print(sentence)
# print(sentence_with_no_stop_words)
# print("-" * 100)
# from part_of_speech_regex import PartOfSpeechRegex
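# NOTE: placeholder stub; the real matcher presumably lives in the commented-out
# part_of_speech_regex module above. This inline version only echoes the pattern.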
class PartOfSpeechRegex:
def parseAndReturnPatterns(self, pattern_string, sentence):
print(pattern_string)
pattern_string = "*<JJ><NNP><VBZ><NP>"
print(sentences[3])
part_of_speech_regex = PartOfSpeechRegex()
# part_of_speech_regex.parseAndReturnPatterns()
part_of_speech_regex.parseAndReturnPatterns(pattern_string, sentences[3])
most_common_major_concepts = major_named_concept_frequency_distribution.most_common(50)
# print(most_common_major_concepts)
# print(tokens.index(most_common_major_concepts[0][0]))
# print(part_of_speech_array[206])
## Get indices of all common concepts
indices = [i for i, x in enumerate(part_of_speech_array) if x[0] == most_common_major_concepts[0][0]]
# print(indices)
# print(most_common_major_concepts[1][0])
def get_sentence_at_index(part_of_speech_array, index):
sentence_starting_index = 0
sentence_end_index = len(part_of_speech_array)
### Get sentence start index ###
for i in range(0, index):
if part_of_speech_array[index - i][1] == ".":
sentence_starting_index = index - i
break
### Get sentence end index ###
for i in range(0, len(part_of_speech_array) - index):
if part_of_speech_array[index + i][1] == ".":
sentence_end_index = index + i
break
return (sentence_starting_index + 1, sentence_end_index + 1)
def does_list_contain_verb_pos(part_of_speech_array):
for word_pos in part_of_speech_array:
if word_pos[1] in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:
return True
return False
# sentence_index = get_sentence_at_index(part_of_speech_array, indices[0])
# print(sentence_index)
concept_relations = []
i = 0
for index in indices:
# if i < 3:
# i = i + 1
# continue
sentence_pos_containing_concept = part_of_speech_array[
get_sentence_at_index(part_of_speech_array, index)[0]:
get_sentence_at_index(part_of_speech_array, index)[1]
]
print(sentence_pos_containing_concept)
last_concept = ()
last_concept_index = -1
# For word part_of_speech in sentence_part_of_speech_containing_concept
for word_index, word_pos in enumerate(sentence_pos_containing_concept):
# print(f"{word_pos[0]}: {word_pos[1]}")
# print(word_pos[0] in all_noun_phrases)
# if (word_pos[0] in all_noun_phrases):
if (word_pos[0] in major_named_concepts):
if last_concept_index != -1:# and does_list_contain_verb_pos(sentence_pos_containing_concept[last_concept_index + 1:word_index]):
concept_relations.append(f"{last_concept}::{sentence_pos_containing_concept[last_concept_index + 1:word_index]}::{word_pos}")
last_concept = word_pos
last_concept_index = word_index
# print(related_concepts)
# print()
# print('-----')
i = i + 1
# if i == 4:
# break
for concept_relation in concept_relations:
print(concept_relation)
###Output
_____no_output_____
###Markdown
Metrics for term extraction chapter 4 first section
###Code
# This code has been moved into the print_metrics.py Python module to prevent code duplication
### The below cell code is now within the print_metrics python file so it can be used for iteration 1 and 2 without duplication
# # automatic_concepts_file_path = "ITIL Books/ITIL 3/Service operation chapter 4/Automated concepts extracted/4/Automated concepts extracted 4.txt"
# # manual_concepts_file_path = "ITIL Books/ITIL 3/Service operation chapter 4/Automated concepts extracted/4/Manual concepts extracted 4.txt"
#
# # automatic_concepts_file_path = "ITIL Books/ITIL 3/Service operation chapter 4/Automated concepts extracted/4.2/Automated concepts extracted 4.2.txt"
# # manual_concepts_file_path = "ITIL Books/ITIL 3/Service operation chapter 4/Automated concepts extracted/4.2/Manual concepts extracted 4.2.txt"
# #
# # with open(automatic_concepts_file_path, 'r') as file:
# # automatic_concepts = file.read()
#
# with open(manual_concepts_file_path, 'r') as file:
# manual_concepts = file.read()
#
# manual_concepts_list = manual_concepts.split('\n')
# manual_concepts_list = [x.lower() for x in manual_concepts_list]
#
# # print("Manual concepts")
# # print(list(dict.fromkeys(manual_concepts_list)))
# # print()
#
# # automatic_concepts_list = ['Service Operation', 'processes', 'paragraph', 'detail', 'chapter', 'reference', 'structure', 'processes', 'detail', 'chapter', 'Please note', 'roles', 'process', 'tools', 'process', 'Chapters', 'Management', 'process', 'monitors', 'events', 'IT infrastructure', 'operation', 'exception conditions', 'Incident Management', 'service', 'users', 'order', 'business impact', 'Problem Management', 'root-cause analysis', 'cause', 'events', 'incidents', 'activities', 'problems/incidents', 'Known Error subprocess', 'quicker diagnosis', 'resolution', 'incidents', 'NOTE', 'distinction', 'incidents', 'problems', 'Incident', 'Problem Records', 'danger', 'Incidents', 'support cycle', 'actions', 'recurrence', 'incidents', 'Incidents', 'root cause analysis', 'visibility', 'user ’ s service', 'SLA targets', 'service', 'users', 'expectations', 'results', 'number', 'incidents', '‘ purge ’', 'visibility', 'issues', 'Request Fulfilment', 'management', 'customer', 'user requests', 'incident', 'service delay', 'disruption', 'organizations', 'requests', 'category ’', 'incidents', 'information', 'Incident Management system', 'others', 'volumes', 'business priority', 'requests', 'provision', 'Request Fulfilment', 'Request Fulfilment process', 'practice', 'Request Fulfilment process', 'customer', 'user requests', 'types', 'requests', 'facilities', 'moves', 'supplies', 'IT services', 'requests', 'SLA measures', 'records', 'process flow', 'practice', 'organizations', 'Access Management', 'process', 'users', 'right', 'service', 'access', 'users', 'users', 'ability', 'access services', 'stages', 'resources', 'HR', 'lifecycle', 'Access Management', 'Identity', 'Rights Management', 'organizations', 'addition', 'processes', 'Service Operation', 'phases', 'Service Management Lifecycle', 'aspects', 'processes', 'part', 'chapter', 'include', 'Change Management', 'process', 'Configuration Management', 'Release Management', 'topics', 'Service Transition publication', 'Capacity', 'Availability Management', 'aspects', 'publication', 'detail', 'Service Design publication', 'Financial Management', 'Service Strategy publication', 'Knowledge Management', 'Service Transition publication', 'IT Service Continuity', 'Service Design publication', 'Service Reporting', 'Measurement', 'Continual Service Improvement publication']
# automatic_concepts_list = all_noun_phrases
# automatic_concepts_list = [x.lower() for x in automatic_concepts_list]
#
# # print("all noun phrases")
# # print(list(dict.fromkeys(automatic_concepts_list)))
#
# count = 0
# for concept in manual_concepts_list:
# if concept in automatic_concepts_list:
# count = count + 1
#
# number_of_fully_correct_manual_concepts = count
#
# number_of_manual_concepts = len(manual_concepts_list)
#
# count = 0
# for concept in automatic_concepts_list:
# if concept in manual_concepts_list:
# count = count + 1
#
# number_of_fully_correct_automatic_concepts = count
#
# number_of_automatic_concepts = len(automatic_concepts_list)
#
# print(f"number_of_manual_concepts: {number_of_manual_concepts}")
# print(f"number_of_automatic_concepts: {number_of_automatic_concepts}")
# print(f"number_of_fully_correct_manual_concepts: {number_of_fully_correct_manual_concepts}")
# print(f"number_of_fully_correct_automatic_concepts: {number_of_fully_correct_automatic_concepts}")
#
# # Lists to words for partial matches
# automatic_concepts_list_single_words = [x.split() for x in automatic_concepts_list]
# # print(automatic_concepts_list_single_words)
#
# manual_concepts_list_single_words = [x.split() for x in manual_concepts_list]
# # print(manual_concepts_list_single_words)
#
# count = 0
# for concept in manual_concepts_list_single_words:
# for word in concept:
# if word in ' '.join(automatic_concepts_list).split():
# count = count + 1
# break
#
# number_of_full_and_partial_correct_manual_concepts = count
# print(f"number_of_full_and_partial_correct_manual_concepts: {number_of_full_and_partial_correct_manual_concepts}")
#
# count = 0
# for concept in automatic_concepts_list_single_words:
# for word in concept:
# if word in ' '.join(manual_concepts_list).split():
# count = count + 1
# break
#
# number_of_full_and_partial_correct_automatic_concepts = count
# print(f"number_of_full_and_partial_correct_automatic_concepts: {number_of_full_and_partial_correct_automatic_concepts}")
import print_metrics
part_of_speech_array = nltk.pos_tag(tokens)
lemmatized_part_of_speech_array = lemmatize_part_of_speech_array(part_of_speech_array)
extract_terms(part_of_speech_array, {"NNP", "NNPS", "NN", "NNS"})
extract_terms(lemmatized_part_of_speech_array, {"NNP", "NNPS", "NN", "NNS"})
print("With duplicates")
print_metrics.print_metrics(manual_concepts_file_path, all_noun_phrases, debug=True, remove_duplicates=False)
print()
print("Without duplicates")
print_metrics.print_metrics(manual_concepts_file_path, all_noun_phrases, debug=True, remove_duplicates=True)
###Output
With duplicates
All noun phrases
['OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 'Change Management Change Management', 'Service Transition publication', 'aspect', 'Change Management', 'Service Operation staff', 'basis', 'Raising', 'RFCs', 'Service Operation issue', 'CAB', 'CAB/EC meeting', 'Service Operation risk', 'issue', 'view', 'account • Implementing change', 'Change Management', 'Service Operation component', 'service', 'change', 'Change Management', 'Service Operation component', 'service', 'Helping define', 'maintain change model', 'Service Operation component', 'service', 'change schedule', 'Service Operation staff', 'change', 'Change Management process', 'standard', 'change', 'Configuration Management Configuration Management', 'Service Transition publication', 'aspect', 'Configuration Management', 'Service Operation staff', 'basis', 'Informing Configuration Management', 'discrepancy', 'CIs', 'CMS • Making', 'amendment', 'discrepancy', 'authority', 'Configuration Management', 'Service Operation component', 'service', 'Responsibility', 'CMS', 'Configuration Management', 'case Operations staff', 'direction', 'Configuration Management', 'relationship', 'CIs', 'mark CIs', '‘disposed’', 'CMS', 'update', 'activity', 'Operations staff', 'Release', 'Deployment Management Release', 'Deployment Management', 'Service Transition publication', 'aspect', 'process', 'Service Operation staff', 'basis', 'Actual implementation action', 'deployment', 'release', 'direction', 'Release', 'Deployment Management', 'Service Operation component', 'service', 'Participation', 'planning stage', 'release', 'Service Operation issue', 'handling', 'CIs', 'DML', 'role', 'Release', 'Deployment Management procedure', 'item']
manual concepts
['operational activities', 'processes', 'lifecycle phases', 'change management', 'change management', 'service transition publication', 'change management', 'service operation staff', 'day-to-day-basis', 'rfcs', 'service operation issues', 'cab', 'cab meetings', 'ec meetings', 'service operation risks', 'issues', 'views', 'changes', 'change management', 'service operation component', 'services', 'change management', 'service operation component', 'services', 'change models', 'service operation components', 'services', 'change schedules', 'service operation staff', 'changes', 'change management process', 'standard', 'operational-type changes', 'configuration management', 'configuration management', 'service transition publication', 'aspects', 'configuration management', 'service operation staff', 'day-to-day basis', 'configuration management', 'discrepancies', 'cis', 'cms', 'amendments', 'correct', 'discrepancies', 'authority', 'configuration management', 'service operation components', 'services', 'responsibility', 'cms', 'configuration management', 'operations staff', 'configuration management', 'relationships', 'cis', 'cis', 'disposed', 'cms', 'updates', 'related', 'operational activities', 'operations staff', 'release and deployment management', 'release and deployment management', 'service transition publication', 'aspects', 'process', 'service operation staff', 'day-to-day basis', 'actual implementation actions', 'deployment', 'new releases', 'direction', 'release and deployment management', 'relate', 'service operation components', 'services', 'participation', 'planning stages', 'releases', 'service operation issues', 'physical handling', 'cis', 'dml', 'fulfil', 'operational roles', 'adhering', 'relevant release and deployment management procedures', 'ensure', 'items', 'booked out and back in', '']
number_of_manual_concepts: 95
number_of_automatic_concepts: 90
number_of_fully_correct_manual_concepts: 41
number_of_fully_correct_automatic_concepts: 37
number_of_full_and_partial_correct_manual_concepts: 65
number_of_full_and_partial_correct_automatic_concepts: 73
Without duplicates
All noun phrases
['OPERATIONAL ACTIVITIES OF PROCESSES COVERED IN OTHER LIFECYCLE PHASES', 'Change Management Change Management', 'Service Transition publication', 'aspect', 'Change Management', 'Service Operation staff', 'basis', 'Raising', 'RFCs', 'Service Operation issue', 'CAB', 'CAB/EC meeting', 'Service Operation risk', 'issue', 'view', 'account • Implementing change', 'Change Management', 'Service Operation component', 'service', 'change', 'Change Management', 'Service Operation component', 'service', 'Helping define', 'maintain change model', 'Service Operation component', 'service', 'change schedule', 'Service Operation staff', 'change', 'Change Management process', 'standard', 'change', 'Configuration Management Configuration Management', 'Service Transition publication', 'aspect', 'Configuration Management', 'Service Operation staff', 'basis', 'Informing Configuration Management', 'discrepancy', 'CIs', 'CMS • Making', 'amendment', 'discrepancy', 'authority', 'Configuration Management', 'Service Operation component', 'service', 'Responsibility', 'CMS', 'Configuration Management', 'case Operations staff', 'direction', 'Configuration Management', 'relationship', 'CIs', 'mark CIs', '‘disposed’', 'CMS', 'update', 'activity', 'Operations staff', 'Release', 'Deployment Management Release', 'Deployment Management', 'Service Transition publication', 'aspect', 'process', 'Service Operation staff', 'basis', 'Actual implementation action', 'deployment', 'release', 'direction', 'Release', 'Deployment Management', 'Service Operation component', 'service', 'Participation', 'planning stage', 'release', 'Service Operation issue', 'handling', 'CIs', 'DML', 'role', 'Release', 'Deployment Management procedure', 'item']
manual concepts
['operational activities', 'processes', 'lifecycle phases', 'change management', 'service transition publication', 'service operation staff', 'day-to-day-basis', 'rfcs', 'service operation issues', 'cab', 'cab meetings', 'ec meetings', 'service operation risks', 'issues', 'views', 'changes', 'service operation component', 'services', 'change models', 'service operation components', 'change schedules', 'change management process', 'standard', 'operational-type changes', 'configuration management', 'aspects', 'day-to-day basis', 'discrepancies', 'cis', 'cms', 'amendments', 'correct', 'authority', 'responsibility', 'operations staff', 'relationships', 'disposed', 'updates', 'related', 'release and deployment management', 'process', 'actual implementation actions', 'deployment', 'new releases', 'direction', 'relate', 'participation', 'planning stages', 'releases', 'physical handling', 'dml', 'fulfil', 'operational roles', 'adhering', 'relevant release and deployment management procedures', 'ensure', 'items', 'booked out and back in', '']
number_of_manual_concepts: 59
number_of_automatic_concepts: 55
number_of_fully_correct_manual_concepts: 19
number_of_fully_correct_automatic_concepts: 19
number_of_full_and_partial_correct_manual_concepts: 36
number_of_full_and_partial_correct_automatic_concepts: 41
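Read as standard retrieval metrics, the exact-match counts above give precision (over the automatic list) and recall (over the manual list). A minimal sketch using the deduplicated numbers just printed (constants copied by hand, purely for illustration):

```python
# Counts copied from the "Without duplicates" run above.
number_of_manual_concepts = 59
number_of_automatic_concepts = 55
number_of_fully_correct_manual_concepts = 19
number_of_fully_correct_automatic_concepts = 19

# Precision: share of automatically extracted concepts matching a manual one.
precision = number_of_fully_correct_automatic_concepts / number_of_automatic_concepts
# Recall: share of manual concepts recovered by the extractor.
recall = number_of_fully_correct_manual_concepts / number_of_manual_concepts

print(f"exact-match precision: {precision:.3f}")  # 0.345
print(f"exact-match recall: {recall:.3f}")        # 0.322
```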
|
_docs/nbs/wikirecs-03-data-transformation.ipynb | ###Markdown
---

Setup
###Code
!pip install -q git+https://github.com/sparsh-ai/recochef.git
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from recochef.datasets.wikirecs import WikiRecs
from utils import *
from wiki_pull import *
%matplotlib inline
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Data loading
###Code
wikidata = WikiRecs()
df = wikidata.load_interactions()
df.info()
df.head()
###Output
_____no_output_____
###Markdown
EDA
###Code
# Look at the distribution of edit counts
edit_counts = df.groupby('USERID').USERID.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# Most edits by user
df.groupby(['USERID','USERNAME']).USERID.count().sort_values(ascending=False)
# Find the elbow in number of edits
plt.plot(df.groupby(['USERID','USERNAME']).USERID.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# What are the most popular pages (edited by the most users)
page_popularity = df.drop_duplicates(subset=['TITLE','USERNAME']).groupby('TITLE').count().USERNAME.sort_values()
page_popularity.iloc[-1000:].iloc[::-1]
df.sample().USERNAME
cols = ['userid', 'user', 'pageid', 'title',
'timestamp', 'sizediff']
oneuser = get_edit_history(user="SanAnMan",
latest_timestamp="2021-07-08T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
oneuser
###Output
_____no_output_____
###Markdown
Data cleaning

Remove consecutive edits and summarize runs
###Code
%%time
def remove_consecutive_edits(df):
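    # Work over raw row lists instead of DataFrame rows: itertools.groupby on
    # plain Python lists is far cheaper for millions of edits. `c` maps each
    # column name to its positional index, and consecutive rows sharing the
    # same (USERID, ITEMID) key are collapsed into one summarized run.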
c = dict(zip(df.columns, range(len(df.columns))))
keyfunc = lambda x: (x[c['USERID']],x[c['ITEMID']])
first_and_last = lambda run: [run[0][c['USERID']],
run[0][c['USERNAME']],
run[0][c['ITEMID']],
run[0][c['TITLE']],
run[-1][c['TIMESTAMP']],
run[0][c['TIMESTAMP']],
sum([abs(r[c['SIZEDIFF']]) for r in run]),
len(run)]
d = df.values.tolist()
return pd.DataFrame([first_and_last(list(g)) for k,g in itertools.groupby(d, key=keyfunc)],
columns=['USERID', 'USER', 'ITEMID', 'TITLE', 'FIRST_TIMESTAMP', 'LAST_TIMESTAMP','SUM_SIZEDIFF','CONSECUTIVE_EDITS'])
clean_df = remove_consecutive_edits(df)
###Output
CPU times: user 58.4 s, sys: 2.15 s, total: 1min
Wall time: 1min
###Markdown
Remove top N most popular pages
###Code
# Get the top most popular pages
TOPN = 20
popularpages = df.drop_duplicates(subset=['TITLE','ITEMID','USERID']).groupby(['TITLE','ITEMID']).count().USERNAME.sort_values()[-TOPN:]
popularpages
# Remove those popular pages
before_count = len(df)
popular_pageids = popularpages.index.get_level_values(level='ITEMID').values
is_popular_page_edit = clean_df.ITEMID.isin(popular_pageids)
clean_df = clean_df.loc[~is_popular_page_edit].copy()
all_histories = None
after_count = len(clean_df)
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
print("Number of unique page ids: {}".format(len(clean_df.ITEMID.unique())))
###Output
Number of unique page ids: 2461524
###Markdown
Remove users with too many or too few edits
###Code
MIN_EDITS = 5
MAX_EDITS = 10000
# Get user edit counts
all_user_edit_counts = clean_df.groupby(['USERID','USER']).USERID.count()
# Remove users with too few edits
keep_user = all_user_edit_counts.values >= MIN_EDITS
# Remove users with too many edits
keep_user = keep_user & (all_user_edit_counts.values <= MAX_EDITS)
# Remove users with "bot" in the name
is_bot = ['bot' in username.lower() for username in all_user_edit_counts.index.get_level_values(1).values]
keep_user = keep_user & ~np.array(is_bot)
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# Remove those users
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_df = clean_df.loc[clean_df.USERID.isin(userids_to_keep)]
clean_df = clean_df.reset_index(drop=True)
print("Length after removing users: {}".format(len(clean_df)))
###Output
Length after removing users: 5907255
###Markdown
Build lookup tables
###Code
# Page id to title and back
lookup = clean_df.drop_duplicates(subset=['ITEMID']).loc[:,['ITEMID','TITLE']]
p2t = dict(zip(lookup.ITEMID, lookup.TITLE))
t2p = dict(zip(lookup.TITLE, lookup.ITEMID))
# User id to name and back
lookup = clean_df.drop_duplicates(subset=['USERID']).loc[:,['USERID','USER']]
u2n = dict(zip(lookup.USERID, lookup.USER))
n2u = dict(zip(lookup.USER, lookup.USERID))
# Page id and userid to index in cooccurence matrix and back
pageids = np.sort(clean_df.ITEMID.unique())
userids = np.sort(clean_df.USERID.unique())
p2i = {pageid:i for i, pageid in enumerate(pageids)}
u2i = {userid:i for i, userid in enumerate(userids)}
i2p = {v: k for k, v in p2i.items()}
i2u = {v: k for k, v in u2i.items()}
# User name and page title to index and back
n2i = {k:u2i[v] for k, v in n2u.items() if v in u2i}
t2i = {k:p2i[v] for k, v in t2p.items() if v in p2i}
i2n = {v: k for k, v in n2i.items()}
i2t = {v: k for k, v in t2i.items()}
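# Illustrative sanity check: each mapping and its inverse should round-trip.
assert all(i2p[p2i[pid]] == pid for pid in pageids[:10])
assert all(i2u[u2i[uid]] == uid for uid in userids[:10])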
###Output
_____no_output_____
###Markdown
Build test and training set
###Code
# Make a test set from the most recent edit by each user
histories_test = clean_df.groupby(['USERID','USER'],as_index=False).first()
# Subtract it from the rest to make the training set
histories_train = dataframe_set_subtract(clean_df, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['USERID','USER'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
print("Number of pages in training set: {}".format(len(histories_train.ITEMID.unique())))
print("Number of users in training set: {}".format(len(histories_train.USERID.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['TITLE','USER']).groupby('TITLE').count().USER > 1)))
resurface_userids, discovery_userids = get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
###Output
7760 out of 28090 userids are resurfaced (27.6%)
20330 out of 28090 userids are discovered (72.4%)
###Markdown
Build matrix for implicit collaborative filtering
###Code
# Get the user/page edit counts
for_implicit = histories_train.groupby(["USERID","ITEMID"]).count().FIRST_TIMESTAMP.reset_index().rename(columns={'FIRST_TIMESTAMP':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
row = np.array([p2i[p] for p in for_implicit.ITEMID.values])
col = np.array([u2i[u] for u in for_implicit.USERID.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
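# Rows index pages and columns index users, so implicit_matrix[:, u2i[u]] is
# the per-page edit-count vector for user u. Building in COO form first keeps
# construction fast; converting to CSC then makes column slices cheap.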
###Output
_____no_output_____
###Markdown
Saving artifacts
###Code
save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), 'lookup_tables.pickle')
save_pickle((userids, pageids), 'users_and_pages.pickle')
save_pickle((resurface_userids, discovery_userids), 'resurface_discovery_users.pickle')
save_pickle(implicit_matrix,'implicit_matrix.pickle')
###Output
_____no_output_____ |
notebooks/kubeflow_pipelines/pipelines/labs/kfp_pipeline_vertex.ipynb | ###Markdown
Continuous Training with Kubeflow Pipeline and Vertex AI

**Learning Objectives:**
1. Learn how to use KF pre-built components
1. Learn how to use KF lightweight python components
1. Learn how to build a KF pipeline with these components
1. Learn how to compile, upload, and run a KF pipeline

In this lab, you will build, deploy, and run a KFP pipeline that orchestrates the **Vertex AI** services to train, tune, and deploy a **scikit-learn** model.

Setup
###Code
from google.cloud import aiplatform
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
###Output
env: PATH=/home/jupyter/.local/bin:/usr/local/cuda/bin:/opt/conda/bin:/opt/conda/condabin:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
###Markdown
Understanding the pipeline design

The workflow implemented by the pipeline is defined using a Python-based Domain Specific Language (DSL). The pipeline's DSL is in the `pipeline_vertex/pipeline.py` file that we will generate below.

The pipeline's DSL has been designed to avoid hardcoding any environment-specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables.

Build the trainer image

The training step in the pipeline will require a custom training container. The custom training image is defined in `trainer_image_vertex/Dockerfile`.
###Code
!cat trainer_image_vertex/Dockerfile
###Output
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
###Markdown
Let's now build and push this trainer container to the container registry:
###Code
IMAGE_NAME = "trainer_image_covertype_vertex"
TAG = "latest"
TRAINING_CONTAINER_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
###Output
Creating temporary tarball archive of 2 file(s) totalling 3.6 KiB before compression.
Uploading tarball of [trainer_image_vertex] to [gs://qwiklabs-gcp-01-37ab11ee03f8_cloudbuild/source/1644591903.562774-7814ce5f10094dca804cd67e9488e414.tgz]
Created [https://cloudbuild.googleapis.com/v1/projects/qwiklabs-gcp-01-37ab11ee03f8/locations/global/builds/81a3118d-3993-45a8-97e8-01443fb525ea].
Logs are available at [https://console.cloud.google.com/cloud-build/builds/81a3118d-3993-45a8-97e8-01443fb525ea?project=562035846305].
----------------------------- REMOTE BUILD OUTPUT ------------------------------
starting build "81a3118d-3993-45a8-97e8-01443fb525ea"
FETCHSOURCE
Fetching storage object: gs://qwiklabs-gcp-01-37ab11ee03f8_cloudbuild/source/1644591903.562774-7814ce5f10094dca804cd67e9488e414.tgz#1644591903735106
Copying gs://qwiklabs-gcp-01-37ab11ee03f8_cloudbuild/source/1644591903.562774-7814ce5f10094dca804cd67e9488e414.tgz#1644591903735106...
/ [1 files][ 1.7 KiB/ 1.7 KiB]
Operation completed over 1 objects/1.7 KiB.
BUILD
Already have image (with digest): gcr.io/cloud-builders/docker
Sending build context to Docker daemon 6.144kB
Step 1/5 : FROM gcr.io/deeplearning-platform-release/base-cpu
latest: Pulling from deeplearning-platform-release/base-cpu
ea362f368469: Already exists
eac27809cab6: Pulling fs layer
036adb2e026f: Pulling fs layer
02a952c9f89d: Pulling fs layer
4f4fb700ef54: Pulling fs layer
0ae3f8214e8b: Pulling fs layer
ca41810bd5e2: Pulling fs layer
b72e35350998: Pulling fs layer
c95a831d214e: Pulling fs layer
dd21cbaee501: Pulling fs layer
34c0d5f571ee: Pulling fs layer
cffd6b808cdb: Pulling fs layer
0c9fca2a66fe: Pulling fs layer
e7e70d8d1c2f: Pulling fs layer
13bd35af8cff: Pulling fs layer
549a6d6636b4: Pulling fs layer
812c2650a52b: Pulling fs layer
171e3814b2ec: Pulling fs layer
4f4fb700ef54: Waiting
0ae3f8214e8b: Waiting
ca41810bd5e2: Waiting
b72e35350998: Waiting
c95a831d214e: Waiting
dd21cbaee501: Waiting
34c0d5f571ee: Waiting
cffd6b808cdb: Waiting
0c9fca2a66fe: Waiting
e7e70d8d1c2f: Waiting
13bd35af8cff: Waiting
549a6d6636b4: Waiting
812c2650a52b: Waiting
171e3814b2ec: Waiting
eac27809cab6: Verifying Checksum
eac27809cab6: Download complete
eac27809cab6: Pull complete
4f4fb700ef54: Download complete
0ae3f8214e8b: Verifying Checksum
0ae3f8214e8b: Download complete
02a952c9f89d: Verifying Checksum
02a952c9f89d: Download complete
b72e35350998: Verifying Checksum
b72e35350998: Download complete
ca41810bd5e2: Verifying Checksum
ca41810bd5e2: Download complete
c95a831d214e: Verifying Checksum
c95a831d214e: Download complete
dd21cbaee501: Verifying Checksum
dd21cbaee501: Download complete
34c0d5f571ee: Verifying Checksum
34c0d5f571ee: Download complete
0c9fca2a66fe: Verifying Checksum
0c9fca2a66fe: Download complete
cffd6b808cdb: Verifying Checksum
cffd6b808cdb: Download complete
e7e70d8d1c2f: Verifying Checksum
e7e70d8d1c2f: Download complete
13bd35af8cff: Verifying Checksum
13bd35af8cff: Download complete
549a6d6636b4: Verifying Checksum
549a6d6636b4: Download complete
036adb2e026f: Download complete
171e3814b2ec: Verifying Checksum
171e3814b2ec: Download complete
812c2650a52b: Verifying Checksum
812c2650a52b: Download complete
036adb2e026f: Pull complete
02a952c9f89d: Pull complete
4f4fb700ef54: Pull complete
0ae3f8214e8b: Pull complete
ca41810bd5e2: Pull complete
b72e35350998: Pull complete
c95a831d214e: Pull complete
dd21cbaee501: Pull complete
34c0d5f571ee: Pull complete
cffd6b808cdb: Pull complete
0c9fca2a66fe: Pull complete
e7e70d8d1c2f: Pull complete
13bd35af8cff: Pull complete
549a6d6636b4: Pull complete
812c2650a52b: Pull complete
171e3814b2ec: Pull complete
Digest: sha256:0ff776d12620e1526f999481051595075692b977a0ce7bbf573208eed5867823
Status: Downloaded newer image for gcr.io/deeplearning-platform-release/base-cpu:latest
---> 70d8dcc15a81
Step 2/5 : RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
---> Running in 71888476569f
Collecting fire
Downloading fire-0.4.0.tar.gz (87 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 87.7/87.7 KB 5.2 MB/s eta 0:00:00
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Collecting cloudml-hypertune
Downloading cloudml-hypertune-0.1.0.dev6.tar.gz (3.2 kB)
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Collecting scikit-learn==0.20.4
Downloading scikit_learn-0.20.4-cp37-cp37m-manylinux1_x86_64.whl (5.4 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.4/5.4 MB 41.8 MB/s eta 0:00:00
Collecting pandas==0.24.2
Downloading pandas-0.24.2-cp37-cp37m-manylinux1_x86_64.whl (10.1 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.1/10.1 MB 51.7 MB/s eta 0:00:00
Requirement already satisfied: scipy>=0.13.3 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.7.3)
Requirement already satisfied: numpy>=1.8.2 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.19.5)
Requirement already satisfied: pytz>=2011k in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2021.3)
Requirement already satisfied: python-dateutil>=2.5.0 in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2.8.2)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from fire) (1.16.0)
Collecting termcolor
Downloading termcolor-1.1.0.tar.gz (3.9 kB)
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Building wheels for collected packages: fire, cloudml-hypertune, termcolor
Building wheel for fire (setup.py): started
Building wheel for fire (setup.py): finished with status 'done'
Created wheel for fire: filename=fire-0.4.0-py2.py3-none-any.whl size=115942 sha256=f2d5fc06ea3e8e859c5fc90467fc440f02b3b68b6fc2327a7ebaeb844a20f8f9
Stored in directory: /root/.cache/pip/wheels/8a/67/fb/2e8a12fa16661b9d5af1f654bd199366799740a85c64981226
Building wheel for cloudml-hypertune (setup.py): started
Building wheel for cloudml-hypertune (setup.py): finished with status 'done'
Created wheel for cloudml-hypertune: filename=cloudml_hypertune-0.1.0.dev6-py2.py3-none-any.whl size=3987 sha256=426b9cc2d96f817f6be4af8f397ffd7fd0e2af4ff379d539e78aa803334d4ed7
Stored in directory: /root/.cache/pip/wheels/a7/ff/87/e7bed0c2741fe219b3d6da67c2431d7f7fedb183032e00f81e
Building wheel for termcolor (setup.py): started
Building wheel for termcolor (setup.py): finished with status 'done'
Created wheel for termcolor: filename=termcolor-1.1.0-py3-none-any.whl size=4848 sha256=a3ce926968efe4f0ccd27f47b85c424fe3ea90766d886d00a323c96c0a2e1e7e
Stored in directory: /root/.cache/pip/wheels/3f/e3/ec/8a8336ff196023622fbcb36de0c5a5c218cbb24111d1d4c7f2
Successfully built fire cloudml-hypertune termcolor
Installing collected packages: termcolor, cloudml-hypertune, fire, scikit-learn, pandas
Attempting uninstall: scikit-learn
Found existing installation: scikit-learn 1.0.2
Uninstalling scikit-learn-1.0.2:
Successfully uninstalled scikit-learn-1.0.2
Attempting uninstall: pandas
Found existing installation: pandas 1.3.5
Uninstalling pandas-1.3.5:
Successfully uninstalled pandas-1.3.5
[91mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
visions 0.7.4 requires pandas>=0.25.3, but you have pandas 0.24.2 which is incompatible.
statsmodels 0.13.1 requires pandas>=0.25, but you have pandas 0.24.2 which is incompatible.
phik 0.12.0 requires pandas>=0.25.1, but you have pandas 0.24.2 which is incompatible.
pandas-profiling 3.1.0 requires pandas!=1.0.0,!=1.0.1,!=1.0.2,!=1.1.0,>=0.25.3, but you have pandas 0.24.2 which is incompatible.
[0mSuccessfully installed cloudml-hypertune-0.1.0.dev6 fire-0.4.0 pandas-0.24.2 scikit-learn-0.20.4 termcolor-1.1.0
[91mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
[0mRemoving intermediate container 71888476569f
---> a24394adcb08
Step 3/5 : WORKDIR /app
---> Running in 9e89a25a5010
Removing intermediate container 9e89a25a5010
---> ab44a4002051
Step 4/5 : COPY train.py .
---> c00bdb80419e
Step 5/5 : ENTRYPOINT ["python", "train.py"]
---> Running in c1685daf1cbc
Removing intermediate container c1685daf1cbc
---> c88c6e08705b
Successfully built c88c6e08705b
Successfully tagged gcr.io/qwiklabs-gcp-01-37ab11ee03f8/trainer_image_covertype_vertex:latest
PUSH
Pushing gcr.io/qwiklabs-gcp-01-37ab11ee03f8/trainer_image_covertype_vertex:latest
The push refers to repository [gcr.io/qwiklabs-gcp-01-37ab11ee03f8/trainer_image_covertype_vertex]
18432178705f: Preparing
180f3e800303: Preparing
05b783271ab3: Preparing
afdacae73a44: Preparing
beceb4a3223c: Preparing
b1e73422ceb7: Preparing
5b99d0f1aa52: Preparing
dbd6221f1b98: Preparing
4402691a71a1: Preparing
883e47620bc6: Preparing
f5e5c749d02e: Preparing
52ef15a58fce: Preparing
b94b9d90a09e: Preparing
f2c55a6fb80d: Preparing
1b7bf230df94: Preparing
0e19a08a8060: Preparing
5f70bf18a086: Preparing
36a8dea33eff: Preparing
dfe5bb6eff86: Preparing
57b271862993: Preparing
0eba131dffd0: Preparing
b1e73422ceb7: Waiting
5b99d0f1aa52: Waiting
dbd6221f1b98: Waiting
4402691a71a1: Waiting
883e47620bc6: Waiting
f5e5c749d02e: Waiting
52ef15a58fce: Waiting
b94b9d90a09e: Waiting
f2c55a6fb80d: Waiting
1b7bf230df94: Waiting
0e19a08a8060: Waiting
5f70bf18a086: Waiting
36a8dea33eff: Waiting
dfe5bb6eff86: Waiting
57b271862993: Waiting
0eba131dffd0: Waiting
afdacae73a44: Layer already exists
beceb4a3223c: Layer already exists
b1e73422ceb7: Layer already exists
5b99d0f1aa52: Layer already exists
dbd6221f1b98: Layer already exists
4402691a71a1: Layer already exists
883e47620bc6: Layer already exists
f5e5c749d02e: Layer already exists
52ef15a58fce: Layer already exists
b94b9d90a09e: Layer already exists
f2c55a6fb80d: Layer already exists
1b7bf230df94: Layer already exists
0e19a08a8060: Layer already exists
5f70bf18a086: Layer already exists
36a8dea33eff: Layer already exists
57b271862993: Layer already exists
dfe5bb6eff86: Layer already exists
0eba131dffd0: Layer already exists
18432178705f: Pushed
180f3e800303: Pushed
05b783271ab3: Pushed
latest: digest: sha256:0aa609748495b25867e408f42de3d6ec6b33f743890c55198444f1c39a690aa9 size: 4707
DONE
--------------------------------------------------------------------------------
ID CREATE_TIME DURATION SOURCE IMAGES STATUS
81a3118d-3993-45a8-97e8-01443fb525ea 2022-02-11T15:05:03+00:00 2M4S gs://qwiklabs-gcp-01-37ab11ee03f8_cloudbuild/source/1644591903.562774-7814ce5f10094dca804cd67e9488e414.tgz gcr.io/qwiklabs-gcp-01-37ab11ee03f8/trainer_image_covertype_vertex (+1 more) SUCCESS
###Markdown
To serve the model with the same ML framework version that we use at training time, we will have to supply the following serving container to the pipeline:
###Code
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
###Output
_____no_output_____
###Markdown
**Note:** If you change the version of the training ML framework you'll have to supply a serving container with a matching version (see [pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)).

Building and deploying the pipeline

Let us write the pipeline to disk:

Exercise

Implement
1. the `train_and_deploy` function in `pipeline_vertex/training_lightweight_component.py`
1. the `tune_hyperparameters` function in `pipeline_vertex/tuning_lightweight_component.py`

and complete the TODOs in the `pipeline.py` file below:
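For orientation, since neither component's implementation is shown in this notebook, here is one possible sketch of the `tune_hyperparameters` component, assuming the `HyperparameterTuningJob` API from `google-cloud-aiplatform`. The machine type, search space, and trainer CLI flags are illustrative assumptions, not the lab's reference solution:

```python
from typing import NamedTuple


def tune_hyperparameters(
    project: str,
    location: str,
    container_uri: str,
    training_file_path: str,
    validation_file_path: str,
    staging_bucket: str,
    max_trial_count: int,
    parallel_trial_count: int,
) -> NamedTuple(
    "Outputs",
    [("best_accuracy", float), ("best_alpha", float), ("best_max_iter", int)],
):
    # Imports live inside the function body: KFP lightweight components are
    # serialized and executed in their own container image.
    from google.cloud import aiplatform
    from google.cloud.aiplatform import hyperparameter_tuning as hpt

    aiplatform.init(project=project, location=location,
                    staging_bucket=staging_bucket)

    # The trainer flags below are assumptions about train.py's CLI; Vertex AI
    # appends the per-trial --alpha=... and --max_iter=... flags itself.
    worker_pool_specs = [{
        "machine_spec": {"machine_type": "n1-standard-4"},
        "replica_count": 1,
        "container_spec": {
            "image_uri": container_uri,
            "args": [
                f"--training_dataset_path={training_file_path}",
                f"--validation_dataset_path={validation_file_path}",
                "--hptune",
            ],
        },
    }]

    custom_job = aiplatform.CustomJob(
        display_name="covertype_kfp_trial_job",
        worker_pool_specs=worker_pool_specs,
    )

    hp_job = aiplatform.HyperparameterTuningJob(
        display_name="covertype_kfp_tuning_job",
        custom_job=custom_job,
        metric_spec={"accuracy": "maximize"},
        parameter_spec={
            # Illustrative search space for the SGDClassifier in train.py.
            "alpha": hpt.DoubleParameterSpec(min=1e-4, max=1e-1, scale="linear"),
            "max_iter": hpt.DiscreteParameterSpec(values=[200, 500], scale="linear"),
        },
        max_trial_count=max_trial_count,
        parallel_trial_count=parallel_trial_count,
    )
    hp_job.run()

    # Pick the trial with the best reported accuracy and surface its
    # hyperparameters as the component outputs consumed by pipeline.py.
    accuracies = [t.final_measurement.metrics[0].value for t in hp_job.trials]
    best = hp_job.trials[accuracies.index(max(accuracies))]
    params = {p.parameter_id: p.value for p in best.parameters}
    return (float(max(accuracies)), float(params["alpha"]),
            int(params["max_iter"]))
```

A `train_and_deploy` component would follow the same pattern, typically wrapping a custom training job and then uploading and deploying the resulting model to an endpoint.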
###Code
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
tune_hyperparameters_component = create_component_from_func_v2(
tune_hyperparameters,
base_image="python:3.8",
output_component_file="covertype_kfp_tune_hyperparameters.yaml",
packages_to_install=["google-cloud-aiplatform"],
)
train_and_deploy_component = create_component_from_func_v2(
train_and_deploy,
base_image="python:3.8",
output_component_file="covertype_kfp_train_and_deploy.yaml",
packages_to_install=["google-cloud-aiplatform"],
)
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f"{pipeline_root}/staging"
tuning_op = tune_hyperparameters_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
)
accuracy = tuning_op.outputs["best_accuracy"]
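    # dsl.Condition gates deployment: the train-and-deploy step below is only
    # scheduled when the best tuned accuracy clears the deployment threshold.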
with dsl.Condition(
accuracy >= accuracy_deployment_threshold, name="deploy_decision"
):
train_and_deploy_op = ( # pylint: disable=unused-variable
train_and_deploy_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
serving_container_uri=serving_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
alpha=tuning_op.outputs["best_alpha"],
max_iter=tuning_op.outputs["best_max_iter"],
)
)
###Output
Overwriting ./pipeline_vertex/pipeline.py
###Markdown
Compile the pipeline

Let's start by defining the environment variables that will be passed to the pipeline compiler:
###Code
ARTIFACT_STORE = f"gs://{PROJECT_ID}-kfp-artifact-store"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
###Output
env: PIPELINE_ROOT=gs://qwiklabs-gcp-01-37ab11ee03f8-kfp-artifact-store/pipeline
env: PROJECT_ID=qwiklabs-gcp-01-37ab11ee03f8
env: REGION=us-central1
env: SERVING_CONTAINER_IMAGE_URI=us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest
env: TRAINING_CONTAINER_IMAGE_URI=gcr.io/qwiklabs-gcp-01-37ab11ee03f8/trainer_image_covertype_vertex:latest
env: TRAINING_FILE_PATH=gs://qwiklabs-gcp-01-37ab11ee03f8-kfp-artifact-store/data/training/dataset.csv
env: VALIDATION_FILE_PATH=gs://qwiklabs-gcp-01-37ab11ee03f8-kfp-artifact-store/data/validation/dataset.csv
###Markdown
Let us make sure that the `ARTIFACT_STORE` has been created, and let us create it if not:
###Code
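# Create the bucket only if `gsutil ls` does not already list it: the grep
# succeeds when the bucket exists, so the short-circuiting `||` skips `mb`.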
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
###Output
gs://qwiklabs-gcp-01-37ab11ee03f8-kfp-artifact-store/
###Markdown
**Note:** In case the artifact store was not created and properly set beforehand, you may need to run the following command in **CloudShell** to allow Vertex AI to access it:

```
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects list --filter="name=$PROJECT_ID" --format="value(PROJECT_NUMBER)")
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member="serviceAccount:[email protected]" \
    --role="roles/storage.objectAdmin"
```

Use the CLI compiler to compile the pipeline

We compile the pipeline from the Python file we generated into a JSON description using the following command:
###Code
PIPELINE_JSON = "covertype_kfp_pipeline.json"
###Output
_____no_output_____
###Markdown
ExerciseCompile the `pipeline_vertex/pipeline.py` with the `dsl-compile-v2` command line:
###Code
!dsl-compile-v2 --py pipeline_vertex/pipeline.py --output $PIPELINE_JSON
###Output
/opt/conda/lib/python3.7/site-packages/kfp/components/_python_op.py:987: FutureWarning: create_component_from_func_v2() has been deprecated and will be removed in KFP v1.9. Please use @kfp.v2.dsl.component() instead.
category=FutureWarning,
/opt/conda/lib/python3.7/site-packages/kfp/v2/compiler/compiler.py:1266: FutureWarning: APIs imported from the v1 namespace (e.g. kfp.dsl, kfp.components, etc) will not be supported by the v2 compiler since v2.0.0
category=FutureWarning,
###Markdown
**Note:** You can also use the Python SDK to compile the pipeline from its python function:

```python
compiler.Compiler().compile(
    pipeline_func=covertype_train,
    package_path=PIPELINE_JSON,
)
```

The result is the pipeline file.
###Code
!head {PIPELINE_JSON}
###Output
{
"pipelineSpec": {
"components": {
"comp-condition-deploy-decision-1": {
"dag": {
"tasks": {
"train-and-deploy": {
"cachingOptions": {
"enableCache": true
},
###Markdown
Deploy the pipeline package

Exercise

Upload the pipeline to Vertex AI and run it using `aiplatform.PipelineJob`:
###Code
aiplatform.init(project=PROJECT_ID, location=REGION)
pipeline = aiplatform.PipelineJob(
display_name="covertype_kfp_pipeline",
template_path=PIPELINE_JSON,
enable_caching=False,
)
pipeline.run()
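# run() blocks until the pipeline finishes; pipeline.run(sync=False) would
# submit the job and return immediately instead.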
###Output
INFO:google.cloud.aiplatform.pipeline_jobs:Creating PipelineJob
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob created. Resource name: projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331
INFO:google.cloud.aiplatform.pipeline_jobs:To use this PipelineJob in another session:
INFO:google.cloud.aiplatform.pipeline_jobs:pipeline_job = aiplatform.PipelineJob.get('projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331')
INFO:google.cloud.aiplatform.pipeline_jobs:View Pipeline Job:
https://console.cloud.google.com/vertex-ai/locations/us-central1/pipelines/runs/covertype-kfp-pipeline-20220211151331?project=562035846305
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/562035846305/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211151331 current state:
PipelineState.PIPELINE_STATE_RUNNING
###Markdown
Continuous Training with Kubeflow Pipeline and Vertex AI **Learning Objectives:**1. Learn how to use KF pre-built components1. Learn how to use KF lightweight python components1. Learn how to build a KF pipeline with these components1. Learn how to compile, upload, and run a KF pipelineIn this lab, you will build, deploy, and run a KFP pipeline that orchestrates the **Vertex AI** services to train, tune, and deploy a **scikit-learn** model. Setup
###Code
from google.cloud import aiplatform
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
###Output
env: PATH=/home/jupyter/.local/bin:/usr/local/cuda/bin:/opt/conda/bin:/opt/conda/condabin:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
###Markdown
Understanding the pipeline design The workflow implemented by the pipeline is defined using a Python based Domain Specific Language (DSL). The pipeline's DSL is in the `pipeline_vertex/pipeline.py` file that we will generate below.The pipeline's DSL has been designed to avoid hardcoding any environment specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables. Build the trainer image The training step in the pipeline will require a custom training container. The custom training image is defined in `trainer_image/Dockerfile`.
###Code
!cat trainer_image_vertex/Dockerfile
###Output
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
###Markdown
Let's now build and push this trainer container to the container registry:
###Code
IMAGE_NAME = "trainer_image_covertype_vertex"
TAG = "latest"
TRAINING_CONTAINER_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
###Output
Creating temporary tarball archive of 2 file(s) totalling 3.6 KiB before compression.
Uploading tarball of [trainer_image_vertex] to [gs://qwiklabs-gcp-02-7680e21dd047_cloudbuild/source/1644591498.482132-cefa4104558440ee8b44d411e7c9275f.tgz]
Created [https://cloudbuild.googleapis.com/v1/projects/qwiklabs-gcp-02-7680e21dd047/locations/global/builds/ced17aac-fd5e-4913-aa52-f6856ab311a3].
Logs are available at [https://console.cloud.google.com/cloud-build/builds/ced17aac-fd5e-4913-aa52-f6856ab311a3?project=517861155353].
----------------------------- REMOTE BUILD OUTPUT ------------------------------
starting build "ced17aac-fd5e-4913-aa52-f6856ab311a3"
FETCHSOURCE
Fetching storage object: gs://qwiklabs-gcp-02-7680e21dd047_cloudbuild/source/1644591498.482132-cefa4104558440ee8b44d411e7c9275f.tgz#1644591498717050
Copying gs://qwiklabs-gcp-02-7680e21dd047_cloudbuild/source/1644591498.482132-cefa4104558440ee8b44d411e7c9275f.tgz#1644591498717050...
/ [1 files][ 1.7 KiB/ 1.7 KiB]
Operation completed over 1 objects/1.7 KiB.
BUILD
Already have image (with digest): gcr.io/cloud-builders/docker
Sending build context to Docker daemon 6.144kB
Step 1/5 : FROM gcr.io/deeplearning-platform-release/base-cpu
latest: Pulling from deeplearning-platform-release/base-cpu
ea362f368469: Pulling fs layer
eac27809cab6: Pulling fs layer
036adb2e026f: Pulling fs layer
02a952c9f89d: Pulling fs layer
4f4fb700ef54: Pulling fs layer
0ae3f8214e8b: Pulling fs layer
ca41810bd5e2: Pulling fs layer
b72e35350998: Pulling fs layer
c95a831d214e: Pulling fs layer
dd21cbaee501: Pulling fs layer
34c0d5f571ee: Pulling fs layer
cffd6b808cdb: Pulling fs layer
0c9fca2a66fe: Pulling fs layer
e7e70d8d1c2f: Pulling fs layer
13bd35af8cff: Pulling fs layer
549a6d6636b4: Pulling fs layer
812c2650a52b: Pulling fs layer
171e3814b2ec: Pulling fs layer
02a952c9f89d: Waiting
4f4fb700ef54: Waiting
0ae3f8214e8b: Waiting
ca41810bd5e2: Waiting
b72e35350998: Waiting
c95a831d214e: Waiting
dd21cbaee501: Waiting
34c0d5f571ee: Waiting
cffd6b808cdb: Waiting
0c9fca2a66fe: Waiting
e7e70d8d1c2f: Waiting
13bd35af8cff: Waiting
549a6d6636b4: Waiting
812c2650a52b: Waiting
171e3814b2ec: Waiting
eac27809cab6: Verifying Checksum
eac27809cab6: Download complete
ea362f368469: Verifying Checksum
ea362f368469: Download complete
4f4fb700ef54: Verifying Checksum
4f4fb700ef54: Download complete
0ae3f8214e8b: Verifying Checksum
0ae3f8214e8b: Download complete
02a952c9f89d: Verifying Checksum
02a952c9f89d: Download complete
b72e35350998: Verifying Checksum
b72e35350998: Download complete
ca41810bd5e2: Verifying Checksum
ca41810bd5e2: Download complete
c95a831d214e: Verifying Checksum
c95a831d214e: Download complete
dd21cbaee501: Verifying Checksum
dd21cbaee501: Download complete
34c0d5f571ee: Verifying Checksum
34c0d5f571ee: Download complete
cffd6b808cdb: Verifying Checksum
cffd6b808cdb: Download complete
0c9fca2a66fe: Verifying Checksum
0c9fca2a66fe: Download complete
e7e70d8d1c2f: Verifying Checksum
e7e70d8d1c2f: Download complete
13bd35af8cff: Verifying Checksum
13bd35af8cff: Download complete
549a6d6636b4: Verifying Checksum
549a6d6636b4: Download complete
171e3814b2ec: Verifying Checksum
171e3814b2ec: Download complete
036adb2e026f: Verifying Checksum
036adb2e026f: Download complete
ea362f368469: Pull complete
eac27809cab6: Pull complete
812c2650a52b: Verifying Checksum
812c2650a52b: Download complete
036adb2e026f: Pull complete
02a952c9f89d: Pull complete
4f4fb700ef54: Pull complete
0ae3f8214e8b: Pull complete
ca41810bd5e2: Pull complete
b72e35350998: Pull complete
c95a831d214e: Pull complete
dd21cbaee501: Pull complete
34c0d5f571ee: Pull complete
cffd6b808cdb: Pull complete
0c9fca2a66fe: Pull complete
e7e70d8d1c2f: Pull complete
13bd35af8cff: Pull complete
549a6d6636b4: Pull complete
812c2650a52b: Pull complete
171e3814b2ec: Pull complete
Digest: sha256:0ff776d12620e1526f999481051595075692b977a0ce7bbf573208eed5867823
Status: Downloaded newer image for gcr.io/deeplearning-platform-release/base-cpu:latest
---> 70d8dcc15a81
Step 2/5 : RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
---> Running in 23d5e1a764cd
Collecting fire
Downloading fire-0.4.0.tar.gz (87 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 87.7/87.7 KB 12.4 MB/s eta 0:00:00
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Collecting cloudml-hypertune
Downloading cloudml-hypertune-0.1.0.dev6.tar.gz (3.2 kB)
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Collecting scikit-learn==0.20.4
Downloading scikit_learn-0.20.4-cp37-cp37m-manylinux1_x86_64.whl (5.4 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.4/5.4 MB 47.7 MB/s eta 0:00:00
Collecting pandas==0.24.2
Downloading pandas-0.24.2-cp37-cp37m-manylinux1_x86_64.whl (10.1 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.1/10.1 MB 47.8 MB/s eta 0:00:00
Requirement already satisfied: numpy>=1.8.2 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.19.5)
Requirement already satisfied: scipy>=0.13.3 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.7.3)
Requirement already satisfied: python-dateutil>=2.5.0 in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2.8.2)
Requirement already satisfied: pytz>=2011k in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2021.3)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from fire) (1.16.0)
Collecting termcolor
Downloading termcolor-1.1.0.tar.gz (3.9 kB)
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Building wheels for collected packages: fire, cloudml-hypertune, termcolor
Building wheel for fire (setup.py): started
Building wheel for fire (setup.py): finished with status 'done'
Created wheel for fire: filename=fire-0.4.0-py2.py3-none-any.whl size=115942 sha256=e76be26e165801ea11819220f4acce17f52ec47b4ad7b9f8e75900fe3edba0d4
Stored in directory: /root/.cache/pip/wheels/8a/67/fb/2e8a12fa16661b9d5af1f654bd199366799740a85c64981226
Building wheel for cloudml-hypertune (setup.py): started
Building wheel for cloudml-hypertune (setup.py): finished with status 'done'
Created wheel for cloudml-hypertune: filename=cloudml_hypertune-0.1.0.dev6-py2.py3-none-any.whl size=3987 sha256=abb748cf5381e4ef52f2e8301af77c60c998b262dc3b6dd8e8786b27a7588ed2
Stored in directory: /root/.cache/pip/wheels/a7/ff/87/e7bed0c2741fe219b3d6da67c2431d7f7fedb183032e00f81e
Building wheel for termcolor (setup.py): started
Building wheel for termcolor (setup.py): finished with status 'done'
Created wheel for termcolor: filename=termcolor-1.1.0-py3-none-any.whl size=4848 sha256=23bbda49325ccf6924bfc9b7f0e8c087cdfa81ee08bd34d15de985d193c85381
Stored in directory: /root/.cache/pip/wheels/3f/e3/ec/8a8336ff196023622fbcb36de0c5a5c218cbb24111d1d4c7f2
Successfully built fire cloudml-hypertune termcolor
Installing collected packages: termcolor, cloudml-hypertune, fire, scikit-learn, pandas
Attempting uninstall: scikit-learn
Found existing installation: scikit-learn 1.0.2
Uninstalling scikit-learn-1.0.2:
Successfully uninstalled scikit-learn-1.0.2
Attempting uninstall: pandas
Found existing installation: pandas 1.3.5
Uninstalling pandas-1.3.5:
Successfully uninstalled pandas-1.3.5
[91mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
visions 0.7.4 requires pandas>=0.25.3, but you have pandas 0.24.2 which is incompatible.
statsmodels 0.13.1 requires pandas>=0.25, but you have pandas 0.24.2 which is incompatible.
phik 0.12.0 requires pandas>=0.25.1, but you have pandas 0.24.2 which is incompatible.
pandas-profiling 3.1.0 requires pandas!=1.0.0,!=1.0.1,!=1.0.2,!=1.1.0,>=0.25.3, but you have pandas 0.24.2 which is incompatible.
[0mSuccessfully installed cloudml-hypertune-0.1.0.dev6 fire-0.4.0 pandas-0.24.2 scikit-learn-0.20.4 termcolor-1.1.0
[91mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
[0mRemoving intermediate container 23d5e1a764cd
---> 5b040af20728
Step 3/5 : WORKDIR /app
---> Running in a98b42f72ea9
Removing intermediate container a98b42f72ea9
---> f447aa99b80f
Step 4/5 : COPY train.py .
---> f9a2a2d2db1f
Step 5/5 : ENTRYPOINT ["python", "train.py"]
---> Running in b0a8b2e71d9b
Removing intermediate container b0a8b2e71d9b
---> b89996e941b1
Successfully built b89996e941b1
Successfully tagged gcr.io/qwiklabs-gcp-02-7680e21dd047/trainer_image_covertype_vertex:latest
PUSH
Pushing gcr.io/qwiklabs-gcp-02-7680e21dd047/trainer_image_covertype_vertex:latest
The push refers to repository [gcr.io/qwiklabs-gcp-02-7680e21dd047/trainer_image_covertype_vertex]
1e862f6af6fc: Preparing
4857e62f9068: Preparing
63c1dd6939ee: Preparing
afdacae73a44: Preparing
beceb4a3223c: Preparing
b1e73422ceb7: Preparing
5b99d0f1aa52: Preparing
dbd6221f1b98: Preparing
4402691a71a1: Preparing
883e47620bc6: Preparing
f5e5c749d02e: Preparing
52ef15a58fce: Preparing
b94b9d90a09e: Preparing
f2c55a6fb80d: Preparing
1b7bf230df94: Preparing
0e19a08a8060: Preparing
5f70bf18a086: Preparing
36a8dea33eff: Preparing
dfe5bb6eff86: Preparing
57b271862993: Preparing
0eba131dffd0: Preparing
b1e73422ceb7: Waiting
5b99d0f1aa52: Waiting
dbd6221f1b98: Waiting
4402691a71a1: Waiting
883e47620bc6: Waiting
f5e5c749d02e: Waiting
52ef15a58fce: Waiting
b94b9d90a09e: Waiting
f2c55a6fb80d: Waiting
1b7bf230df94: Waiting
0e19a08a8060: Waiting
5f70bf18a086: Waiting
36a8dea33eff: Waiting
dfe5bb6eff86: Waiting
57b271862993: Waiting
0eba131dffd0: Waiting
beceb4a3223c: Layer already exists
afdacae73a44: Layer already exists
5b99d0f1aa52: Layer already exists
b1e73422ceb7: Layer already exists
dbd6221f1b98: Layer already exists
883e47620bc6: Layer already exists
4402691a71a1: Layer already exists
f5e5c749d02e: Layer already exists
52ef15a58fce: Layer already exists
b94b9d90a09e: Layer already exists
f2c55a6fb80d: Layer already exists
1b7bf230df94: Layer already exists
0e19a08a8060: Layer already exists
5f70bf18a086: Layer already exists
36a8dea33eff: Layer already exists
dfe5bb6eff86: Layer already exists
0eba131dffd0: Layer already exists
57b271862993: Layer already exists
4857e62f9068: Pushed
1e862f6af6fc: Pushed
63c1dd6939ee: Pushed
latest: digest: sha256:2abc5ffba53f2b210245499d63c712057820aa416ba2dcd55890661d12912f8f size: 4707
DONE
--------------------------------------------------------------------------------
ID CREATE_TIME DURATION SOURCE IMAGES STATUS
ced17aac-fd5e-4913-aa52-f6856ab311a3 2022-02-11T14:58:18+00:00 2M5S gs://qwiklabs-gcp-02-7680e21dd047_cloudbuild/source/1644591498.482132-cefa4104558440ee8b44d411e7c9275f.tgz gcr.io/qwiklabs-gcp-02-7680e21dd047/trainer_image_covertype_vertex (+1 more) SUCCESS
###Markdown
To match the ml framework version we use at training time while serving the model, we will have to supply the following serving container to the pipeline:
###Code
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
###Output
_____no_output_____
###Markdown
**Note:** If you change the version of the training ml framework you'll have to supply a serving container with matchin version (see [pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)). Building and deploying the pipeline Let us write the pipeline to disk: ExerciseImplement1. the `train_and_deploy` function in the `pipeline_vertex/training_lightweight_component.py`1. the `tune_hyperparameters` function in the `pipeline_vertex/tuning_lightweight_component.py`and complete the TODOs in the `pipeline.py` file below:
###Code
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
tune_hyperparameters_component = create_component_from_func_v2(
tune_hyperparameters,
base_image="python:3.8",
output_component_file="covertype_kfp_tune_hyperparameters.yaml",
packages_to_install=["google-cloud-aiplatform"],
)
train_and_deploy_component = create_component_from_func_v2(
train_and_deploy,
base_image="python:3.8",
output_component_file="covertype_kfp_train_and_deploy.yaml",
packages_to_install=["google-cloud-aiplatform"],
)
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f"{pipeline_root}/staging"
tuning_op = tune_hyperparameters_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
)
accuracy = tuning_op.outputs["best_accuracy"]
with dsl.Condition(
accuracy >= accuracy_deployment_threshold, name="deploy_decision"
):
train_and_deploy_op = ( # pylint: disable=unused-variable
train_and_deploy_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
serving_container_uri=serving_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
alpha=tuning_op.outputs["best_alpha"],
max_iter=tuning_op.outputs["best_max_iter"],
)
)
###Output
Overwriting ./pipeline_vertex/pipeline.py
###Markdown
Compile the pipeline Let stat by defining the environment variables that will be passed to the pipeline compiler:
###Code
ARTIFACT_STORE = f"gs://{PROJECT_ID}-kfp-artifact-store"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
###Output
env: PIPELINE_ROOT=gs://qwiklabs-gcp-02-7680e21dd047-kfp-artifact-store/pipeline
env: PROJECT_ID=qwiklabs-gcp-02-7680e21dd047
env: REGION=us-central1
env: SERVING_CONTAINER_IMAGE_URI=us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest
env: TRAINING_CONTAINER_IMAGE_URI=gcr.io/qwiklabs-gcp-02-7680e21dd047/trainer_image_covertype_vertex:latest
env: TRAINING_FILE_PATH=gs://qwiklabs-gcp-02-7680e21dd047-kfp-artifact-store/data/training/dataset.csv
env: VALIDATION_FILE_PATH=gs://qwiklabs-gcp-02-7680e21dd047-kfp-artifact-store/data/validation/dataset.csv
###Markdown
Let us make sure that the `ARTIFACT_STORE` has been created, and let us create it if not:
###Code
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
###Output
gs://qwiklabs-gcp-02-7680e21dd047-kfp-artifact-store/
###Markdown
**Note:** In case the artifact store was not created and properly set beforehand, you may need to run the following command in **CloudShell** to allow Vertex AI to access it:
```
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects list --filter="name=$PROJECT_ID" --format="value(PROJECT_NUMBER)")
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member="serviceAccount:$PROJECT_NUMBER-compute@developer.gserviceaccount.com" \
    --role="roles/storage.objectAdmin"
```
Use the CLI compiler to compile the pipeline. We compile the pipeline from the Python file we generated into a JSON description using the following command:
###Code
PIPELINE_JSON = "covertype_kfp_pipeline.json"
###Output
_____no_output_____
###Markdown
Exercise: Compile the `pipeline_vertex/pipeline.py` with the `dsl-compile-v2` command line:
###Code
!dsl-compile-v2 --py pipeline_vertex/pipeline.py --output $PIPELINE_JSON
###Output
/opt/conda/lib/python3.7/site-packages/kfp/components/_python_op.py:987: FutureWarning: create_component_from_func_v2() has been deprecated and will be removed in KFP v1.9. Please use @kfp.v2.dsl.component() instead.
category=FutureWarning,
/opt/conda/lib/python3.7/site-packages/kfp/v2/compiler/compiler.py:1266: FutureWarning: APIs imported from the v1 namespace (e.g. kfp.dsl, kfp.components, etc) will not be supported by the v2 compiler since v2.0.0
category=FutureWarning,
###Markdown
**Note:** You can also use the Python SDK to compile the pipeline from its Python function:
```python
compiler.Compiler().compile(
    pipeline_func=covertype_train,
    package_path=PIPELINE_JSON,
)
```
The result is the pipeline file.
###Code
!head {PIPELINE_JSON}
###Output
{
"pipelineSpec": {
"components": {
"comp-condition-deploy-decision-1": {
"dag": {
"tasks": {
"train-and-deploy": {
"cachingOptions": {
"enableCache": true
},
###Markdown
Deploy the pipeline package. Exercise: Upload and run the pipeline to Vertex AI using `aiplatform.PipelineJob`:
###Code
aiplatform.init(project=PROJECT_ID, location=REGION)
pipeline = aiplatform.PipelineJob(
display_name="covertype_kfp_pipeline",
template_path=PIPELINE_JSON,
enable_caching=True,
)
pipeline.run(sync=False)
###Output
INFO:google.cloud.aiplatform.pipeline_jobs:Creating PipelineJob
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob created. Resource name: projects/517861155353/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211154023
INFO:google.cloud.aiplatform.pipeline_jobs:To use this PipelineJob in another session:
INFO:google.cloud.aiplatform.pipeline_jobs:pipeline_job = aiplatform.PipelineJob.get('projects/517861155353/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211154023')
INFO:google.cloud.aiplatform.pipeline_jobs:View Pipeline Job:
https://console.cloud.google.com/vertex-ai/locations/us-central1/pipelines/runs/covertype-kfp-pipeline-20220211154023?project=517861155353
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob run completed. Resource name: projects/517861155353/locations/us-central1/pipelineJobs/covertype-kfp-pipeline-20220211154023
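###Markdown
A note on the call above: `pipeline.run(sync=False)` returns as soon as the job is submitted, and the run keeps executing in Vertex AI (the completion line in the log appears once it finishes). If you prefer the call to block until the run completes, a sketch with the same `pipeline` object:
```python
pipeline.run(sync=True)  # the default; blocks and periodically logs the pipeline state
```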
###Markdown
Continuous Training with Kubeflow Pipeline and Vertex AI **Learning Objectives:** 1. Learn how to use KF pre-built components 2. Learn how to use KF lightweight Python components 3. Learn how to build a KF pipeline with these components 4. Learn how to compile, upload, and run a KF pipeline. In this lab, you will build, deploy, and run a KFP pipeline that orchestrates the **Vertex AI** services to train, tune, and deploy a **scikit-learn** model. Setup
###Code
from google.cloud import aiplatform
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
###Output
_____no_output_____
###Markdown
Understanding the pipeline design The workflow implemented by the pipeline is defined using a Python-based Domain Specific Language (DSL). The pipeline's DSL is in the `pipeline_vertex/pipeline.py` file that we will generate below. The pipeline's DSL has been designed to avoid hardcoding any environment-specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables. Build the trainer image The training step in the pipeline will require a custom training container. The custom training image is defined in `trainer_image_vertex/Dockerfile`.
###Code
!cat trainer_image_vertex/Dockerfile
###Output
_____no_output_____
###Markdown
Let's now build and push this trainer container to the container registry:
###Code
IMAGE_NAME = "trainer_image_covertype_vertex"
TAG = "latest"
TRAINING_CONTAINER_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
###Output
_____no_output_____
###Markdown
To match, at serving time, the ML framework version we used at training time, we have to supply the following serving container to the pipeline:
###Code
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
###Output
_____no_output_____
###Markdown
**Note:** If you change the version of the training ML framework, you'll have to supply a serving container with a matching version (see [pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)). Building and deploying the pipeline. Let us write the pipeline to disk. Exercise: Implement 1. the `train_and_deploy` function in `pipeline_vertex/training_lightweight_component.py`, 2. the `tune_hyperparameters` function in `pipeline_vertex/tuning_lightweight_component.py`, and complete the TODOs in the `pipeline.py` file below:
###Code
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
tune_hyperparameters_component = # TODO
train_and_deploy_component = # TODO
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f"{pipeline_root}/staging"
tuning_op = # TODO
accuracy = tuning_op.outputs["best_accuracy"]
with dsl.Condition(
accuracy >= accuracy_deployment_threshold, name="deploy_decision"
):
train_and_deploy_op = # TODO
###Output
_____no_output_____
###Markdown
Compile the pipeline. Let's start by defining the environment variables that will be passed to the pipeline compiler:
###Code
ARTIFACT_STORE = f"gs://{PROJECT_ID}-vertex"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
###Output
_____no_output_____
###Markdown
Let us make sure that the `ARTIFACT_STORE` has been created, and let us create it if not:
###Code
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
###Output
_____no_output_____
###Markdown
**Note:** In case the artifact store was not created and properly set beforehand, you may need to run the following command in **CloudShell** to allow Vertex AI to access it:
```
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects list --filter="name=$PROJECT_ID" --format="value(PROJECT_NUMBER)")
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member="serviceAccount:$PROJECT_NUMBER-compute@developer.gserviceaccount.com" \
    --role="roles/storage.objectAdmin"
```
Use the CLI compiler to compile the pipeline. We compile the pipeline from the Python file we generated into a JSON description using the following command:
###Code
PIPELINE_JSON = "covertype_kfp_pipeline.json"
###Output
_____no_output_____
###Markdown
Exercise: Compile the `pipeline_vertex/pipeline.py` with the `dsl-compile-v2` command line:
###Code
# TODO
###Output
_____no_output_____
###Markdown
**Note:** You can also use the Python SDK to compile the pipeline from its Python function:
```python
compiler.Compiler().compile(
    pipeline_func=covertype_train,
    package_path=PIPELINE_JSON,
)
```
The result is the pipeline file.
###Code
!head {PIPELINE_JSON}
###Output
_____no_output_____
###Markdown
Deploy the pipeline package. Exercise: Upload and run the pipeline to Vertex AI using `aiplatform.PipelineJob`:
###Code
# TODO
###Output
_____no_output_____
###Markdown
Continuous Training with Kubeflow Pipeline and Vertex AI **Learning Objectives:** 1. Learn how to use KF pre-built components 2. Learn how to use KF lightweight Python components 3. Learn how to build a KF pipeline with these components 4. Learn how to compile, upload, and run a KF pipeline. In this lab, you will build, deploy, and run a KFP pipeline that orchestrates the **Vertex AI** services to train, tune, and deploy a **scikit-learn** model. Setup
###Code
from google.cloud import aiplatform
REGION = 'us-central1'
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
###Output
env: PATH=/home/jupyter/.local/bin:/usr/local/cuda/bin:/opt/conda/bin:/opt/conda/condabin:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games
###Markdown
Understanding the pipeline design The workflow implemented by the pipeline is defined using a Python-based Domain Specific Language (DSL). The pipeline's DSL is in the `pipeline_vertex/pipeline.py` file that we will generate below. The pipeline's DSL has been designed to avoid hardcoding any environment-specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables. Build the trainer image The training step in the pipeline will require a custom training container. The custom training image is defined in `trainer_image_vertex/Dockerfile`.
###Code
!cat trainer_image_vertex/Dockerfile
###Output
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
###Markdown
Let's now build and push this trainer container to the container registry:
###Code
IMAGE_NAME='trainer_image_covertype_vertex'
TAG='latest'
TRAINING_CONTAINER_IMAGE_URI=f'gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}'
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
###Output
Creating temporary tarball archive of 2 file(s) totalling 3.4 KiB before compression.
Uploading tarball of [trainer_image_vertex] to [gs://qwiklabs-gcp-00-eeb852ce8ccb_cloudbuild/source/1634329797.999404-7bdafb88b8a748c486039f451ddc6d1c.tgz]
Created [https://cloudbuild.googleapis.com/v1/projects/qwiklabs-gcp-00-eeb852ce8ccb/locations/global/builds/056d30e2-1410-421f-aa92-60e04570525b].
Logs are available at [https://console.cloud.google.com/cloud-build/builds/056d30e2-1410-421f-aa92-60e04570525b?project=432069008306].
----------------------------- REMOTE BUILD OUTPUT ------------------------------
starting build "056d30e2-1410-421f-aa92-60e04570525b"
FETCHSOURCE
Fetching storage object: gs://qwiklabs-gcp-00-eeb852ce8ccb_cloudbuild/source/1634329797.999404-7bdafb88b8a748c486039f451ddc6d1c.tgz#1634329798365584
Copying gs://qwiklabs-gcp-00-eeb852ce8ccb_cloudbuild/source/1634329797.999404-7bdafb88b8a748c486039f451ddc6d1c.tgz#1634329798365584...
/ [1 files][ 1.6 KiB/ 1.6 KiB]
Operation completed over 1 objects/1.6 KiB.
BUILD
Already have image (with digest): gcr.io/cloud-builders/docker
Sending build context to Docker daemon 6.144kB
Step 1/5 : FROM gcr.io/deeplearning-platform-release/base-cpu
latest: Pulling from deeplearning-platform-release/base-cpu
284055322776: Pulling fs layer
7fb8f1c4c1a1: Pulling fs layer
a76d4d20d139: Pulling fs layer
0b21e6315b88: Pulling fs layer
4f4fb700ef54: Pulling fs layer
f20cf01c7ad4: Pulling fs layer
1b7343e814c0: Pulling fs layer
60a9c997c4ea: Pulling fs layer
a697b94b728d: Pulling fs layer
742c7aa077c0: Pulling fs layer
32785b1c9c12: Pulling fs layer
4853207b454b: Pulling fs layer
b9d1b08c1bab: Pulling fs layer
1bb1e9e50f0e: Pulling fs layer
2f28472901e5: Pulling fs layer
d62168f40eab: Pulling fs layer
d3e0c8453e12: Pulling fs layer
0220041f8fd4: Pulling fs layer
0b21e6315b88: Waiting
4f4fb700ef54: Waiting
f20cf01c7ad4: Waiting
1b7343e814c0: Waiting
60a9c997c4ea: Waiting
a697b94b728d: Waiting
742c7aa077c0: Waiting
32785b1c9c12: Waiting
4853207b454b: Waiting
b9d1b08c1bab: Waiting
1bb1e9e50f0e: Waiting
2f28472901e5: Waiting
d62168f40eab: Waiting
d3e0c8453e12: Waiting
0220041f8fd4: Waiting
7fb8f1c4c1a1: Verifying Checksum
7fb8f1c4c1a1: Download complete
284055322776: Download complete
4f4fb700ef54: Verifying Checksum
4f4fb700ef54: Download complete
f20cf01c7ad4: Verifying Checksum
f20cf01c7ad4: Download complete
0b21e6315b88: Verifying Checksum
0b21e6315b88: Download complete
60a9c997c4ea: Verifying Checksum
60a9c997c4ea: Download complete
a697b94b728d: Verifying Checksum
a697b94b728d: Download complete
742c7aa077c0: Verifying Checksum
742c7aa077c0: Download complete
32785b1c9c12: Verifying Checksum
32785b1c9c12: Download complete
4853207b454b: Verifying Checksum
4853207b454b: Download complete
b9d1b08c1bab: Verifying Checksum
b9d1b08c1bab: Download complete
1bb1e9e50f0e: Download complete
2f28472901e5: Verifying Checksum
2f28472901e5: Download complete
d62168f40eab: Verifying Checksum
d62168f40eab: Download complete
1b7343e814c0: Verifying Checksum
1b7343e814c0: Download complete
0220041f8fd4: Verifying Checksum
0220041f8fd4: Download complete
a76d4d20d139: Verifying Checksum
a76d4d20d139: Download complete
284055322776: Pull complete
7fb8f1c4c1a1: Pull complete
d3e0c8453e12: Verifying Checksum
d3e0c8453e12: Download complete
a76d4d20d139: Pull complete
0b21e6315b88: Pull complete
4f4fb700ef54: Pull complete
f20cf01c7ad4: Pull complete
1b7343e814c0: Pull complete
60a9c997c4ea: Pull complete
a697b94b728d: Pull complete
742c7aa077c0: Pull complete
32785b1c9c12: Pull complete
4853207b454b: Pull complete
b9d1b08c1bab: Pull complete
1bb1e9e50f0e: Pull complete
2f28472901e5: Pull complete
d62168f40eab: Pull complete
d3e0c8453e12: Pull complete
0220041f8fd4: Pull complete
Digest: sha256:5625223382682dfadea17adb2536c6e90e4349c02e8938fe46ca71a8ee65978a
Status: Downloaded newer image for gcr.io/deeplearning-platform-release/base-cpu:latest
---> 3336e1db68a5
Step 2/5 : RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
---> Running in c37b110bd116
Collecting fire
Downloading fire-0.4.0.tar.gz (87 kB)
Collecting cloudml-hypertune
Downloading cloudml-hypertune-0.1.0.dev6.tar.gz (3.2 kB)
Collecting scikit-learn==0.20.4
Downloading scikit_learn-0.20.4-cp37-cp37m-manylinux1_x86_64.whl (5.4 MB)
Collecting pandas==0.24.2
Downloading pandas-0.24.2-cp37-cp37m-manylinux1_x86_64.whl (10.1 MB)
Requirement already satisfied: scipy>=0.13.3 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.7.1)
Requirement already satisfied: numpy>=1.8.2 in /opt/conda/lib/python3.7/site-packages (from scikit-learn==0.20.4) (1.19.5)
Requirement already satisfied: python-dateutil>=2.5.0 in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2.8.2)
Requirement already satisfied: pytz>=2011k in /opt/conda/lib/python3.7/site-packages (from pandas==0.24.2) (2021.3)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from fire) (1.16.0)
Collecting termcolor
Downloading termcolor-1.1.0.tar.gz (3.9 kB)
Building wheels for collected packages: fire, cloudml-hypertune, termcolor
Building wheel for fire (setup.py): started
Building wheel for fire (setup.py): finished with status 'done'
Created wheel for fire: filename=fire-0.4.0-py2.py3-none-any.whl size=115943 sha256=a822f08df5a442fd2962edfd505e4d20ca55cea2179ac2b7a9cb34539e5970e6
Stored in directory: /root/.cache/pip/wheels/8a/67/fb/2e8a12fa16661b9d5af1f654bd199366799740a85c64981226
Building wheel for cloudml-hypertune (setup.py): started
Building wheel for cloudml-hypertune (setup.py): finished with status 'done'
Created wheel for cloudml-hypertune: filename=cloudml_hypertune-0.1.0.dev6-py2.py3-none-any.whl size=3987 sha256=21bcdd7a9a62b75d1bd83cddad4a620a4d30b315a24a5d330666b85de73072d3
Stored in directory: /root/.cache/pip/wheels/a7/ff/87/e7bed0c2741fe219b3d6da67c2431d7f7fedb183032e00f81e
Building wheel for termcolor (setup.py): started
Building wheel for termcolor (setup.py): finished with status 'done'
Created wheel for termcolor: filename=termcolor-1.1.0-py3-none-any.whl size=4847 sha256=e85c78561b2224b1931515d580830b173ed540f3e2ead4ca25a3f6c1bc5c34ad
Stored in directory: /root/.cache/pip/wheels/3f/e3/ec/8a8336ff196023622fbcb36de0c5a5c218cbb24111d1d4c7f2
Successfully built fire cloudml-hypertune termcolor
Installing collected packages: termcolor, scikit-learn, pandas, fire, cloudml-hypertune
Attempting uninstall: scikit-learn
Found existing installation: scikit-learn 1.0
Uninstalling scikit-learn-1.0:
Successfully uninstalled scikit-learn-1.0
Attempting uninstall: pandas
Found existing installation: pandas 1.3.3
Uninstalling pandas-1.3.3:
Successfully uninstalled pandas-1.3.3
Successfully installed cloudml-hypertune-0.1.0.dev6 fire-0.4.0 pandas-0.24.2 scikit-learn-0.20.4 termcolor-1.1.0
[91mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
visions 0.7.4 requires pandas>=0.25.3, but you have pandas 0.24.2 which is incompatible.
statsmodels 0.13.0 requires pandas>=0.25, but you have pandas 0.24.2 which is incompatible.
phik 0.11.2 requires pandas>=0.25.1, but you have pandas 0.24.2 which is incompatible.
pandas-profiling 3.1.0 requires pandas!=1.0.0,!=1.0.1,!=1.0.2,!=1.1.0,>=0.25.3, but you have pandas 0.24.2 which is incompatible.
[0m[91mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
[0mRemoving intermediate container c37b110bd116
---> 586ecff3ff80
Step 3/5 : WORKDIR /app
---> Running in f6f00a2d19ee
Removing intermediate container f6f00a2d19ee
---> 6e476d5dfb1d
Step 4/5 : COPY train.py .
---> f897d028f9c7
Step 5/5 : ENTRYPOINT ["python", "train.py"]
---> Running in f49177b96f0a
Removing intermediate container f49177b96f0a
---> c112a7cc1b66
Successfully built c112a7cc1b66
Successfully tagged gcr.io/qwiklabs-gcp-00-eeb852ce8ccb/trainer_image_covertype_vertex:latest
PUSH
Pushing gcr.io/qwiklabs-gcp-00-eeb852ce8ccb/trainer_image_covertype_vertex:latest
The push refers to repository [gcr.io/qwiklabs-gcp-00-eeb852ce8ccb/trainer_image_covertype_vertex]
6e939ae0cc90: Preparing
aab8721b4115: Preparing
6a172883fed6: Preparing
36452ee8c539: Preparing
14da8ed3cd11: Preparing
c0b791673ed4: Preparing
30b70207e11d: Preparing
cefbe2063c4d: Preparing
13aae071416e: Preparing
6905e12d5606: Preparing
6d80f75a021b: Preparing
bd325cd68ba0: Preparing
41b0c4cd943a: Preparing
45f84ed3ae7e: Preparing
7a827fd16c0d: Preparing
0df2e122bf51: Preparing
5f70bf18a086: Preparing
7ea0939ee63b: Preparing
379ac210e2ce: Preparing
ea39a5660b36: Preparing
824bf068fd3d: Preparing
c0b791673ed4: Waiting
45f84ed3ae7e: Waiting
7a827fd16c0d: Waiting
0df2e122bf51: Waiting
5f70bf18a086: Waiting
7ea0939ee63b: Waiting
379ac210e2ce: Waiting
ea39a5660b36: Waiting
824bf068fd3d: Waiting
30b70207e11d: Waiting
cefbe2063c4d: Waiting
13aae071416e: Waiting
6905e12d5606: Waiting
41b0c4cd943a: Waiting
6d80f75a021b: Waiting
bd325cd68ba0: Waiting
14da8ed3cd11: Layer already exists
36452ee8c539: Layer already exists
30b70207e11d: Layer already exists
c0b791673ed4: Layer already exists
cefbe2063c4d: Layer already exists
6905e12d5606: Layer already exists
13aae071416e: Layer already exists
6d80f75a021b: Layer already exists
bd325cd68ba0: Layer already exists
45f84ed3ae7e: Layer already exists
41b0c4cd943a: Layer already exists
0df2e122bf51: Layer already exists
7a827fd16c0d: Layer already exists
5f70bf18a086: Layer already exists
7ea0939ee63b: Layer already exists
379ac210e2ce: Layer already exists
ea39a5660b36: Layer already exists
824bf068fd3d: Layer already exists
6e939ae0cc90: Pushed
aab8721b4115: Pushed
6a172883fed6: Pushed
latest: digest: sha256:436a6e5b2cc8e05fb7d33156f264dcf35c51354193cec033cfb24d2e2549f259 size: 4707
DONE
--------------------------------------------------------------------------------
ID CREATE_TIME DURATION SOURCE IMAGES STATUS
056d30e2-1410-421f-aa92-60e04570525b 2021-10-15T20:29:58+00:00 2M38S gs://qwiklabs-gcp-00-eeb852ce8ccb_cloudbuild/source/1634329797.999404-7bdafb88b8a748c486039f451ddc6d1c.tgz gcr.io/qwiklabs-gcp-00-eeb852ce8ccb/trainer_image_covertype_vertex (+1 more) SUCCESS
###Markdown
To match, at serving time, the ML framework version we used at training time, we have to supply the following serving container to the pipeline (the trainer image above pins scikit-learn==0.20.4, which matches the `sklearn-cpu.0-20` serving image):
###Code
SERVING_CONTAINER_IMAGE_URI = 'us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest'
###Output
_____no_output_____
###Markdown
**Note:** If you change the version of the training ML framework, you'll have to supply a serving container with a matching version (see [pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)). Building and deploying the pipeline. Let us write the pipeline to disk. Exercise: Implement 1. the `train_and_deploy` function in `pipeline_vertex/training_lightweight_component.py`, 2. the `tune_hyperparameters` function in `pipeline_vertex/tuning_lightweight_component.py`, and complete the TODOs in the `pipeline.py` file below:
###Code
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from tuning_lightweight_component import tune_hyperparameters
from training_lightweight_component import train_and_deploy
PIPELINE_ROOT = os.getenv('PIPELINE_ROOT')
PROJECT_ID = os.getenv('PROJECT_ID')
REGION = os.getenv('REGION')
TRAINING_CONTAINER_IMAGE_URI = os.getenv('TRAINING_CONTAINER_IMAGE_URI')
SERVING_CONTAINER_IMAGE_URI = os.getenv('SERVING_CONTAINER_IMAGE_URI')
TRAINING_FILE_PATH = os.getenv('TRAINING_FILE_PATH')
VALIDATION_FILE_PATH = os.getenv('VALIDATION_FILE_PATH')
MAX_TRIAL_COUNT = int(os.getenv('MAX_TRIAL_COUNT', '5'))
PARALLEL_TRIAL_COUNT = int(os.getenv('PARALLEL_TRIAL_COUNT', '5'))
THRESHOLD = float(os.getenv('THRESHOLD', '0.6'))
tune_hyperparameters_component = create_component_from_func_v2(
tune_hyperparameters,
base_image='python:3.8',
output_component_file='covertype_kfp_tune_hyperparameters.yaml',
packages_to_install=['google-cloud-aiplatform'],
)
train_and_deploy_component = create_component_from_func_v2(
train_and_deploy,
base_image='python:3.8',
output_component_file='covertype_kfp_train_and_deploy.yaml',
packages_to_install=['google-cloud-aiplatform'],
)
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f'{pipeline_root}/staging'
tuning_op = tune_hyperparameters_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
)
accuracy = tuning_op.outputs['best_accuracy']
with dsl.Condition(accuracy >= accuracy_deployment_threshold, name="deploy_decision"):
train_and_deploy_op = train_and_deploy_component(
project=PROJECT_ID,
location=REGION,
container_uri=training_container_uri,
serving_container_uri=serving_container_uri,
training_file_path=training_file_path,
validation_file_path=validation_file_path,
staging_bucket=staging_bucket,
alpha=tuning_op.outputs['best_alpha'],
max_iter=tuning_op.outputs['best_max_iter'],
)
###Output
Overwriting ./pipeline_vertex/pipeline.py
###Markdown
Compile the pipeline. Let's start by defining the environment variables that will be passed to the pipeline compiler:
###Code
ARTIFACT_STORE = f'gs://{PROJECT_ID}-vertex'
PIPELINE_ROOT = f'{ARTIFACT_STORE}/pipeline'
DATA_ROOT = f'{ARTIFACT_STORE}/data'
TRAINING_FILE_PATH = f'{DATA_ROOT}/training/dataset.csv'
VALIDATION_FILE_PATH = f'{DATA_ROOT}/validation/dataset.csv'
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
###Output
env: PIPELINE_ROOT=gs://qwiklabs-gcp-00-eeb852ce8ccb-vertex/pipeline
env: PROJECT_ID=qwiklabs-gcp-00-eeb852ce8ccb
env: REGION=us-central1
env: SERVING_CONTAINER_IMAGE_URI=us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest
env: TRAINING_CONTAINER_IMAGE_URI=gcr.io/qwiklabs-gcp-00-eeb852ce8ccb/trainer_image_covertype_vertex:latest
env: TRAINING_FILE_PATH=gs://qwiklabs-gcp-00-eeb852ce8ccb-vertex/data/training/dataset.csv
env: VALIDATION_FILE_PATH=gs://qwiklabs-gcp-00-eeb852ce8ccb-vertex/data/validation/dataset.csv
###Markdown
Let us make sure that the `ARTIFACT_STORE` has been created, and let us create it if not:
###Code
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
###Output
gs://qwiklabs-gcp-00-eeb852ce8ccb-vertex/
###Markdown
**Note:** In case the artifact store was not created and properly set beforehand, you may need to run the following command in **CloudShell** to allow Vertex AI to access it:
```
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects list --filter="name=$PROJECT_ID" --format="value(PROJECT_NUMBER)")
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member="serviceAccount:$PROJECT_NUMBER-compute@developer.gserviceaccount.com" \
    --role="roles/storage.objectAdmin"
```
Use the CLI compiler to compile the pipeline. We compile the pipeline from the Python file we generated into a JSON description using the following command:
###Code
PIPELINE_JSON = 'covertype_kfp_pipeline.json'
###Output
_____no_output_____
###Markdown
Exercise: Compile the `pipeline_vertex/pipeline.py` with the `dsl-compile-v2` command line:
###Code
!dsl-compile-v2 --py pipeline_vertex/pipeline.py --output $PIPELINE_JSON
###Output
/opt/conda/lib/python3.7/site-packages/kfp/components/_python_op.py:987: FutureWarning: create_component_from_func_v2() has been deprecated and will be removed in KFP v1.9. Please use kfp.v2.components.create_component_from_func() instead.
category=FutureWarning,
###Markdown
**Note:** You can also use the Python SDK to compile the pipeline from its Python function:
```python
compiler.Compiler().compile(
    pipeline_func=covertype_train,
    package_path=PIPELINE_JSON,
)
```
The result is the pipeline file.
###Code
!head {PIPELINE_JSON}
###Output
{
"pipelineSpec": {
"components": {
"comp-condition-deploy-decision-1": {
"dag": {
"tasks": {
"train-and-deploy": {
"cachingOptions": {
"enableCache": true
},
###Markdown
Deploy the pipeline package. Exercise: Upload and run the pipeline to Vertex AI using `aiplatform.PipelineJob`:
###Code
aiplatform.init(project=PROJECT_ID, location=REGION)
pipeline = aiplatform.PipelineJob(
display_name='covertype_kfp_pipeline',
template_path=PIPELINE_JSON,
enable_caching=False,
)
pipeline.run()
###Output
INFO:google.cloud.aiplatform.pipeline_jobs:Creating PipelineJob
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob created. Resource name: projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264
INFO:google.cloud.aiplatform.pipeline_jobs:To use this PipelineJob in another session:
INFO:google.cloud.aiplatform.pipeline_jobs:pipeline_job = aiplatform.PipelineJob.get('projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264')
INFO:google.cloud.aiplatform.pipeline_jobs:View Pipeline Job:
https://console.cloud.google.com/vertex-ai/locations/us-central1/pipelines/runs/7250909749275787264?project=432069008306
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
INFO:google.cloud.aiplatform.pipeline_jobs:PipelineJob projects/432069008306/locations/us-central1/pipelineJobs/7250909749275787264 current state:
PipelineState.PIPELINE_STATE_RUNNING
###Markdown
Continuous Training with Kubeflow Pipeline and Vertex AI **Learning Objectives:** 1. Learn how to use KF pre-built components 2. Learn how to use KF lightweight Python components 3. Learn how to build a KF pipeline with these components 4. Learn how to compile, upload, and run a KF pipeline. In this lab, you will build, deploy, and run a KFP pipeline that orchestrates the **Vertex AI** services to train, tune, and deploy a **scikit-learn** model. Setup
###Code
from google.cloud import aiplatform
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
###Output
_____no_output_____
###Markdown
Understanding the pipeline design The workflow implemented by the pipeline is defined using a Python-based Domain Specific Language (DSL). The pipeline's DSL is in the `pipeline_vertex/pipeline.py` file that we will generate below. The pipeline's DSL has been designed to avoid hardcoding any environment-specific settings like file paths or connection strings. These settings are provided to the pipeline code through a set of environment variables. Build the trainer image The training step in the pipeline will require a custom training container. The custom training image is defined in `trainer_image_vertex/Dockerfile`.
###Code
!cat trainer_image_vertex/Dockerfile
###Output
_____no_output_____
###Markdown
Let's now build and push this trainer container to the container registry:
###Code
IMAGE_NAME = "trainer_image_covertype_vertex"
TAG = "latest"
TRAINING_CONTAINER_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
###Output
_____no_output_____
###Markdown
To match, at serving time, the ML framework version we used at training time, we have to supply the following serving container to the pipeline:
###Code
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
###Output
_____no_output_____
###Markdown
**Note:** If you change the version of the training ML framework, you'll have to supply a serving container with a matching version (see [pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)). Building and deploying the pipeline. Let us write the pipeline to disk. Exercise: Implement 1. the `train_and_deploy` function in `pipeline_vertex/training_lightweight_component.py`, 2. the `tune_hyperparameters` function in `pipeline_vertex/tuning_lightweight_component.py`, and complete the TODOs in the `pipeline.py` file below:
###Code
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Kubeflow Covertype Pipeline."""
import os
from kfp import dsl
from kfp.components import create_component_from_func_v2
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
tune_hyperparameters_component = # TODO
train_and_deploy_component = # TODO
@dsl.pipeline(
name="covertype-kfp-pipeline",
description="The pipeline training and deploying the Covertype classifier",
pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
training_file_path: str = TRAINING_FILE_PATH,
validation_file_path: str = VALIDATION_FILE_PATH,
accuracy_deployment_threshold: float = THRESHOLD,
max_trial_count: int = MAX_TRIAL_COUNT,
parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
pipeline_root: str = PIPELINE_ROOT,
):
staging_bucket = f"{pipeline_root}/staging"
tuning_op = # TODO
accuracy = tuning_op.outputs["best_accuracy"]
with dsl.Condition(
accuracy >= accuracy_deployment_threshold, name="deploy_decision"
):
train_and_deploy_op = # TODO
###Output
_____no_output_____
###Markdown
Compile the pipeline. Let's start by defining the environment variables that will be passed to the pipeline compiler:
###Code
ARTIFACT_STORE = f"gs://{PROJECT_ID}-kfp-artifact-store"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
###Output
_____no_output_____
###Markdown
Let us make sure that the `ARTIFACT_STORE` has been created, and let us create it if not:
###Code
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
###Output
_____no_output_____
###Markdown
**Note:** In case the artifact store was not created and properly set beforehand, you may need to run the following command in **CloudShell** to allow Vertex AI to access it:
```
PROJECT_ID=$(gcloud config get-value project)
PROJECT_NUMBER=$(gcloud projects list --filter="name=$PROJECT_ID" --format="value(PROJECT_NUMBER)")
gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member="serviceAccount:$PROJECT_NUMBER-compute@developer.gserviceaccount.com" \
    --role="roles/storage.objectAdmin"
```
Use the CLI compiler to compile the pipeline. We compile the pipeline from the Python file we generated into a JSON description using the following command:
###Code
PIPELINE_JSON = "covertype_kfp_pipeline.json"
###Output
_____no_output_____
###Markdown
Exercise: Compile the `pipeline_vertex/pipeline.py` with the `dsl-compile-v2` command line:
###Code
# TODO
###Output
_____no_output_____
###Markdown
**Note:** You can also use the Python SDK to compile the pipeline from its Python function:
```python
compiler.Compiler().compile(
    pipeline_func=covertype_train,
    package_path=PIPELINE_JSON,
)
```
The result is the pipeline file.
###Code
!head {PIPELINE_JSON}
###Output
_____no_output_____
###Markdown
Deploy the pipeline package. Exercise: Upload and run the pipeline to Vertex AI using `aiplatform.PipelineJob`:
###Code
# TODO
###Output
_____no_output_____
codici/.ipynb_checkpoints/lda_dimred-checkpoint.ipynb
###Markdown
Dimensionality reduction via LDA
###Code
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
import pandas as pd
import numpy as np
import math
import scipy.stats as st
import scipy.linalg as la
from sklearn.preprocessing import LabelEncoder
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import seaborn as sns  # seaborn.apionly was removed in recent seaborn releases
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (16, 8)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c',
'#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b',
'#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09']
cmap = mcolors.LinearSegmentedColormap.from_list("", ["#82cafc", "#069af3", "#0485d1", colors[0], colors[8]])
###Output
_____no_output_____
###Markdown
Read the dataset
###Code
df = pd.read_csv("../../data/iris.csv", header=0, delimiter=';')
df.head()
###Output
_____no_output_____
###Markdown
Derive the feature matrix and the target vector
###Code
X = np.array(df[df.columns[:-1]])
y = np.array(df[df.columns[-1]])
###Output
_____no_output_____
###Markdown
Encode the classes as integers
###Code
enc = LabelEncoder()
label_encoder = enc.fit(y)
y = label_encoder.transform(y) + 1
label_dict = {label_encoder.transform(['setosa'])[0]+1: 'Setosa',
label_encoder.transform(['versicolor'])[0]+1: 'Versicolor',
label_encoder.transform(['virginica'])[0]+1:'Virginica'}
###Output
_____no_output_____
###Markdown
Matrix of the pairwise feature distributions. On the diagonal, the distribution of each single feature
###Code
fig = plt.figure(figsize=(16, 8))
sns.pairplot(df, height=4, diag_kind='kde', hue='class', palette={'setosa': colors[1], 'versicolor': colors[2], 'virginica':colors[3]},
             diag_kws=dict(shade=True, alpha=.5))
plt.show()
###Output
_____no_output_____
###Markdown
Mean values of the features for all classes
###Code
mean_vectors = []
for cl in range(1,4):
mean_vectors.append(np.mean(X[y==cl], axis=0))
    print('Class {0:s}: {1}'.format(label_dict[cl], mean_vectors[cl-1]))
###Output
Class Setosa: [1.462 0.246 5.006 3.428]
Class Versicolor: [4.26 1.326 5.936 2.77 ]
Class Virginica: [5.552 2.026 6.588 2.974]
###Markdown
Derive the within-class scatter matrix
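In symbols, with class means $m_c$ and the set $D_c$ of samples in class $c$: $$S_W = \sum_{c=1}^{C} \sum_{x \in D_c} (x - m_c)(x - m_c)^T$$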
###Code
S_W = np.zeros((4,4))
for cl,mv in zip(range(1,4), mean_vectors):
Xp = X[y == cl]
S_W += np.dot((Xp-mv).T,(Xp-mv))
print('Within-class Scatter Matrix:\n')
print(S_W)
###Output
Within-class Scatter Matrix:
[[27.2226 6.2718 24.6246 8.1208]
[ 6.2718 6.1566 5.645 4.8084]
[24.6246 5.645 38.9562 13.63 ]
[ 8.1208 4.8084 13.63 16.962 ]]
###Markdown
Derive the between-class scatter matrix
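With overall mean $m$ and $N_c$ samples in class $c$: $$S_B = \sum_{c=1}^{C} N_c\,(m_c - m)(m_c - m)^T$$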
###Code
overall_mean = np.mean(X, axis=0)
S_B = np.zeros((4,4))
for cl,mv in zip(range(1,4), mean_vectors):
    n = X[y==cl].shape[0]  # number of samples in class cl
cv = (mv-overall_mean).reshape(1,-1)
S_B += n*np.dot(cv.T,cv)
print('Between-class Scatter Matrix:\n')
print(S_B)
###Output
Between-class Scatter Matrix:
[[437.1028 186.774 165.2484 -57.2396]
[186.774 80.4133 71.2793 -22.9327]
[165.2484 71.2793 63.2121 -19.9527]
[-57.2396 -22.9327 -19.9527 11.3449]]
###Markdown
Compute the eigenvalues and eigenvectors of $S_W^{-1}S_B$
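These solve the Fisher criterion $$J(w) = \frac{w^T S_B\, w}{w^T S_W\, w},$$ whose stationary points satisfy the generalized eigenproblem $S_B w = \lambda S_W w$, i.e. $w$ is an eigenvector of $S_W^{-1} S_B$.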
###Code
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
for i in range(len(eig_vals)):
eigvec_sc = eig_vecs[:,i].reshape(4,1)
    print('Autovalore {0:d}: {1:.2e}. Autovettore {2}'.format(i+1, eig_vals[i].real, eigvec_sc.real.reshape(1,-1)))
###Output
Autovalore 1: 3.22e+01. Autovettore [[-0.554 -0.7074 0.2087 0.3862]]
Autovalore 2: 2.85e-01. Autovettore [[-0.2526 0.7695 0.0065 0.5866]]
Autovalore 3: 3.46e-15. Autovettore [[-0.3758 0.7164 0.0747 -0.3167]]
Autovalore 4: 3.46e-15. Autovettore [[-0.3758 0.7164 0.0747 -0.3167]]
###Markdown
Sort the eigenvalue-eigenvector pairs in decreasing order of eigenvalue magnitude
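The share of discriminatory variance captured by the $i$-th pair is $\lambda_i / \sum_j \lambda_j$, which is what the loop below prints.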
###Code
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
print('Varianza descritta dagli autovalori:')
eigv_sum = sum(eig_vals)
for i,j in enumerate(eig_pairs):
print('Autovalore {0:}: {1:.2%}'.format(i+1, (j[0]/eigv_sum).real))
###Output
Varianza descritta dagli autovalori:
Autovalore 1: 99.12%
Autovalore 2: 0.88%
Autovalore 3: 0.00%
Autovalore 4: 0.00%
###Markdown
Derive the projection matrix onto the hyperplane defined by the first new_dim eigenvectors
###Code
new_dim = 2
W = eig_pairs[0][1].reshape(4,1)
for i in range(1, new_dim):
W = np.hstack((W, eig_pairs[i][1].reshape(4,1)))
print('Matrice di proiezione W:')
print(W.real)
###Output
Matrice di proiezione W:
[[-0.554 -0.2526]
[-0.7074 0.7695]
[ 0.2087 0.0065]
[ 0.3862 0.5866]]
###Markdown
Project the samples into the new space
###Code
X_lda = X.dot(W)
fig = plt.figure(figsize=(16,8))
for label in range(1,4):
plt.scatter(x=X_lda[:,0].real[y == label],
y=X_lda[:,1].real[y == label],
color=colors[label],
alpha=0.8,
label=label_dict[label])
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend(loc='upper right', fancybox=True)
plt.title('LDA: proiezione su spazio a 2 dimensioni')
plt.show()
###Output
_____no_output_____
###Markdown
Use the function provided by scikit-learn
###Code
sklearn_lda = LDA(n_components=2)
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
fig = plt.figure(figsize=(16,8))
for label in range(1,4):
plt.scatter(x=X_lda_sklearn[:,0][y == label],
y=X_lda_sklearn[:,1][y == label] * -1,
color=colors[label],
alpha=0.8,
label=label_dict[label])
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend(loc='upper right', fancybox=True)
plt.title('LDA via sklearn: proiezione su spazio a 2 dimensioni')
plt.show()
###Output
_____no_output_____
PeakyBlinders.ipynb
###Markdown
In the name of the **AlgorithmX fellowship**!!!!! Manjunathan.R, SSN CE *A note on the novelty and implications of this work, along with the required references, for the **AlgorithmX** panel:* 1. This project is implemented **completely online** and uses Google Drive as the source for all required files. Done on **Google Colab**. 2. The model I have built is **completely dynamic**, in the sense that even the variables that store the representations are created dynamically, meaning this could be done for **any** such picture where everyone's face except Thomas Shelby's needs to be anonymised. 3. The model here uses **VGG-Net**, which works without training by using **Siamese-network-based similarity calculation of images**. We use two metrics here, namely cosine distance and Euclidean distance, with thresholds of 0.4 and 120 respectively, above which two images are not similar. 4. This hence works as an **image verification** problem: you know one face is Thomas Shelby's, you try to match it with the other faces, and you blur all except Thomas' face. 5. This spares the model GPU bottlenecking or other wasted time, making it fast! ---Links:* My AlgorithmX drive [link](https://drive.google.com/drive/folders/1IK5gd-vh_D_Po9U0WmEG4KTwZVU2zOIm?usp=sharing)* Haar Cascade Classifier XML [file](https://drive.google.com/file/d/1kDa3wLEUPJhUpg16Wx4ULe-02ZVl3ty_/view)* My [Resume](https://drive.google.com/file/d/12VsiPM3pAfNSrhc-mfI0WD15O8NSMpSz/view?usp=sharing)* Siamese Network Features for Image Matching [paper](https://users.aalto.fi/~kannalj1/publications/icpr2016.pdf)* VGG Net architecture [Medium article](https://medium.com/analytics-vidhya/vggnet-architecture-explained-e5c7318aa5b6)---Thanks for this opportunity to learn and work!
###Code
#Let's first install OpenCV which will be the backbone for our project.
!apt-get -qq install -y libsm6 libxext6 && pip install -q -U opencv-python
#Import Google Drive
from google.colab import drive
drive.mount('/content/gdrive')
root_path = 'gdrive/My Drive/AlgorithmxFellowship'
!ls -a "/content/gdrive/MyDrive/AlgorithmxFellowship"
!pwd
###Output
/content
###Markdown
**Imports**
###Code
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
# Read the given image for anonymisation and further processing:
test_image=cv2.imread('/content/gdrive/MyDrive/AlgorithmxFellowship/peakyblinders.jpg')
#Ordering the color of the image to RGB and then converting into Grayscale:
test_image=cv2.cvtColor(test_image,cv2.COLOR_BGR2RGB)
gs_test_image=cv2.cvtColor(test_image,cv2.COLOR_RGB2GRAY)
###Output
_____no_output_____
###Markdown
To detect the faces, we use a pre-trained Haar cascade classifier, available as an open-source [Link](https://drive.google.com/file/d/1kDa3wLEUPJhUpg16Wx4ULe-02ZVl3ty_/view) from the internet.
###Code
face_detector=cv2.CascadeClassifier("/content/gdrive/MyDrive/AlgorithmxFellowship/classifier/haarcascade_frontalface_default.xml")
faces_image= face_detector.detectMultiScale(gs_test_image,1.3,5)
#Checking whether the output is good or not
faces_image
###Output
_____no_output_____
###Markdown
We observe that the image has been reduced to a set of faces. Each row of the array holds the x_pos, y_pos, width and height of a detected face's bounding box.
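To visualize the detections directly on the photo, one could draw the boxes before cropping (a minimal sketch reusing the variables above):
```python
vis = test_image.copy()
for (x_pos, y_pos, width, height) in faces_image:
    # draw a red box around each detected face
    cv2.rectangle(vis, (x_pos, y_pos), (x_pos + width, y_pos + height), (255, 0, 0), 2)
plt.imshow(vis)
plt.axis('off')
```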
###Code
##Plotting the figures using Matplotlib subplotting
fig = plt.figure(figsize=(10, 7))
rows = 1
columns = 10
peaky_face=[]
for i, (x_pos, y_pos, width, height) in enumerate(faces_image):
    # crop each detected face once and show it in its own subplot
    peaky_face.append(test_image[y_pos:y_pos+height, x_pos:x_pos+width])
    fig.add_subplot(rows, columns, i+1)
    plt.imshow(peaky_face[i])
    plt.axis('off')
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:10: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# Remove the CWD from sys.path while we load stuff.
###Markdown
We have successfully extracted all the faces! Now my thought process is to build a matcher based on Thomas Shelby's image and pass each face to it. If it detects him in a particular frame, we can remove that frame from the list of frames and blur all the others. This can be dealt with as a verification problem rather than a recognition one ------ **Now we shall start building the model. This model is inspired by VGG-Net, Siamese-network-based image matching, and dynamic global variable creation**
###Code
from keras.models import Sequential,Model
from keras.layers import Activation, Dense,ZeroPadding2D,Convolution2D,MaxPooling2D,Dropout,Flatten
from keras.models import model_from_json
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
###Output
_____no_output_____
###Markdown
Instead of using multiple images to train a model, we can use the process of **image verification** rather than image identification, just like biometric systems in offices. The model we use is VGG-Face. It closely follows the ImageNet-style VGG architecture but differs at the output layer: an image of size 224x224x3 is encoded into 2622 values, which are used for learning or, in our case, verification. 
###Code
##Building a VGG network for image identification:
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(224,224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
model.summary()
model.load_weights('/content/gdrive/MyDrive/AlgorithmxFellowship/classifier/vgg_face_weights.h5')
###Output
_____no_output_____
###Markdown
Let's Build an instance of this descriptor!
###Code
vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
###Output
_____no_output_____
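###Markdown
As a quick sanity check (a sketch; `np` was imported above), the descriptor should map a 224x224x3 input to a 2622-dimensional encoding:
```python
dummy = np.zeros((1, 224, 224, 3))
print(vgg_face_descriptor.predict(dummy).shape)  # expected: (1, 2622)
```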
###Markdown
Since the model accepts input only in a particular format, we apply preprocessing; both the utilities built into VGG16 and self-defined ones can be used.
###Code
def preprocess_image(image_path):
img = image.load_img(image_path, target_size=(224, 224))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
return img
for i in range(len(faces_image)):
im = Image.fromarray(peaky_face[i])
im.save("face{0}.jpeg".format(i))
len(peaky_face)
###Output
_____no_output_____
###Markdown
Let us create global variables to store the faces' representations in a 1D vector format. This keeps the model dynamic: the variables are generated for however many faces were detected.
###Code
#Creating global Variables to store the img values of Faces.
i = 0
vals = []
for j in range(len(faces_image)):
    i += 1
    # compute each face's 2622-d representation once and reuse it
    rep = vgg_face_descriptor.predict(preprocess_image('/content/face{0}.jpeg'.format(j)))[0, :]
    globals()["facerep" + str(i)] = rep
    vals.append(rep)
np.shape(vals)
###Output
_____no_output_____
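###Markdown
A note on the design choice: dynamically created globals work, but a plain dict keyed by face index is easier to inspect; a minimal equivalent sketch (same descriptor and file naming as above):
```python
face_reps = {}
for j in range(len(faces_image)):
    face_reps[j] = vgg_face_descriptor.predict(
        preprocess_image('/content/face{0}.jpeg'.format(j)))[0, :]
```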
###Markdown
Let's verify the length of one face representation!
###Code
print(len(facerep1))
###Output
2622
###Markdown
We now have our test image representations. All we need now is to load an image of Thomas Shelby and verify whether it is the same person in both images. -------------------------------------------------------------------------------------------- **Shelby Image Cornering**
###Code
shelby=cv2.imread('/content/gdrive/MyDrive/AlgorithmxFellowship/shelby_test.jpg')
shelby1=cv2.cvtColor(shelby,cv2.COLOR_BGR2RGB)
shelby_in_gs=cv2.cvtColor(shelby1,cv2.COLOR_RGB2GRAY)
shelby_face= face_detector.detectMultiScale(shelby_in_gs,1.3,5)
shelby_face
for (x_pos,y_pos,width,height) in shelby_face:
shelby_img=shelby[y_pos:y_pos+height,x_pos:x_pos+width]
plt.imshow(shelby_img)
im = Image.fromarray(shelby_img)
im.save("Shelby.jpeg")
shelby_representation = vgg_face_descriptor.predict(preprocess_image('/content/Shelby.jpeg'))[0,:]
###Output
_____no_output_____
###Markdown
**End of Shelby Image Cornering** --- Now we have the representations of the images in our 1D array format. Next we need to compare the cosine and Euclidean similarities, as used in **Siamese Networks**.
###Code
def CosineSimilarity(source, test):
a = np.matmul(np.transpose(source), test)
b = np.sum(np.multiply(source, source))
c = np.sum(np.multiply(test, test))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def EuclideanDistance(source, test):
euclidean_distance = source - test
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
###Output
_____no_output_____
###Markdown
These two values measure the closeness between the images. If they are close enough, the two faces belong to the same person; if not, they are considered different. We hence set thresholds for the decision.
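Concretely, the first function below computes the cosine distance $d_{\cos}(a,b) = 1 - \frac{a^T b}{\lVert a \rVert\, \lVert b \rVert}$ and the second the Euclidean distance $\lVert a - b \rVert_2$; a pair counts as a match when $d_{\cos} < 0.40$.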
###Code
thresh_cosine=0.40
thresh_euclidean=120
def MatchFace(rep1, rep2):
    cosine_similarity = CosineSimilarity(rep1, rep2)
    euclidean_distance = EuclideanDistance(rep1, rep2)  # computed for reference; only the cosine threshold is used below
    return cosine_similarity < thresh_cosine
MatchFace(shelby_representation,facerep7)
MatchFace(shelby_representation,facerep6)
for i in range(len(faces_image)):
if MatchFace(shelby_representation,vals[i]):
idx=i
idx
###Output
_____no_output_____
###Markdown
--- **Now onto the final part of our code: The Blurred image output:**
###Code
faces_image.shape
number,numberofelems=faces_image.shape
startlimit=idx*numberofelems
endlimit=(idx+1)*(numberofelems)
iter_idx=np.arange(startlimit,endlimit)
fin_faces=np.delete(faces_image,iter_idx)
fin_faces= fin_faces.reshape((number-1,numberofelems))
for (x_pos,y_pos,width,height) in fin_faces:
test_image[y_pos:y_pos+height,x_pos:x_pos+width]=cv2.blur(test_image[y_pos:y_pos+height,x_pos:x_pos+width],ksize=(10,10))
###Output
_____no_output_____
###Markdown
**FINGERS CROSSED**
###Code
plt.figure(figsize=(16,10))
plt.imshow(test_image)
plt.axis('off')
###Output
_____no_output_____
###Markdown
**No training. Got them for good.**
###Code
im = Image.fromarray(test_image)
im.save("Anonymised_Image.jpeg")
###Output
_____no_output_____ |
Pylab1.ipynb | ###Markdown
Python program to find the area of a rectangle from its length and breadth (for example, a length of 10 units and a breadth of 20 units).
###Code
length=float(input("Type the length:"))
breadth=float(input("Type the breadth:"))
area= length * breadth
print("The area is:",area)
###Output
_____no_output_____
###Markdown
Python program to find the sum of two numbers.
###Code
a=int(input())
b=int(input())
Sum=a+b
print(Sum)
###Output
_____no_output_____
###Markdown
Check the memory address
###Code
num=29
id(num)
num1= 30-1
id(num1)
###Output
_____no_output_____
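###Markdown
The two ids above match because CPython caches small integers (roughly -5 to 256), so `29` and `30-1` end up referring to the same object. This is an implementation detail, not a language guarantee; a hedged illustration:
###Code
a = int('1000')              # built at runtime to avoid constant folding
b = int('1000')
print(a is b)                # typically False on CPython: 1000 is outside the cache
print(int('7') is int('7'))  # True on CPython: 7 is a cached small integer
###Output
_____no_output_____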
###Markdown
Program of explicit type conversion from int to float.
###Code
num1= 10
num2= 20
num3= num1+num2
print(num3)
print(type(num3))
num4= float(num1+num2)
print(num4)
print(type(num4))
###Output
_____no_output_____
###Markdown
Program to check if a number is prime or not. A number that is divisible only by itself and 1.
###Code
num = int(input("Enter a number: "))
flag = False
# prime numbers are greater than 1
if num > 1:
# check for factors
for i in range(2, num):
if (num % i) == 0:
# if factor is found, set flag to True
flag = True
# break out of loop
break
if flag:
print("Not Prime")
else:
print("Prime")
# A shorter variant using Python's for-else: the else branch runs only if no factor was found
num=int(input())
if num > 1:
    for i in range(2, num):
        if (num % i) == 0:
            print("Not Prime")
            break
    else:
        print("Prime")
else:
    print("Not Prime")
###Output
_____no_output_____
###Markdown
Python Program to find factorial of a number. factorial of n=n*(n-1)*(n-2)*....
###Code
n=int(input())
def factorial(n):
fac=1
for i in range(0,n):
fac*=n-i
print(fac)
factorial(n)
###Output
_____no_output_____
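###Markdown
For comparison, the standard library already provides this via `math.factorial` (it raises ValueError for negative inputs):
###Code
import math
print(math.factorial(6))  # 720, matching the loop above
###Output
_____no_output_____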
###Markdown
Python program to print factors.
###Code
def fac(n):
for i in range(1,n+1):
if(n%i)==0:
print(i)
fac(5)
fac(6)
###Output
_____no_output_____
###Markdown
Python Program to print all the prime numbers in an interval.
###Code
def factinterval(lower,upper):
for num in range(lower, upper + 1):
# all prime numbers are greater than 1
if num > 1:
for i in range(2, num):
if (num % i) == 0:
break
else:
print(num)
return
factinterval(2,5)
###Output
_____no_output_____
###Markdown
Python program to find Armstrong numbers in an interval. An Armstrong number is equal to the sum of its digits, each raised to the power of the number of digits. For example 0, 1, 153, 370, 371 and 407 are Armstrong numbers.
###Code
def arm(n):
sum=0
digit=0
temp=n
order=len(str(n))
while n>0:
digit=n%10
sum+=digit**order
n=n//10
if sum==temp:
print("Yes, the number is Armstrong number.")
else:
print("No!")
#driver
n=int(input())
arm(n)
###Output
_____no_output_____
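###Markdown
The `arm` function above checks a single number; a small standalone wrapper (illustrative) scans a whole interval, as the heading suggests:
###Code
def armstrong_in_interval(lower, upper):
    for n in range(lower, upper + 1):
        order = len(str(n))
        if n == sum(int(d) ** order for d in str(n)):
            print(n)

armstrong_in_interval(0, 500)  # prints 0-9, 153, 370, 371 and 407
###Output
_____no_output_____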
###Markdown
Python Program to Find the Square Root.
###Code
def Squareroot(n):
    if n>=0:
        sq=n**0.5
        print("Square root is:",sq)
    else:
        print("Not defined for negative numbers; see the complex version below.")
n=int(input())
Squareroot(n)
###Output
_____no_output_____
###Markdown
For real or complex numbers
###Code
import cmath
def Squareroot_c(n):
nsqrt=cmath.sqrt(n)
print("The square root for the number is:",nsqrt)
n=eval(input())
Squareroot_c(n)
###Output
_____no_output_____
###Markdown
Python Program to Solve Quadratic Equation.
###Code
import cmath
def solve(a,b,c):
dis=b*b-4*a*c
sqrt_val=cmath.sqrt(abs(dis))
if dis>0:
print(" real and different roots ")
print((-b + sqrt_val)/(2 * a))
print((-b - sqrt_val)/(2 * a))
elif dis == 0:
print(" real and same roots")
print(-b / (2 * a))
else:
print("Complex Roots")
print(- b / (2 * a), " + i", sqrt_val)
print(- b / (2 * a), " - i", sqrt_val)
#driver
a=int(input())
b=int(input())
c=int(input())
solve(a,b,c)
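# Cross-check (illustrative; assumes numpy is installed): numpy.roots solves the
# same quadratic from its coefficient list [a, b, c]
import numpy as np
print(np.roots([a, b, c]))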
###Output
_____no_output_____ |
src/Example2_ReadInFileIllustration.ipynb | ###Markdown
This Jupyter notebook illustrates how to read data in from an external file [the notebook provides a simple illustration; users can easily modify and customize these examples for their own data storage scheme and/or preferred workflows] Motion Blur Filtering: A Statistical Approach for Extracting Confinement Forces & Diffusivity from a Single Blurred Trajectory Author: Chris Calderon Copyright 2015 Ursa Analytics, Inc. Licensed under the Apache License, Version 2.0 (the "License"); You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Cell below loads the required modules and packages
###Code
%matplotlib inline
#command above avoids using the "dreaded" pylab flag when launching ipython (always put magic command above as first arg to ipynb file)
import matplotlib.font_manager as font_manager
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as spo
import findBerglundVersionOfMA1 #this module builds off of Berglund's 2010 PRE parameterization (atypical MA1 formulation)
import MotionBlurFilter
import Ursa_IPyNBpltWrapper
###Output
_____no_output_____
###Markdown
Now that the required modules and packages are loaded, set parameters for simulating "Blurred" OU trajectories. Specific mixed continuous/discrete model:\begin{align}dr_t = & ({v}-{\kappa} r_t)dt + \sqrt{2 D}dB_t \\\psi_{t_i} = & \frac{1}{t_E}\int_{t_{i}-t_E}^{t_i} r_s ds + \epsilon^{\mathrm{loc}}_{t_i}\end{align}In the above equations, the parameter vector specifying the model is: $\theta = (\kappa,D,\sigma_{\mathrm{loc}},v)$. Statistically exact discretization of the above for uniform time spacing $\delta$ (non-uniform $\delta$ requires time dependent vectors and matrices below):\begin{align}r_{t_{i+1}} = & A + F r_{t_{i}} + \eta_{t_i} \\\psi_{t_i} = & H_A + H_Fr_{t_{i-1}} + \epsilon^{\mathrm{loc}}_{t_i} + \epsilon^{\mathrm{mblur}}_{t_i} \\\epsilon^{\mathrm{loc}}_{t_i} + & \epsilon^{\mathrm{mblur}}_{t_i} \sim \mathcal{N}(0,R_i) \\\eta_i \sim & \mathcal{N}(0,Q) \\t_{i-1} = & t_{i}-t_E \\ C = & \mathrm{cov}(\epsilon^{\mathrm{mblur}}_{t_i},\eta_{t_{i-1}}) \ne 0\end{align}Note: Kalman Filter (KF) and Motion Blur Filter (MBF) codes estimate $\sqrt{2D}$ directly as the "thermal noise" parameter. For situations where users would like to read data in from an external source, many options exist. In the cell below, we show how to read in a text file and process the data assuming the text file contains two columns: one column with the 1D measurements and one with localization standard deviation vs. time estimates. The code chunk below sets up some default variables (tunable values indicated by comments below). Note that for multivariate signals, the chunks below can readily be modified to process x/y or x/y/z measurements separately. Future work will address estimating 2D/3D models with the MBF (computational [not theoretical] issues exist in this case); however, the code currently provides diagnostic information to determine if unmodeled multivariate interaction effects are important (see the main paper and Calderon, Weiss, Moerner, PRE 2014). Plot examples from other notebooks can be used to explore output within this notebook or another. Next, a simple example of "Batch" processing is illustrated.
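Before reading real files, it can also help to generate a synthetic trajectory in the expected two-column layout. The sketch below is a minimal Euler-Maruyama simulation of the blurred OU model above; the parameter values and the filename `MySimTraj_1.txt` are illustrative assumptions and not part of the original workflow (the batch loop below reads its own files from Google Drive).
###Code
# Minimal sketch: integrate dr = (v - kappa*r)dt + sqrt(2D)dB on a fine grid, average
# each exposure window to mimic motion blur, add static localization noise, and save
# measurement and localization-std columns in the format np.loadtxt reads below.
kappa, D, sigma_loc, v = 1.0, 0.5, 0.02, 0.0  # assumed theta = (kappa, D, sigma_loc, v)
deltaSim, T, n_sub = 25.0/1000.0, 500, 50     # frame time [s], frames, sub-steps per exposure
dt = deltaSim/n_sub
r = np.zeros(T*n_sub)
for t in range(1, len(r)):
    r[t] = r[t-1] + (v - kappa*r[t-1])*dt + np.sqrt(2.0*D*dt)*np.random.randn()
psi = r.reshape(T, n_sub).mean(axis=1) + sigma_loc*np.random.randn(T)
np.savetxt('MySimTraj_1.txt', np.column_stack([psi, sigma_loc*np.ones(T)]))
###Output
_____no_output_____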
###Code
filenameBase='./ExampleData/MyTraj_' #assume all trajectory files have this prefix (adjust file location accordingly)
N=20 #set the number of trajectories to read.
delta = 25./1000. #user must specify the time (in seconds) between observations. code provided assumes uniform continuous illumination
#NOTE: in this simple example, all trajectories assumed to be collected with exposure time delta input above
#now loop over trajectories and store MLE results
resBatch=[] #variable for storing MLE output
#loop below just copies info from cell below (only difference is file to read is modified on each iteration of the loop)
for i in range(N):
filei = filenameBase + str(i+1) + '.txt'
print ''
print '^'*100
print 'Reading in file: ', filei
#first load the sample data stored in text file. here we assume two columns of numeric data (col 1 are measurements)
data = np.loadtxt(filei)
(T,ncol)=data.shape
#above we just used a simple default text file reader; however, any means of extracting the data and
#casting it to a Tx2 array (or Tx1 if no localization accuracy info available) will work.
ymeas = data[:,0]
locStdGuess = data[:,1] #if no localization info available, just set this to zero or a reasonable estimate of localization error [in nm]
Dguess = 0.1 #input a guess of the local diffusion coefficient of the trajectory to seed the MLE searches (need not be accurate)
velguess = np.mean(np.diff(ymeas))/delta #input a guess of the velocity of the trajectory to seed the MLE searches (need not be accurate)
MA=findBerglundVersionOfMA1.CostFuncMA1Diff(ymeas,delta) #construct an instance of the Berglund estimator
res = spo.minimize(MA.evalCostFuncVel, (np.sqrt(Dguess),np.median(locStdGuess),velguess), method='nelder-mead')
#output Berglund estimation result.
print 'Berglund MLE',res.x[0]*np.sqrt(2),res.x[1],res.x[-1]
print '-'*100
#obtain crude estimate of mean reversion parameter. see Calderon, PRE (2013)
kappa1 = np.log(np.sum(ymeas[1:]*ymeas[0:-1])/(np.sum(ymeas[0:-1]**2)-T*res.x[1]**2))/-delta
#construct an instance of the MBF estimator
BlurF = MotionBlurFilter.ModifiedKalmanFilter1DwithCrossCorr(ymeas,delta,StaticErrorEstSeq=locStdGuess)
#use call below if no localization info available
# BlurF = MotionBlurFilter.ModifiedKalmanFilter1DwithCrossCorr(ymeas,delta)
parsIG=np.array([np.abs(kappa1),res.x[0]*np.sqrt(2),res.x[1],res.x[-1]]) #kick off MLE search with "warm start" based on simpler model
#kick off nonlinear cost function optimization given data and initial guess
resBlur = spo.minimize(BlurF.evalCostFunc,parsIG, method='nelder-mead')
print 'parsIG for Motion Blur filter',parsIG
print 'Motion Blur MLE result:',resBlur
#finally evaluate diagnostic statistics at MLE just obtained
loglike,xfilt,pit,Shist =BlurF.KFfilterOU1d(resBlur.x)
print np.mean(pit),np.std(pit)
print 'crude assessment of model: check above mean is near 0.5 and std is approximately',np.sqrt(1/12.)
print 'statements above based on generalized residual U[0,1] shape'
print 'other hypothesis tests that can use the PIT sequence above are outlined/referenced in the paper.'
#finally just store the MLE of the MBF in a list
resBatch.append(resBlur.x)
#Summarize the results of the above N simulations
#
resSUM=np.array(resBatch)
print 'Blur medians',np.median(resSUM[:,0]),np.median(resSUM[:,1]),np.median(resSUM[:,2]),np.median(resSUM[:,3])
print 'means',np.mean(resSUM[:,0]),np.mean(resSUM[:,1]),np.mean(resSUM[:,2]),np.mean(resSUM[:,3])
print 'std',np.std(resSUM[:,0]),np.std(resSUM[:,1]),np.std(resSUM[:,2]),np.std(resSUM[:,3])
print '^'*100 ,'\n\n'
###Output
Blur medians 1.27208496571 0.436395237852 0.0162757902936 0.0759079000468
means 1.32234434672 0.434561850166 0.0160360992158 0.0655553122287
std 0.380912845778 0.0234327797792 0.00299196795637 0.182354637945
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
tutorials/week3_pandas/obspy.ipynb | ###Markdown
Introduction to obspy The obspy package is very useful for downloading seismic data and doing some signal processing on them. Most of its signal processing methods are based on the corresponding routines in the Python package scipy. First we import useful packages.
###Code
import obspy
import obspy.clients.earthworm.client as earthworm
import obspy.clients.fdsn.client as fdsn
from obspy import read
from obspy import read_inventory
from obspy import UTCDateTime
from obspy.core.stream import Stream
from obspy.signal.cross_correlation import correlate
import matplotlib.pyplot as plt
import numpy as np
import os
import urllib.request
%matplotlib inline
###Output
_____no_output_____
###Markdown
We are going to download data from an array of seismic stations.
###Code
network = 'XU'
arrayName = 'BS'
staNames = ['BS01', 'BS02', 'BS03', 'BS04', 'BS05', 'BS06', 'BS11', 'BS20', 'BS21', 'BS22', 'BS23', 'BS24', 'BS25', \
'BS26', 'BS27']
chaNames = ['SHE', 'SHN', 'SHZ']
staCodes = 'BS01,BS02,BS03,BS04,BS05,BS06,BS11,BS20,BS21,BS22,BS23,BS24,BS25,BS26,BS27'
chans = 'SHE,SHN,SHZ'
###Output
_____no_output_____
###Markdown
We also need to define the time period for which we want to download data.
###Code
myYear = 2010
myMonth = 8
myDay = 17
myHour = 6
TDUR = 2 * 3600.0
Tstart = UTCDateTime(year=myYear, month=myMonth, day=myDay, hour=myHour)
Tend = Tstart + TDUR
###Output
_____no_output_____
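###Markdown
`UTCDateTime` objects behave like timestamps measured in seconds, so adding a float shifts them in time and subtracting two of them gives a duration (a quick illustrative check):
###Code
print(Tstart.isoformat())  # 2010-08-17T06:00:00
print(Tend - Tstart)       # 7200.0 seconds
###Output
_____no_output_____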
###Markdown
We start by defining the client for downloading the data
###Code
fdsn_client = fdsn.Client('IRIS')
###Output
_____no_output_____
###Markdown
Download the seismic data for all the stations in the array.
###Code
Dtmp = fdsn_client.get_waveforms(network=network, station=staCodes, location='--', channel=chans, starttime=Tstart, \
endtime=Tend, attach_response=True)
###Output
_____no_output_____
###Markdown
Some stations did not record the entire two hours. We delete these and keep only stations with a complete two-hour recording.
###Code
ntmp = []
for ksta in range(0, len(Dtmp)):
ntmp.append(len(Dtmp[ksta]))
ntmp = max(set(ntmp), key=ntmp.count)
D = Dtmp.select(npts=ntmp)
###Output
_____no_output_____
###Markdown
This is a function for plotting after each operation on the data.
###Code
def plot_2hour(D, channel, offset, title):
""" Plot seismograms
D = Stream
channel = 'E', 'N', or 'Z'
offset = Offset between two stations
title = Title of the figure
"""
fig, ax = plt.subplots(figsize=(15, 10))
Dplot = D.select(component=channel)
t = (1.0 / Dplot[0].stats.sampling_rate) * np.arange(0, Dplot[0].stats.npts)
for ksta in range(0, len(Dplot)):
plt.plot(t, ksta * offset + Dplot[ksta].data, 'k')
plt.xlim(np.min(t), np.max(t))
plt.ylim(- offset, len(Dplot) * offset)
plt.title(title, fontsize=24)
plt.xlabel('Time (s)', fontsize=24)
ax.set_yticklabels([])
ax.tick_params(labelsize=20)
plot_2hour(D, 'E', 1200.0, 'Downloaded data')
###Output
_____no_output_____
###Markdown
We start by detrending the data.
###Code
D
D.detrend(type='linear')
plot_2hour(D, 'E', 1200.0, 'Detrended data')
###Output
_____no_output_____
###Markdown
We then taper the data.
###Code
D.taper(type='hann', max_percentage=None, max_length=5.0)
plot_2hour(D, 'E', 1200.0, 'Tapered data')
###Output
_____no_output_____
###Markdown
And we remove the instrument response.
###Code
D.remove_response(output='VEL', pre_filt=(0.2, 0.5, 10.0, 15.0), water_level=80.0)
plot_2hour(D, 'E', 1.0e-6, 'Deconvolving the instrument response')
###Output
_____no_output_____
###Markdown
Then we filter the data.
###Code
D.filter('bandpass', freqmin=2.0, freqmax=8.0, zerophase=True)
plot_2hour(D, 'E', 1.0e-6, 'Filtered data')
###Output
_____no_output_____
###Markdown
And we resample the data.
###Code
D.interpolate(100.0, method='lanczos', a=10)
D.decimate(5, no_filter=True)
plot_2hour(D, 'E', 1.0e-6, 'Resampled data')
###Output
_____no_output_____
###Markdown
We can also compute the envelope of the signal.
###Code
for index in range(0, len(D)):
D[index].data = obspy.signal.filter.envelope(D[index].data)
plot_2hour(D, 'E', 1.0e-6, 'Envelope')
###Output
_____no_output_____
###Markdown
You can also download the instrument response separately:
###Code
network = 'XQ'
station = 'ME12'
channels = 'BHE,BHN,BHZ'
location = '01'
###Output
_____no_output_____
###Markdown
This is to download the instrument response.
###Code
fdsn_client = fdsn.Client('IRIS')
inventory = fdsn_client.get_stations(network=network, station=station, level='response')
inventory.write('response/' + network + '_' + station + '.xml', format='STATIONXML')
###Output
_____no_output_____
###Markdown
We then read the data and start processing the signal as we did above.
###Code
fdsn_client = fdsn.Client('IRIS')
Tstart = UTCDateTime(year=2008, month=4, day=1, hour=4, minute=49)
Tend = UTCDateTime(year=2008, month=4, day=1, hour=4, minute=50)
D = fdsn_client.get_waveforms(network=network, station=station, location=location, channel=channels, starttime=Tstart, endtime=Tend, attach_response=False)
D.detrend(type='linear')
D.taper(type='hann', max_percentage=None, max_length=5.0)
###Output
_____no_output_____
###Markdown
But we now use the xml file that contains the instrument response to remove it from the signal.
###Code
filename = 'response/' + network + '_' + station + '.xml'
inventory = read_inventory(filename, format='STATIONXML')
D.attach_response(inventory)
D.remove_response(output='VEL', pre_filt=(0.2, 0.5, 10.0, 15.0), water_level=80.0)
###Output
_____no_output_____
###Markdown
We resume signal processing.
###Code
D.filter('bandpass', freqmin=2.0, freqmax=8.0, zerophase=True)
D.interpolate(100.0, method='lanczos', a=10)
D.decimate(5, no_filter=True)
###Output
_____no_output_____
###Markdown
And we plot.
###Code
t = (1.0 / D[0].stats.sampling_rate) * np.arange(0, D[0].stats.npts)
plt.plot(t, D[0].data, 'k')
plt.xlim(np.min(t), np.max(t))
plt.title('Single waveform', fontsize=18)
plt.xlabel('Time (s)', fontsize=18)
###Output
_____no_output_____
###Markdown
Not all seismic data are stored on IRIS. This is an example of how to download data from the Northern California Earthquake Data Center (NCEDC).
###Code
network = 'BK'
station = 'WDC'
channels = 'BHE,BHN,BHZ'
location = '--'
###Output
_____no_output_____
###Markdown
This is to download the instrument response.
###Code
url = 'http://service.ncedc.org/fdsnws/station/1/query?net=' + network + '&sta=' + station + '&level=response&format=xml&includeavailability=true'
s = urllib.request.urlopen(url)
contents = s.read()
file = open('response/' + network + '_' + station + '.xml', 'wb')
file.write(contents)
file.close()
###Output
_____no_output_____
###Markdown
And this is to download the data.
###Code
Tstart = UTCDateTime(year=2007, month=2, day=12, hour=1, minute=11, second=54)
Tend = UTCDateTime(year=2007, month=2, day=12, hour=1, minute=12, second=54)
request = 'waveform_' + station + '.request'
file = open(request, 'w')
message = '{} {} {} {} '.format(network, station, location, channels) + \
'{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d} '.format( \
Tstart.year, Tstart.month, Tstart.day, Tstart.hour, Tstart.minute, Tstart.second) + \
'{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}\n'.format( \
Tend.year, Tend.month, Tend.day, Tend.hour, Tend.minute, Tend.second)
file.write(message)
file.close()
miniseed = 'station_' + station + '.miniseed'
request = 'curl -s --data-binary @waveform_' + station + '.request -o ' + miniseed + ' http://service.ncedc.org/fdsnws/dataselect/1/query'
os.system(request)
D = read(miniseed)
D.detrend(type='linear')
D.taper(type='hann', max_percentage=None, max_length=5.0)
filename = 'response/' + network + '_' + station + '.xml'
inventory = read_inventory(filename, format='STATIONXML')
D.attach_response(inventory)
D.remove_response(output='VEL', pre_filt=(0.2, 0.5, 10.0, 15.0), water_level=80.0)
D.filter('bandpass', freqmin=2.0, freqmax=8.0, zerophase=True)
D.interpolate(100.0, method='lanczos', a=10)
D.decimate(5, no_filter=True)
t = (1.0 / D[0].stats.sampling_rate) * np.arange(0, D[0].stats.npts)
plt.plot(t, D[0].data, 'k')
plt.xlim(np.min(t), np.max(t))
plt.title('Single waveform', fontsize=18)
plt.xlabel('Time (s)', fontsize=18)
###Output
_____no_output_____ |
notebooks/Step-by-step user guide on train.py.ipynb | ###Markdown
train.py: What it does step by step This tutorial will break down what train.py does when it is run, and illustrate the functionality of some of the custom 'utils' functions that are called during a training run, in a way that is easy to understand and follow. Note that parts of the functionality of train.py depend on the config.json file you are using. This tutorial is self-contained, and doesn't use a config file, but for more information on working with this file when using ProLoaF, see [this explainer](https://acs.pages.rwth-aachen.de/public/automation/plf/proloaf/docs/files-and-scripts/config/). Before proceeding to any of the sections below, please run the following code block:
###Code
import os
import sys
sys.path.append("../")
import pandas as pd
import utils.datahandler as dh
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Table of contents:[1. Dealing with missing values in the data](1.-Dealing-with-missing-values-in-the-data)[2. Selecting and scaling features](2.-Selecting-and-scaling-features)[3. Creating a dataframe to log training results](3.-Creating-a-dataframe-to-log-training-results)[4. Exploration](4.-Exploration)[5. Main run - creating the training model](5.-Main-run---creating-the-training-model)[6. Main run - training the model](6.-Main-run---training-the-model)[7. Updating the config, Saving the model & logs](7.-Updating-the-config,-saving-the-model-&-logs) 1. Dealing with missing values in the data The first thing train.py does after loading the dataset that was specified in your config file, is to check for any missing values, and fill them in as necessary. It does this using the function 'utils.datahandler.fill_if_missing'. In the following example, we will load some data that has missing values and examine what the 'fill_if_missing' function does. Please run the code block below to get started.
###Code
#Load the data sample and prep for use with datahandler functions
df = pd.read_csv("../data/fill_missing.csv", sep=";")
df['Time'] = pd.to_datetime(df['Time'])
df = df.set_index('Time')
df = df.astype(float)
df_missing_range = df.copy()
#Plot the data
df.iloc[0:194].plot(kind='line',y='DE_load_actual_entsoe_transparency', figsize = (12, 6), xlabel='Hours', use_index = False)
###Output
_____no_output_____
###Markdown
As should be clearly visible in the plot above, the data has some missing values. There is a missing range (a range refers to multiple adjacent values), from around 96-121, as well as two individual values that are missing, at 160 and 192. Please run the code block below to see how 'fill_if_missing' deals with these problems.
###Code
#Use fill_if_missing and plot the results
df=dh.fill_if_missing(df, periodicity=24)
df.iloc[0:192].plot(kind='line',y='DE_load_actual_entsoe_transparency', figsize = (12, 6), use_index = False)
#TODO: Test this again once interpolation is working
###Output
_____no_output_____
###Markdown
As we can see by the printed console messages, fill_if_missing first checks whether there are any missing values. If there are, it checks whether they are individual values or ranges, and handles these cases differently: Single missing values: These are simply replaced by the average of the values on either side. Missing range: If a range of values is missing, fill_if_missing will use the specified periodicity of the data to provide an estimate of the missing values, by averaging the ranges on either side of the missing range and then adapting the new values to fit the trend. If not specified, the periodicity has a default value of 1, but since we are using hourly data, we will use a periodicity of p = 24. For each missing value at a given position t in the range, fill_if_missing first searches backwards through the data at intervals equal to the periodicity of the data (i.e. t1 = t - 24\*n, n = 1, 2,...) until it finds an existing value. It then does the same thing searching forwards through the data (i.e. t2 = t + 24\*n, n = 1, 2,...), and then it sets the value at t equal to the average of t1 and t2. Run the code block below to see the result for the missing range at 95-121:
###Code
start = 95
end = 121
p = 24
seas = np.zeros(len(df_missing_range))
#fill the missing values
for t in range(start, end + 1):
p1 = p
p2 = p
while np.isnan(df_missing_range.iloc[t - p1, 0]):
p1 += p
while np.isnan(df_missing_range.iloc[t + p2, 0]):
p2 += p
seas[t] = (df_missing_range.iloc[t - p1, 0] + df_missing_range.iloc[t + p2, 0]) / 2
#plot the result
ax = plt.gca()
df_missing_range["Interpolated"] = pd.Series(len(seas))
for t in range(start, end + 1):
df_missing_range.iloc[t, 1] = seas[t]
df_missing_range.iloc[0:192].plot(kind='line',y='DE_load_actual_entsoe_transparency', figsize = (12, 6), use_index = False, ax = ax)
df_missing_range.iloc[0:192].plot(kind='line',y='Interpolated', figsize = (12, 6), use_index = False, ax = ax)
###Output
_____no_output_____
###Markdown
The missing values in the range between 95 and 121 have now been filled in, but the end points aren't continuous with the original data, and the new values don't take into account the trend in the data. To deal with this, the function uses the difference in slope between the start and end points of the missing data range, and the start and end points of the newly interpolated values, to offset the new values so that they line up with the original data:
###Code
print("Create two straight lines that connect the interpolated start and end points, and the original start and end points.\nThese capture the 'trend' in each case over the missing section")
trend1 = np.poly1d(
np.polyfit([start, end], [seas[start], seas[end]], 1)
)
trend2 = np.poly1d(
np.polyfit(
[start - 1, end + 1],
[df_missing_range.iloc[start - 1, 0], df_missing_range.iloc[end + 1, 0]],
1,
)
)
#by subtracting the trend of the interpolated data, then adding the trend of the original data, we match the filled in
#values to what we had before
for t in range(start, end + 1):
df_missing_range.iloc[t, 1] = seas[t] - trend1(t) + trend2(t)
#plot the result
ax = plt.gca()
df_missing_range.iloc[0:192].plot(kind='line',y='DE_load_actual_entsoe_transparency', figsize = (12, 6), use_index = False, ax = ax)
df_missing_range.iloc[0:192].plot(kind='line',y='Interpolated', figsize = (12, 6), use_index = False, ax = ax)
###Output
Create two straight lines that connect the interpolated start and end points, and the original start and end points.
These capture the 'trend' in each case over the missing section
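###Markdown
As a toy illustration (not part of train.py), the snippet below applies the single-value rule by hand: one isolated gap is filled with the mean of its two neighbours, exactly what fill_if_missing does for individual missing values.
###Code
#Hypothetical mini-example of the single-value fill
import numpy as np
import pandas as pd
s = pd.Series([10.0, np.nan, 14.0])
gap = int(np.where(s.isna())[0][0])            # position of the isolated gap
s.iloc[gap] = (s.iloc[gap - 1] + s.iloc[gap + 1]) / 2
print(s.tolist())                              # [10.0, 12.0, 14.0]
###Output
_____no_output_____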
###Markdown
**Please note:**- Missing data ranges at the beginning or end of the data are handled differently (TODO: Explain how)- Though the examples shown here use a single column for simplicity's sake, fill_if_missing automatically works on every column (feature) of your original dataframe. 2. Selecting and scaling features The next thing train.py does is to select and scale features in the data as specified in the relevant config file, using the function 'utils.datahandler.scale_all'. Consider the following dataset:
###Code
#Load and then plot the new dataset
df_to_scale = pd.read_csv("../data/opsd.csv", sep=";", index_col=0)
df_to_scale.plot(kind='line',y='AT_load_actual_entsoe_transparency', figsize = (8, 4), use_index = False)
df_to_scale.plot(kind='line',y='AT_temperature', figsize = (8, 4), use_index = False)
df_to_scale.head()
###Output
_____no_output_____
###Markdown
The above dataset has 55 features (columns), some of which are at totally different scales, as is clearly visible when looking at the y-axes of the above graphs for load and temperature data from Austria. Depending on our dataset, we may not want to use all of the available features for training. If we wanted to select only the two features highlighted above for training, we could do so by editing the value at the "feature_groups" key in the config.json, which takes the form of a list of dicts like the one below:
###Code
two_features = [
{
"name": "main",
"scaler": [
"minmax",
-1.0,
1.0
],
"features": [
"AT_load_actual_entsoe_transparency",
"AT_temperature"
]
}
]
###Output
_____no_output_____
###Markdown
Each dict in the list represents a feature group, and should have the following keys:- "name" - the name of the feature group- "scaler" - the scaler used by this feature group (value: a list with entries for scaler name and scaler specific attributes.) Valid scaler names include 'standard', 'robust' or 'minmax'. For more information on these scalers and their use, please see the [scikit-learn documentation](https://scikit-learn.org/stable/modules/preprocessing.htmlpreprocessing) or [the documentation for scale_all](https://acs.pages.rwth-aachen.de/public/automation/plf/proloaf/reference/proloaf/proloaf/utils/datahandler.htmlscale_all)- "features" - which features are to be included in the group (value: a list containing the feature names)The 'scale_all' function will only return the selected features, scaled using the scaler assigned to their feature group.Here we only have one group, 'main', which uses the 'minmax' scaler:
###Code
#Select, scale and plot the features as specified by the two_features list (see above)
selected_features, scalers = dh.scale_all(df_to_scale, two_features)
selected_features.plot(figsize = (12, 6), use_index = False)
print("Currently used scalers:")
print(scalers)
###Output
Currently used scalers:
{'main': MinMaxScaler(feature_range=(-1.0, 1.0))}
###Markdown
As you can see, both of our features (load and temperature for Austria) have now been scaled to fit within the same range (between -1 and 1). Let's say we also wanted to include the weekday data from the data set in our training. Let us first take a look at what the weekday features look like. Here are the first 500 hours (approx. 3 weeks) of weekday_0:
###Code
df_to_scale[:500].plot(kind='line',y='weekday_0', figsize = (12, 4), use_index = False)
###Output
_____no_output_____
###Markdown
As we can see, these features are already within the range [0,1] and thus don't need to be scaled. So we can include them in a second feature group called 'aux'. Note, features which we deliberately aren't scaling should go in a group with this name. The value of the "feature_groups" key in the config.json could then look like this:
###Code
feature_groups = [
{
"name": "main",
"scaler": [
"minmax",
0.0,
1.0
],
"features": [
"AT_load_actual_entsoe_transparency",
"AT_temperature"
]
},
{
"name": "aux",
"scaler": None,
"features": [
"weekday_0",
"weekday_1",
"weekday_2",
"weekday_3",
"weekday_4",
"weekday_5",
"weekday_6"
]
}
]
###Output
_____no_output_____
###Markdown
We now have two feature groups, 'main' (which uses the 'minmax' scaler, this time with a range between 0 and 1) and 'aux' (which uses no scaler):
###Code
#Select, scale and plot the features as specified by feature_groups (see above)
selected_features, scalers = dh.scale_all(df_to_scale,feature_groups)
selected_features[23000:28000].plot(figsize = (12, 6), use_index = False)
print("Currently used scalers:")
print(scalers)
###Output
Currently used scalers:
{'main': MinMaxScaler(feature_range=(0.0, 1.0)), 'aux': None}
###Markdown
We can see that all of our selected features now fit between 0 and 1. From this point onward, train.py will only work with our selected, scaled features.
###Code
print("Currently selected and scaled features: ")
print(selected_features.columns)
###Output
Currently selected and scaled features:
Index(['AT_load_actual_entsoe_transparency', 'AT_temperature', 'weekday_0',
'weekday_1', 'weekday_2', 'weekday_3', 'weekday_4', 'weekday_5',
'weekday_6'],
dtype='object')
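###Markdown
The scaler objects returned by scale_all can also map scaled values back to the original units later on, e.g. for reporting forecasts. A sketch, assuming the 'main' scaler was fitted on exactly these two columns:
###Code
#Invert the scaling for the 'main' feature group (illustrative)
main_cols = ["AT_load_actual_entsoe_transparency", "AT_temperature"]
recovered = scalers['main'].inverse_transform(selected_features[main_cols])
print(recovered[:2])  # values back on the original load/temperature scales
###Output
_____no_output_____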
###Markdown
Selecting scalers When selecting which scalers to use, it is important that whichever one we choose does not adversely affect the shape of the distribution of our data, as this would distort our results. For example, this is the distribution of the feature "AT_load_actual_entsoe_transparency" before scaling:
###Code
df_unscaled = pd.read_csv("../data/opsd.csv", sep=";", index_col=0)
df_unscaled["AT_load_actual_entsoe_transparency"].plot.kde()
###Output
_____no_output_____
###Markdown
And this is the distribution after scaling using the minmax scaler, as we did above:
###Code
selected_features["AT_load_actual_entsoe_transparency"].plot.kde()
###Output
_____no_output_____
###Markdown
It is clear that in both cases, the distribution functions have a similar shape. The axes are scaled differently, but both graphs have maxima to the right of zero. On the other hand, this is what the distribution looks like if we use the robust scaler on this data:
###Code
feature_robust = [
{
"name": "main",
"scaler": [
"robust",
0.25,
0.75
],
"features": [
"AT_load_actual_entsoe_transparency",
]
}
]
selected_feat_robust, scalers_robust = dh.scale_all(df_to_scale,feature_robust)
selected_feat_robust["AT_load_actual_entsoe_transparency"].plot.kde()
###Output
_____no_output_____
###Markdown
Not only have the axes been scaled, but the data has also been shifted so that the maxima are centered around zero. The same problem can be observed with the "standard" scaler:
###Code
feature_std = [
{
"name": "main",
"scaler": [
"standard"
],
"features": [
"AT_load_actual_entsoe_transparency",
]
}
]
selected_feat_std, scalers_std = dh.scale_all(df_to_scale,feature_std)
selected_feat_std["AT_load_actual_entsoe_transparency"].plot.kde()
###Output
_____no_output_____ |
Part1-Data-Preprocessing/Section 3 - Data Preprocessing in Python/Python/data_preprocessing_tools.ipynb | ###Markdown
Data Preprocessing Tools Importing the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
dataset
dataset.shape
print(X)
print(y)
###Output
['No' 'Yes' 'No' 'No' 'Yes' 'Yes' 'No' 'Yes' 'No' 'Yes']
###Markdown
Taking care of missing data
###Code
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
print(X)
###Output
[['France' 44.0 72000.0]
['Spain' 27.0 48000.0]
['Germany' 30.0 54000.0]
['Spain' 38.0 61000.0]
['Germany' 40.0 63777.77777777778]
['France' 35.0 58000.0]
['Spain' 38.77777777777778 52000.0]
['France' 48.0 79000.0]
['Germany' 50.0 83000.0]
['France' 37.0 67000.0]]
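###Markdown
SimpleImputer also supports other strategies ('median', 'most_frequent', 'constant'); for example, 'median' is less sensitive to outliers than 'mean'. A small illustrative check:
###Code
imp_median = SimpleImputer(missing_values=np.nan, strategy='median')
print(imp_median.fit_transform([[1.0], [np.nan], [100.0]]))  # gap filled with 50.5
###Output
_____no_output_____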
###Markdown
Encoding categorical data Encoding the Independent Variable
###Code
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
print(X)
###Output
[[1.0 0.0 0.0 44.0 72000.0]
[0.0 0.0 1.0 27.0 48000.0]
[0.0 1.0 0.0 30.0 54000.0]
[0.0 0.0 1.0 38.0 61000.0]
[0.0 1.0 0.0 40.0 63777.77777777778]
[1.0 0.0 0.0 35.0 58000.0]
[0.0 0.0 1.0 38.77777777777778 52000.0]
[1.0 0.0 0.0 48.0 79000.0]
[0.0 1.0 0.0 50.0 83000.0]
[1.0 0.0 0.0 37.0 67000.0]]
###Markdown
Encoding the Dependent Variable
###Code
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
print(y)
###Output
[0 1 0 0 1 1 0 1 0 1]
###Markdown
Splitting the dataset into the Training set and Test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
print(X_train)
print(X_test)
print(y_train)
print(y_test)
###Output
[0 1]
###Markdown
Feature Scaling
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, 3:] = sc.fit_transform(X_train[:, 3:])
X_test[:, 3:] = sc.transform(X_test[:, 3:])
print(X_train)
print(X_test)
###Output
[[0.0 1.0 0.0 -1.4661817944830124 -0.9069571034860727]
[1.0 0.0 0.0 -0.44973664397484414 0.2056403393225306]]
|
ipynb/US-Kansas.ipynb | ###Markdown
United States: Kansas* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="US", region="Kansas", weeks=5);
overview(country="US", region="Kansas");
compare_plot(country="US", region="Kansas");
# load the data
cases, deaths = get_country_data("US", "Kansas")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
United States: Kansas* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="US", region="Kansas");
# load the data
cases, deaths, region_label = get_country_data("US", "Kansas")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
United States: Kansas* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="US", region="Kansas", weeks=5);
overview(country="US", region="Kansas");
compare_plot(country="US", region="Kansas");
# load the data
cases, deaths = get_country_data("US", "Kansas")
# get population of the region for future normalisation:
inhabitants = population(country="US", region="Kansas")
print(f'Population of country="US", region="Kansas": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kansas.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____ |
dev/21_vision_learner.ipynb | ###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
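###Markdown
A quick shape check (illustrative; assumes `torch` is in scope from the imports above). With the default concat pooling, the `nf` passed to `create_head` must be twice the number of channels in the incoming feature map:
###Code
tst_head = create_head(10, 3)    # nf=10 because concat pooling doubles 5 channels
x = torch.randn(4, 5, 7, 7)      # fake feature map: batch 4, 5 channels, 7x7
print(tst_head(x).shape)         # torch.Size([4, 3])
###Output
_____no_output_____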
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
a = create_head(5,5)
a
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(types=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_resnet_meta}, models.xresnet.xresnet34: {**_resnet_meta},
models.xresnet.xresnet50 :{**_resnet_meta}, models.xresnet.xresnet101:{**_resnet_meta},
models.xresnet.xresnet152:{**_resnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
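###Markdown
A quick look-up (illustrative) showing how this metadata is consumed downstream:
###Code
meta = model_meta[models.resnet34]
print(meta['cut'])   # -2: drop the final pooling and fc layers
body = create_body(models.resnet34, pretrained=False, cut=meta['cut'])
print(len(body))     # 8 modules kept in the body for resnet34
###Output
_____no_output_____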
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activation inferred from `dbunch` by `get_c`. It might be `pretrained` and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(types=(PILImage, PILMask),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
This cell doesn't have an export destination and was ignored:
e
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda pretrained : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False, lin_first=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], nc))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
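# A sketch using only the documented `lin_ftrs` parameter: extra hidden sizes
# insert additional Linear blocks between the pooled features and the `nc`
# outputs (the name `tst_custom` is ours, for illustration).
tst_custom = create_head(5, 10, lin_ftrs=[256, 128])
tst_custom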
#hide
mods = list(tst.children())
test_eq(len(mods), 9)
assert isinstance(mods[2], nn.BatchNorm1d)
assert isinstance(mods[-1], nn.Linear)
tst = create_head(5, 10, lin_first=True)
mods = list(tst.children())
test_eq(len(mods), 8)
assert isinstance(mods[2], nn.Dropout)
#export
from fastai2.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-3, 'split':_xresnet_split }
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
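# A sketch of how `cnn_learner` (below) consumes this table: architectures not
# listed fall back to `_default_meta`.
meta = model_meta.get(models.resnet34, _default_meta)
test_eq(meta['cut'], -2)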
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
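# Note: with the default `pretrained=True`, `cnn_learner` calls `learn.freeze()`,
# so only the head's parameter group (defined by the `split` in `model_meta`)
# is trained until the model is unfrozen.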
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
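# Because these are `@typedispatch` overloads, the variant that runs is chosen
# from the runtime types of `x` and `y`, so one entry point can render
# classification, point/bbox, and segmentation results appropriately.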
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Learner for the vision applications > All the functions necessary to build `Learner` suitable for transfer learning in computer vision. Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda pretrained : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False, lin_first=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], nc))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#hide
mods = list(tst.children())
test_eq(len(mods), 9)
assert isinstance(mods[2], nn.BatchNorm1d)
assert isinstance(mods[-1], nn.Linear)
tst = create_head(5, 10, lin_first=True)
mods = list(tst.children())
test_eq(len(mods), 8)
assert isinstance(mods[2], nn.Dropout)
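# With `lin_first=True`, Dropout comes first and a final Linear is appended,
# giving 8 top-level modules instead of the 9 produced by the default ordering.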
#export
from local.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-3, 'split':_xresnet_split }
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
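# `unet_learner` infers the input size from the first training item (falling
# back to one batch) so `DynamicUnet` can build matching cross connections.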
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
learn.fit_one_cycle(4)
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Learner for the vision applications > All the functions necessary to build `Learner` suitable for transfer learning in computer vision. Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
a = create_head(5,5)
a
tst = create_head(5, 10)
tst
#export
from local.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
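# `get_c` reads the number of target classes from the databunch; this is the
# same value `cnn_learner` passes as `nc` to `create_cnn_model` below.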
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_resnet_meta}, models.xresnet.xresnet34: {**_resnet_meta},
models.xresnet.xresnet50 :{**_resnet_meta}, models.xresnet.xresnet101:{**_resnet_meta},
models.xresnet.xresnet152:{**_resnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications > All the functions necessary to build `Learner` suitable for transfer learning in computer vision. Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda pretrained : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False, lin_first=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], nc))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#hide
mods = list(tst.children())
test_eq(len(mods), 9)
assert isinstance(mods[2], nn.BatchNorm1d)
assert isinstance(mods[-1], nn.Linear)
tst = create_head(5, 10, lin_first=True)
mods = list(tst.children())
test_eq(len(mods), 8)
assert isinstance(mods[2], nn.Dropout)
#export
from local.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
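# A quick anatomy note: `tst[0]` is the (pretrained) body and `tst[1]` the
# freshly built head; `apply_init` is run on the head only.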
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-3, 'split':_xresnet_split }
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 09b_vision_utils.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Learner for the vision applications > All the functions necessary to build `Learner` suitable for transfer learning in computer vision. Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(types=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
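# `get_c` above reports the number of categories inferred from the labels; the
# same call sizes the final Linear layer of the head in `cnn_learner` below.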
#export
def _default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
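# Added sketch (not in the original notebook): the metadata defaults can also
# be overridden explicitly; this mirrors `_resnet_meta` by hand.
learn2 = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(),
                     cut=-2, splitter=_resnet_split)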
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
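    # infer the input size from a dataset item when possible, else from one batch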
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(types=(PILImage, PILMask),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
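        # auto-cut: find the last top-level child that contains pooling and cut just before it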
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. When called through `cnn_learner`, it defaults to `model_meta[arch]['cut']` if `arch` is in `model_meta`; otherwise the model is cut just before the last layer that contains some pooling.
###Code
tst = lambda pretrained : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512] if lin_ftrs is None else [nf] + lin_ftrs
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-1) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-1) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn)
layers.append(nn.Linear(lin_ftrs[-1], nc))
    if bn_final: layers.append(nn.BatchNorm1d(nc, momentum=0.01))  # the head now ends with nn.Linear(..., nc)
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
from local.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-3, 'split':_xresnet_split }
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
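# Note (added): the `show_results` variants below are selected by `@typedispatch`
# on the (x, y) tensor types, so the most specific registered pair wins.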
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 71_callback_tensorboard.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
Converted xse_resnext.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. When called through `cnn_learner`, it defaults to `model_meta[arch]['cut']` if `arch` is in `model_meta`; otherwise the model is cut just before the last layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512] if lin_ftrs is None else [nf] + lin_ftrs
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-1) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-1) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn)
layers.append(nn.Linear(lin_ftrs[-1], nc))
    if bn_final: layers.append(nn.BatchNorm1d(nc, momentum=0.01))  # the head now ends with nn.Linear(..., nc)
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
from local.callback.hook import num_features_model
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
#TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
#export
def default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
#export
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_resnet_meta}, models.xresnet.xresnet34: {**_resnet_meta},
models.xresnet.xresnet50 :{**_resnet_meta}, models.xresnet.xresnet101:{**_resnet_meta},
models.xresnet.xresnet152:{**_resnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(blocks=(ImageBlock, MaskBlock),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
#export
@typedispatch
def show_results(x:TensorImage, y:(TensorImageBase, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(max_n))]
for x in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(x.itemgot(0),ctxs[1::2],range(max_n))]
return ctxs
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (Str(r), Float(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core_foundation.ipynb.
Converted 01a_core_utils.ipynb.
Converted 01b_core_dispatch.ipynb.
Converted 01c_core_transform.ipynb.
Converted 02_core_script.ipynb.
Converted 03_torchcore.ipynb.
Converted 03a_layers.ipynb.
Converted 04_data_load.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 09a_vision_data.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 70_callback_wandb.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. When called through `cnn_learner`, it defaults to `model_meta[arch]['cut']` if `arch` is in `model_meta`; otherwise the model is cut just before the last layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(types=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(types=(PILImage, PILMask),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_data_block.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. When called through `cnn_learner`, it defaults to `model_meta[arch]['cut']` if `arch` is in `model_meta`; otherwise the model is cut just before the last layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
a = create_head(5,5)
a
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True, init=nn.init.kaiming_normal_):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
tst = create_cnn_model(models.resnet18, 10, None, True)
#export
@delegates(create_cnn_model)
def cnn_config(**kwargs):
"Convenienc function to easily create a config for `create_cnn_model`"
return kwargs
pets = DataBlock(types=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
batch_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.xresnet.xresnet18 :{**_resnet_meta}, models.xresnet.xresnet34: {**_resnet_meta},
models.xresnet.xresnet50 :{**_resnet_meta}, models.xresnet.xresnet101:{**_resnet_meta},
models.xresnet.xresnet152:{**_resnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
###Output
_____no_output_____
###Markdown
`Learner` convenience functions
###Code
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a convnet style learner"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
###Output
_____no_output_____
###Markdown
The model is built from `arch` using the number of final activations inferred from `dbunch` by `get_c`. It can be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`). To customize the model creation, use `cnn_config` and pass the result to the `config` argument.
###Code
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(), config=cnn_config(ps=0.25))
#export
@delegates(models.unet.DynamicUnet.__init__)
def unet_config(**kwargs):
"Convenience function to easily create a config for `DynamicUnet`"
return kwargs
#export
@delegates(Learner.__init__)
def unet_learner(dbunch, arch, loss_func=None, pretrained=True, cut=None, splitter=None, config=None, **kwargs):
"Build a unet learner from `dbunch` and `arch`"
if config is None: config = {}
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, pretrained, ifnone(cut, meta['cut']))
try: size = dbunch.train_ds[0][0].size
except: size = dbunch.one_batch()[0].shape[-2:]
model = models.unet.DynamicUnet(body, get_c(dbunch), size, **config)
learn = Learner(dbunch, model, loss_func=loss_func, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
return learn
camvid = DataBlock(types=(PILImage, PILMask),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=lambda o: untar_data(URLs.CAMVID_TINY)/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(untar_data(URLs.CAMVID_TINY)/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(untar_data(URLs.CAMVID_TINY)/'codes.txt', dtype=str)
learn = unet_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), config=unet_config())
###Output
_____no_output_____
###Markdown
Show functions
###Code
#export
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, rows=None, cols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), rows=rows, cols=cols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_utils.ipynb.
Converted 01b_dispatch.ipynb.
Converted 01c_transform.ipynb.
Converted 02_script.ipynb.
Converted 03_torch_core.ipynb.
Converted 03a_layers.ipynb.
Converted 04_dataloader.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_data_block.ipynb.
Converted 08_vision_core.ipynb.
Converted 09_vision_augment.ipynb.
Converted 10_pets_tutorial.ipynb.
Converted 11_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 14a_callback_data.ipynb.
Converted 15_callback_hook.ipynb.
Converted 15a_vision_models_unet.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 21_vision_learner.ipynb.
Converted 22_tutorial_imagenette.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block_examples.ipynb.
Converted 60_medical_imaging.ipynb.
Converted 65_medical_text.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_notebook_test.ipynb.
Converted 95_index.ipynb.
Converted 96_data_external.ipynb.
Converted 97_utils_test.ipynb.
Converted notebook2jekyll.ipynb.
|
nbs/examples/migrating.ipynb | ###Markdown
Tutorial - Migrating from pure PyTorch> Incrementally adding fastai goodness to your PyTorch models Original PyTorch code Here's the MNIST training code from the official PyTorch examples (slightly reformatted for space, updated from AdaDelta to AdamW, and converted from a script to a notebook). There's a lot of code!
###Code
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx*len(data), len(train_loader.dataset),
100. * batch_idx/len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss,correct = 0,0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss/len(test_loader.dataset), correct, len(test_loader.dataset),
100. * correct/len(test_loader.dataset)))
batch_size,test_batch_size = 256,512
epochs,lr,gamma = 1,1e-2,0.7
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
###Output
_____no_output_____
###Markdown
Use the fastai training loop The most important step is to replace the custom training loop with fastai's. That means you can get rid of `train()`, `test()`, and the epoch loop above, and replace it all with just this:
###Code
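# (added note) `DataLoaders` wraps the two plain PyTorch loaders defined above;
# `Learner` then supplies the training loop, optimizer handling, and metrics.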
data = DataLoaders(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
###Output
_____no_output_____
###Markdown
fastai supports many schedulers. We recommend using 1cycle:
###Code
learn.fit_one_cycle(epochs, lr)
###Output
_____no_output_____
###Markdown
Tutorial - Migrating from pure PyTorch> Incrementally adding fastai goodness to your PyTorch models Original PyTorch code Here's the MNIST training code from the official PyTorch examples (slightly reformatted for space, updated from AdaDelta to AdamW, and converted from a script to a notebook). There's a lot of code!
###Code
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx*len(data), len(train_loader.dataset),
100. * batch_idx/len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss,correct = 0,0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss/len(test_loader.dataset), correct, len(test_loader.dataset),
100. * correct/len(test_loader.dataset)))
batch_size,test_batch_size = 256,512
epochs,lr,gamma = 1,1e-2,0.7
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
###Output
_____no_output_____
###Markdown
Use the fastai training loop The most important step is to replace the custom training loop with fastai's. That means you can get rid of `train()`, `test()`, and the epoch loop above, and replace it all with just this:
###Code
data = DataBunch(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
###Output
_____no_output_____
###Markdown
fastai supports many schedulers. We recommend using 1cycle:
###Code
learn.fit_one_cycle(epochs, lr)
###Output
_____no_output_____
###Markdown
Tutorial - Migrating from pure PyTorch> Incrementally adding fastai goodness to your PyTorch models Original PyTorch code Here's the MNIST training code from the official PyTorch examples (slightly reformatted for space, updated from AdaDelta to AdamW, and converted from a script to a notebook). There's a lot of code!
###Code
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
def train(model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx*len(data), len(train_loader.dataset),
100. * batch_idx/len(train_loader), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss,correct = 0,0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss/len(test_loader.dataset), correct, len(test_loader.dataset),
100. * correct/len(test_loader.dataset)))
batch_size,test_batch_size = 256,512
epochs,lr,gamma = 1,1e-2,0.7
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
###Output
_____no_output_____
###Markdown
Use the fastai training loop The most important step is to replace the custom training loop with fastai's. That means you can get rid of `train()`, `test()`, and the epoch loop above, and replace it all with just this:
###Code
data = DataLoaders(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
###Output
_____no_output_____
###Markdown
fastai supports many schedulers. We recommend using 1cycle:
###Code
learn.fit_one_cycle(epochs, lr)
###Output
_____no_output_____ |
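Other schedules plug into the same `Learner`. As a sketch (assuming fastai v2's `fit_flat_cos` is available, which trains at a flat learning rate and then anneals with cosine decay):
###Code
# a sketch, not run above: an alternative schedule with the same Learner
learn.fit_flat_cos(epochs, lr)
###Output
_____no_output_____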
docs/Diagram Widget.ipynb | ###Markdown
Diagram WidgetThe same _renderer_ that powers the [Diagram Document](./Diagram%20Document.ipynb) can be used as a computable _Jupyter Widget_, which offers even more power than the [Diagram Rich Display](./Diagram%20Rich%20Display.ipynb).
###Code
from ipywidgets import HBox, VBox, Textarea, jslink, jsdlink, FloatSlider, IntSlider, Checkbox, Text, SelectMultiple, Accordion
from lxml import etree
from traitlets import observe, link, dlink
from ipydrawio import Diagram
diagram = Diagram(layout=dict(min_height="80vh", flex="1"))
box = HBox([diagram])
box
###Output
_____no_output_____
###Markdown
valueA `Diagram.source`'s `value` trait is the raw drawio XML. You can use one document for multiple diagrams.> [graphviz2drawio](https://pypi.org/project/graphviz2drawio) is recommended if your goal is **give me some drawio XML from my data right now**.
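A minimal conversion sketch, per the graphviz2drawio README; the DOT string below is a made-up example:

```python
from graphviz2drawio import graphviz2drawio

dot = "digraph G { a -> b; b -> c; }"  # hypothetical input graph
xml = graphviz2drawio.convert(dot)     # returns drawio-flavored XML
diagram.source.value = xml
```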
###Code
Diagram(source=diagram.source, layout=dict(min_height="400px"))
diagram.source.value = '''<mxfile host="127.0.0.1" modified="2021-01-27T15:56:33.612Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36" etag="u04aDhBnb7c9tLWsiHn9" version="13.6.10">
<diagram id="x" name="Page-1">
<mxGraphModel dx="1164" dy="293" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
<root>
<mxCell id="0"/>
<mxCell id="1" parent="0"/>
<mxCell id="2" value="" style="edgeStyle=entityRelationEdgeStyle;startArrow=none;endArrow=none;segment=10;curved=1;" parent="1" source="4" target="5" edge="1">
<mxGeometry relative="1" as="geometry"/>
</mxCell>
<mxCell id="3" value="" style="edgeStyle=entityRelationEdgeStyle;startArrow=none;endArrow=none;segment=10;curved=1;" parent="1" source="4" target="6" edge="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="260" y="160" as="sourcePoint"/>
</mxGeometry>
</mxCell>
<UserObject label="The Big Idea" treeRoot="1" id="4">
<mxCell style="ellipse;whiteSpace=wrap;html=1;align=center;collapsible=0;container=1;recursiveResize=0;" parent="1" vertex="1">
<mxGeometry x="300" y="140" width="100" height="40" as="geometry"/>
</mxCell>
</UserObject>
<mxCell id="5" value="Branch" style="whiteSpace=wrap;html=1;shape=partialRectangle;top=0;left=0;bottom=1;right=0;points=[[0,1],[1,1]];strokeColor=#000000;fillColor=none;align=center;verticalAlign=bottom;routingCenterY=0.5;snapToPoint=1;collapsible=0;container=1;recursiveResize=0;autosize=1;" parent="1" vertex="1">
<mxGeometry x="460" y="120" width="80" height="20" as="geometry"/>
</mxCell>
<mxCell id="6" value="Sub Topic" style="whiteSpace=wrap;html=1;rounded=1;arcSize=50;align=center;verticalAlign=middle;collapsible=0;container=1;recursiveResize=0;strokeWidth=1;autosize=1;spacing=4;" parent="1" vertex="1">
<mxGeometry x="460" y="160" width="72" height="26" as="geometry"/>
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>'''
value = Textarea(description="value", rows=20)
controls = Accordion([value])
controls.set_title(0, "value")
jslink((diagram.source, "value"), (value, "value"))
box.children = [controls, diagram]
###Output
_____no_output_____
###Markdown
There are a number of challenges in using it as a protocol:- includes hostname (ick!)- includes etag- stripping these out creates flicker when updatingAt present, the most practical tools are jinja2, which works directly with the XML, or `lxml`, which can work at a higher level, e.g. with XPath; a short sketch follows below. > Stay tuned for better tools for working with this format with e.g. `networkx` Interactive stateA `Diagram` exposes a number of parts of both the content and interactive state of the editor.
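The attribute-stripping idea mentioned above can be sketched with `lxml`; the attribute names come from the `mxfile` element shown earlier:

```python
from lxml import etree

# drop the volatile attributes (host, etag, plus the modified/agent stamps)
root = etree.fromstring(diagram.source.value.encode("utf-8"))
for attr in ("host", "modified", "agent", "etag"):
    root.attrib.pop(attr, None)
stable_xml = etree.tostring(root, encoding="unicode")
```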
###Code
zoom = FloatSlider(description="zoom", min=0.01)
scroll_x, scroll_y = [FloatSlider(description=f"scroll {x}", min=-1e5, max=1e5) for x in "xy"]
current_page = IntSlider(description="page")
jslink((diagram, "zoom"), (zoom, "value"))
jslink((diagram, "scroll_x"), (scroll_x, "value"))
jslink((diagram, "scroll_y"), (scroll_y, "value"))
jslink((diagram, "current_page"), (current_page, "value"))
controls.children = [VBox([zoom, scroll_x, scroll_y, current_page]), value]
controls._titles = {"0": "ui", "1": "value"}
selected_cells = SelectMultiple(description="selected")
enable_selected = Checkbox(True, description="enable select")
def update_selected(*_):
if enable_selected.value:
diagram.selected_cells = [*selected_cells.value]
def update_selected_options(*_):
try:
with selected_cells.hold_trait_notifications():
selected_cells.options = [
cell.attrib["id"]
for cell in etree.fromstring(diagram.source.value).xpath("//mxCell")
if "id" in cell.attrib
]
selected_cells.value = diagram.selected_cells
except:
pass
selected_cells.observe(update_selected, "value")
diagram.source.observe(update_selected_options, "value")
diagram.observe(update_selected_options, "selected_cells")
update_selected_options()
controls.children = [VBox([zoom, scroll_x, scroll_y, current_page]), VBox([enable_selected, selected_cells]), value]
controls._titles = {"0": "ui", "1": "selection", "2": "value"}
HBox([enable_selected, selected_cells])
###Output
_____no_output_____
###Markdown
Page Information`Diagrams` actually describe a "real thing", measured in inches.
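Since `page_format` is a plain dict trait, it can also be set directly. A sketch; the `width`/`height` key names are assumptions, mirroring the `pageWidth`/`pageHeight` attributes in the XML above:

```python
# hypothetical direct update; the key names are assumptions
diagram.page_format = {**diagram.page_format, "width": 850, "height": 1100}
```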
###Code
page_format = {
k: IntSlider(description=k, value=v, min=0, max=1e5)
for k,v in diagram.page_format.items()
}
def update_format(*_):
diagram.page_format = {
k: v.value for k, v in page_format.items()
}
def update_sliders(*_):
for k, v in page_format.items():
v.value = diagram.page_format[k]
[v.observe(update_format, "value") for k, v in page_format.items()]
[diagram.observe(update_sliders, "page_format")]
controls.children = [VBox([zoom, scroll_x, scroll_y, current_page]), VBox([enable_selected, selected_cells]), VBox([*page_format.values()]), value]
controls._titles = {"0": "ui", "1": "selection", "2": "page", "3": "value"}
###Output
_____no_output_____
###Markdown
GridThe styling of the on-screen grid is customizable. This typically _won't_ be included in an export to e.g. SVG.
###Code
grid_enabled = Checkbox(description="grid")
grid_size = FloatSlider(description="grid size")
grid_color = Text("#66666666", description="grid color")
jslink((diagram, "grid_enabled"), (grid_enabled, "value"))
jslink((diagram, "grid_size"), (grid_size, "value"))
jslink((diagram, "grid_color"), (grid_color, "value"))
controls.children = [VBox([zoom, scroll_x, scroll_y, current_page]), VBox([enable_selected, selected_cells]), VBox([*page_format.values()]), VBox([ grid_enabled, grid_size, grid_color]), value]
controls._titles = {"0": "ui", "1": "selection", "2": "page", "3":"grid", "4": "value"}
###Output
_____no_output_____ |
genmod/vanilla_autoencoder/vanilla_autoencoder.ipynb | ###Markdown
The Basic Idea of Machine-learning Imagine a monkey drawing on a canvas (say, of `128 * 128` pixels). What's the probability that it draws a human face? Almost none, isn't it? This implies that* the manifold of human faces embedded in $\mathbb{R}^{128 \times 128}$ has relatively few dimensions;* moreover, the manifold is sparse.To see this, imagine modifying the background of a painting with a human face in the foreground: the points in $\mathbb{R}^{128 \times 128}$ before and after the modification are generally far from each other. Thus, the task of machine learning is to find this low-dimensional sparse manifold, map it to a lower-dimensional compact space, and map elements there back to generate real-world objects, like paintings. We call the real-world object the "observable", and the low-dimensional compact space the "latent" space. This serves both data compression and data abstraction. In fact, these are two aspects of one thing: the probability distribution of the data (which we will discuss in the next topic). Auto-encoder Conceptions This basic idea naturally leads to the "auto-encoder", which has two parts:1. Encoder: mapping the observable to the latent.2. Decoder: mapping the latent to the observable. Let $X$ be the space of observables, and $Z$ the latent space. Let $f: X \mapsto Z$ denote the encoder, and $g: Z \mapsto X$ the decoder. Then, for $\forall x \in X$, we would expect\begin{equation} g \circ f(x) \approx x.\end{equation}To numerically characterize this approximation, let $d_{\text{obs}}$ be some pre-defined distance on the space of observables; we can then define the "reconstruction" loss\begin{equation} \mathcal{L}_{\text{recon}} = \frac{1}{|D|} \sum_{x \in D} d_{\text{obs}} \left(x, g \circ f (x) \right),\end{equation}so called since $g \circ f (x)$ is a reconstruction of $x$. To ensure the compactness of the latent space, an additional regularizer is added to the reconstruction loss via some pre-defined distance $d_{\text{lat}}$ in the latent space. Thus, the total loss is\begin{equation} \mathcal{L} = \frac{1}{|D|} \sum_{x \in D} \left[ d_{\text{obs}} \left(x, g \circ f (x) \right) + d_{\text{lat}} \left( f(x), 0 \right) \right].\end{equation} The task is thus to find the functions $f$ and $g$ that minimize the total loss. This utilizes the universal approximation property of neural networks. Reference: 1. [Wikipedia](https://en.wikipedia.org/wiki/Autoencoder). Implementation
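Before the TensorFlow version below, here is a tiny NumPy sketch of the total loss, assuming squared error for $d_{\text{obs}}$ and squared distance to the origin for $d_{\text{lat}}$ (matching the regularizer used later):

```python
import numpy as np

def total_loss(x, f, g):
    # d_obs(x, g(f(x))): squared reconstruction error, averaged over the batch
    recon = np.mean(np.sum((g(f(x)) - x) ** 2, axis=1))
    # d_lat(f(x), 0): squared distance of the latent code to the origin
    reg = np.mean(np.sum(f(x) ** 2, axis=1))
    return recon + reg
```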
###Code
%matplotlib inline
from IPython.display import display
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data_path = '../../dat/MNIST/'
mnist = input_data.read_data_sets(
data_path, one_hot=True,
source_url='http://yann.lecun.com/exdb/mnist/')
def get_encoder(latent_dim, hidden_layers):
def encoder(observable, name='encoder', reuse=None):
with tf.variable_scope(name, reuse=reuse):
hidden = observable
for hidden_layer in hidden_layers:
hidden = tf.layers.dense(hidden, hidden_layer,
activation=tf.nn.relu)
latent = tf.layers.dense(hidden, latent_dim, activation=None)
return latent
return encoder
def get_decoder(observable_dim, hidden_layers):
def decoder(latent, name='decoder', reuse=None):
with tf.variable_scope(name, reuse=reuse):
hidden = latent
for hidden_layer in hidden_layers:
hidden = tf.layers.dense(hidden, hidden_layer,
activation=tf.nn.relu)
reconstructed = tf.layers.dense(hidden, observable_dim,
activation=tf.nn.sigmoid)
return reconstructed
return decoder
def get_loss(observable, encoder, decoder, regularizer=None, reuse=None):
if regularizer is None:
regularizer = lambda latent: 0.0
with tf.name_scope('loss'):
# shape: [batch_size, latent_dim]
latent = encoder(observable, reuse=reuse)
# shape: [batch_size, observable_dim]
reconstructed = decoder(latent, reuse=reuse)
# shape: [batch_size]
squared_errors = tf.reduce_sum(
(reconstructed - observable) ** 2,
axis=1)
mean_square_error = tf.reduce_mean(squared_errors)
return mean_square_error + regularizer(latent)
latent_dim = 64
encoder = get_encoder(latent_dim=latent_dim,
hidden_layers=[512, 256, 128])
decoder = get_decoder(observable_dim=28*28,
hidden_layers=[128, 256, 512])
observable = tf.placeholder(shape=[None, 28*28],
dtype='float32',
name='observable')
latent_samples = tf.placeholder(shape=[None, latent_dim],
dtype='float32',
name='latent_samples')
generated = decoder(latent_samples, reuse=tf.AUTO_REUSE)
def regularizer(latent, name='regularizer'):
with tf.name_scope(name):
distances = tf.reduce_sum(latent ** 2, axis=1)
return tf.reduce_mean(distances)
loss = get_loss(observable, encoder, decoder,
regularizer=regularizer,
reuse=tf.AUTO_REUSE)
optimizer = tf.train.AdamOptimizer(epsilon=1e-3)
train_op = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
loss_vals = []
for i in tqdm(range(100000)):
X, y = mnist.train.next_batch(batch_size=128)
_, loss_val = sess.run([train_op, loss], {observable: X})
if np.isnan(loss_val):
raise ValueError('Loss has been NaN.')
loss_vals.append(loss_val)
print('Final loss:', np.mean(loss_vals[-100:]))
plt.plot(loss_vals)
plt.xlabel('steps')
plt.ylabel('loss')
plt.show()
def get_image(array):
"""
Args:
array: Numpy array with shape `[28*28]`.
Returns:
An image.
"""
array = 255 * array
array = array.reshape([28, 28])
array = array.astype(np.uint8)
return Image.fromarray(array)
latent_sample_vals = np.random.normal(size=[128, latent_dim])
generated_vals = sess.run(generated, {latent_samples: latent_sample_vals})
# Display the results
n_display = 5
for i in range(n_display):
print('Generated:')
display(get_image(generated_vals[i]))
print()
###Output
Generated:
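As a hedged extra, the trained decoder can also render a straight line between two latent codes, re-using `sess`, `generated`, and `latent_samples` defined above:

```python
# decode an 8-step linear interpolation between two latent samples
z0, z1 = np.random.normal(size=[2, latent_dim])
line = np.stack([(1 - t) * z0 + t * z1 for t in np.linspace(0, 1, 8)])
interp_vals = sess.run(generated, {latent_samples: line})
for v in interp_vals:
    display(get_image(v))
```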
|
Microcanonical_NN/Tensorflow_microlocal/Convolutional_microlocal_tf.ipynb | ###Markdown
Microlocal analysis of the convolutional layers in LPD Utilitary libraries
###Code
%matplotlib inline
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import sys
sys.path.append("../../WF_inpaint/")
# Import the needed modules
from data.data_factory import random_realphantom
from ellipse.ellipseWF_factory import plot_WF, WFupdate, WFupdate_sino
import matplotlib.pyplot as plt
import numpy.random as rnd
import numpy as np
import odl
import matplotlib.pyplot as plt
import os
import adler
adler.util.gpu.setup_one_gpu(0)
from adler.odl.phantom import random_phantom
from adler.tensorflow import prelu, cosine_decay
import tensorflow as tf
import numpy as np
import odl
import odl.contrib.tensorflow
np.random.seed(0)
#name = os.path.splitext(os.path.basename(__file__))[0]
name = os.path.splitext(os.getcwd())[0]+'/checkpoints'
sess = tf.InteractiveSession()
###Output
_____no_output_____
###Markdown
Radon transform
###Code
size = 256
lowd = 40
from ray_transform.canon_relation import (point_img2sino, class_img2sino, CanRel_img2sino,
point_sino2img, CanRel_sino2img)
# Create ODL data structures
space = odl.uniform_discr([-int(size/2), -int(size/2)], [int(size/2), int(size/2)], [size, size],
dtype='float32')
# Full dose radon transform
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=180)
operator = odl.tomo.RayTransform(space, geometry)
opnorm = odl.power_method_opnorm(operator)
operator = (1 / opnorm) * operator
# Low dose radon transform
geometry_lowd = odl.tomo.parallel_beam_geometry(space, num_angles=lowd)
operator_lowd = odl.tomo.RayTransform(space, geometry_lowd)
opnorm_lowd = odl.power_method_opnorm(operator_lowd)
operator_lowd = (1 / opnorm_lowd) * operator_lowd
###Output
_____no_output_____
###Markdown
Data generator for CT recon
###Code
size = 256
nClasses = 180
lowd = 40
nRegions = np.random.randint(5,10)
npoints_max = np.random.randint(8,15)
batch_size = 5
n_iter = 10
n_primal = 5
n_dual = 5
def generate_data_CT(validation=False):
"""Generate a set of random data."""
if validation:
n_generate = 1
else:
n_generate = batch_size
y_arr = np.empty((n_generate, operator_lowd.range.shape[0], operator_lowd.range.shape[1], 1), dtype='float32')
x_true_arr = np.empty((n_generate, space.shape[0], space.shape[1], 1), dtype='float32')
for i in range(n_generate):
if validation:
phantom = odl.phantom.shepp_logan(space, True)
else:
phantom, _, _, _ = random_realphantom(size, nRegions, npoints_max, nClasses)
data = operator_lowd(phantom)
noisy_data = data + odl.phantom.white_noise(operator_lowd.range) * np.mean(np.abs(data)) * 0.05
x_true_arr[i, ..., 0] = phantom
y_arr[i, ..., 0] = noisy_data
return y_arr, x_true_arr
y_arr, x_true_arr = generate_data_CT()
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(y_arr[0,:,:,0], cmap="gray")
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(x_true_arr[0,:,:,0], cmap="gray")
###Output
_____no_output_____
###Markdown
Evaluate primal dual
###Code
# Create tensorflow layer from odl operator
odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer(operator_lowd,
'RayTransform')
odl_op_layer_adjoint = odl.contrib.tensorflow.as_tensorflow_layer(operator_lowd.adjoint,
'RayTransformAdjoint')
with tf.name_scope('placeholders'):
x_true = tf.placeholder(tf.float32, shape=[None, size, size, 1], name="x_true")
y_rt = tf.placeholder(tf.float32, shape=[None, operator_lowd.range.shape[0], operator_lowd.range.shape[1], 1], name="y_rt")
is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
def apply_conv(x, filters=32):
return tf.layers.conv2d(x, filters=filters, kernel_size=3, padding='SAME',
kernel_initializer=tf.contrib.layers.xavier_initializer(), use_bias=False)
relu = tf.nn.relu
primal_values = []
dual_values = []
with tf.name_scope('tomography'):
with tf.name_scope('initial_values'):
primal = tf.concat([tf.zeros_like(x_true)] * n_primal, axis=-1)
dual = tf.concat([tf.zeros_like(y_rt)] * n_dual, axis=-1)
for i in range(n_iter):
with tf.variable_scope('dual_iterate_{}'.format(i)):
evalop = odl_op_layer(primal[..., 1:2])
update = tf.concat([dual, evalop, y_rt], axis=-1)
#update = prelu(apply_conv(update), name='prelu_1')
#update = prelu(apply_conv(update), name='prelu_2')
update = relu(apply_conv(update), name='relu_1')
update = relu(apply_conv(update), name='relu_2')
update = apply_conv(update, filters=n_dual)
dual = dual + update
with tf.variable_scope('primal_iterate_{}'.format(i)):
evalop = odl_op_layer_adjoint(dual[..., 0:1])
update = tf.concat([primal, evalop], axis=-1)
update = relu(apply_conv(update), name='relu_1')
update = relu(apply_conv(update), name='relu_2')
update = apply_conv(update, filters=n_primal)
primal = primal + update
primal_values.append(primal)
dual_values.append(dual)
x_result = primal[..., 0:1]
# Initialize all TF variables
sess.run(tf.global_variables_initializer())
ckp_name = name+'_lpd_nobias/checkpoints'
adler.tensorflow.util.default_checkpoint_path(ckp_name)
# Add op to save and restore
saver = tf.train.Saver()
if 1:
saver.restore(sess,
adler.tensorflow.util.default_checkpoint_path(ckp_name))
# Generate validation data
y_arr_validate, x_true_arr_validate = generate_data_CT(validation=True)
primal_values_result, dual_values_result = sess.run([primal_values, dual_values],
feed_dict={x_true: x_true_arr_validate,
y_rt: y_arr_validate,
is_training: False})
import matplotlib.pyplot as plt
from skimage.measure import compare_ssim as ssim
from skimage.measure import compare_psnr as psnr
print(ssim(primal_values_result[-1][0, ..., 0], x_true_arr_validate[0, ..., 0]))
print(psnr(primal_values_result[-1][0, ..., 0], x_true_arr_validate[0, ..., 0], data_range=1))
###Output
0.986987719406
34.1344250283
###Markdown
Taking a look on the different layers
###Code
plt.figure(figsize=(6,6))
plt.imshow(dual_values_result[3][0, ..., 0])
plt.axis('off')
plt.figure(figsize=(6,6))
plt.imshow(primal_values_result[9][0, ..., 0])
plt.axis('off')
plt.figure(figsize=(6,6))
plt.imshow(x_true_arr_validate[0, ..., 0])
plt.axis('off')
###Output
_____no_output_____
###Markdown
In this case we have 10 LPD conv_ResNet subnetworks representing each Dual/Primal step List of variables
###Code
tf.global_variables()
# Get access to the graph
gr = tf.get_default_graph()
###Output
_____no_output_____
###Markdown
First kernels in the dual step
###Code
kernels = gr.get_tensor_by_name('dual_iterate_0/conv2d/kernel:0').eval()
# The first convolutional layer is a concatenation of 7 convolutional layers, for the input values
# each one with 32 kernels
kernels.shape
# The first kernel
n_dual_value = 0
channel = 0
kernel= kernels[:,:,n_dual_value,channel]
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(kernel)
###Output
_____no_output_____
###Markdown
Approximation of the kernel with finite differences Finite differences matrices
###Code
D_11 = np.array([[0,0,0],[0, 1, 0], [0 , 0 , 0]])
D_12 = np.array([[0,1,0],[0, 0, 0], [0 , -1 , 0]])
D_21 = np.array([[0,0,0],[1, 0, -1], [0 , 0 , 0]])
D_22 = np.array([[1,0,-1],[0, 0, 0], [-1 , 0 , 1]])
D_13 = np.array([[0,-1,0],[0, 2, 0], [0 , -1 , 0]])
D_31 = np.array([[0,0,0],[1, -2, 1], [0 , 0 , 0]])
D_23 = np.array([[1,-2,1],[0, 0, 0], [-1 , 2 , -1]])
D_32 = np.array([[1,0,-1],[-2, 0, 2], [1 , 0 , -1]])
D_33 = np.array([[-1,2,-1],[2, -4, 2], [-1 , 2 , -1]])
D = [D_11, D_12, D_13, D_21, D_22, D_23, D_31, D_32, D_33]
###Output
_____no_output_____
###Markdown
Change of variable
###Code
# Let us define the matrix for the change of coordinates
A = np.array([
[0, 0, 0, 0, 1, 1, 0, 1, -1],
[0, 1, -1, 0, 0, -2, 0, 0, 2],
[0, 0, 0, 0, -1, 1, 0, -1, -1],
[0, 0, 0, 1, 0, 0, 1, -2, 2],
[1, 0, 2, 0, 0, 0, -2, 0, -4],
[0, 0, 0, -1, 0, 0, 1, 2, 2],
[0, 0, 0, 0, -1, -1, 0, 1, -1],
[0, -1, -1, 0, 0, 2, 0, 0, 2],
[0, 0, 0, 0, 1, -1, 0, -1, -1]
])
# Inverse
Ainv = np.linalg.inv(A)
###Output
_____no_output_____
###Markdown
Coefficients
###Code
# Let us compute the flatten version of beta
Bflat = Ainv.dot(kernel.flatten())
B = Bflat.reshape(3,3);
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(B)
###Output
_____no_output_____
###Markdown
Reconstruction
###Code
kernel_recon = sum([Bflat[i]*D[i] for i in range(len(D))])
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(kernel_recon)
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(kernel)
###Output
_____no_output_____
###Markdown
See if it gets the same values in the convolution
###Code
y_arr, x_true_arr = generate_data_CT()
kernel_tf = gr.get_tensor_by_name('dual_iterate_0/conv2d/kernel:0')[:,:,n_dual_value,channel:channel+1,np.newaxis]
inp = tf.constant(x_true_arr[0:1], dtype = tf.float32)
# Compute the result in tensorflow
tf_conv_out = tf.nn.conv2d(inp, kernel_tf,strides=[1, 1, 1, 1],padding='SAME').eval()[0,:,:,0]
from scipy.signal import convolve2d
scipy_conv_out = convolve2d(x_true_arr[0,:,:,0], kernel)
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(scipy_conv_out, cmap="gray")
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(tf_conv_out, cmap="gray")
###Output
_____no_output_____
###Markdown
Evaluate the ellipticity
###Code
plt.plot(Bflat)
def pT(B, Xi):
# symbol p(Xi) = sum over (m,n) of B[m,n] * Xi[0]**m * Xi[1]**n
p = (B[0,0]+B[0,1]*Xi[1]+B[1,0]*Xi[0]+B[1,1]*Xi[0]*Xi[1]+
B[0,2]*Xi[1]**2+B[2,0]*Xi[0]**2+B[1,2]*Xi[0]*Xi[1]**2+
B[2,1]*Xi[1]*Xi[0]**2+B[2,2]*(Xi[1]**2)*Xi[0]**2)
return p
ellipt = np.array([pT(B,np.array([xi,yi])) for xi in range(size) for yi in range(size)]).reshape(size,size);
# The kernel is elliptic
np.min(np.abs(ellipt))
# Notice that the symbol acting on the fourier domain is killing the high frequencies
# meaning is smoothing out tha array
plt.imshow(ellipt)
###Output
_____no_output_____
###Markdown
We can create a function that evaluates each convolutional layer and finds its ellipticity
###Code
names_variables = [variable.name for variable in tf.global_variables()]
# Convolutional variables
conv_variable = list(np.array(names_variables)[(list(map(lambda string: "conv" in string,names_variables)))])
name_layer = conv_variable[0]
# Getting kernels
kernels = gr.get_tensor_by_name(name_layer).eval()
# Let us define the matrix for the change of coordinates
A = np.array([
[0, 0, 0, 0, 1, 1, 0, 1, -1],
[0, 1, -1, 0, 0, -2, 0, 0, 2],
[0, 0, 0, 0, -1, 1, 0, -1, -1],
[0, 0, 0, 1, 0, 0, 1, -2, 2],
[1, 0, 2, 0, 0, 0, -2, 0, -4],
[0, 0, 0, -1, 0, 0, 1, 2, 2],
[0, 0, 0, 0, -1, -1, 0, 1, -1],
[0, -1, -1, 0, 0, 2, 0, 0, 2],
[0, 0, 0, 0, 1, -1, 0, -1, -1]
])
def pT(B, Xi):
# symbol p(Xi) = sum over (m,n) of B[m,n] * Xi[0]**m * Xi[1]**n
p = (B[0,0]+B[0,1]*Xi[1]+B[1,0]*Xi[0]+B[1,1]*Xi[0]*Xi[1]+
B[0,2]*Xi[1]**2+B[2,0]*Xi[0]**2+B[1,2]*Xi[0]*Xi[1]**2+
B[2,1]*Xi[1]*Xi[0]**2+B[2,2]*(Xi[1]**2)*Xi[0]**2)
return p
Ainv_tf = tf.constant(Ainv, dtype = tf.float32)
###Output
_____no_output_____
###Markdown
Function that computes the ellipticity of the layer as the mean of the minimum values of the symbol over the corresponding (dual/primal) grid. We should implement this directly in TensorFlow.
###Code
def ellipt_layer_numpy(name_layer, Ainv):
if "dual" in name_layer:
shape = operator_lowd.range.shape
else:
shape = operator_lowd.domain.shape
kernels = gr.get_tensor_by_name(name_layer).eval()
ellipts = []
for n_value in range(kernels.shape[2]):
for channel in range(kernels.shape[3]):
kernel= kernels[:,:,n_value,channel]
Bflat = Ainv.dot(kernel.flatten())
B = Bflat.reshape(3,3);
ellipts.append(np.min(np.abs(np.array([pT(B,np.array([xi,yi]))
for xi in range(shape[0]) for yi in
range(shape[1])]))));
return np.mean(ellipts)
def ellipt_layer_tf(name_layer, Ainv_tf):
if "dual" in name_layer:
shape = operator_lowd.range.shape
else:
shape = operator_lowd.domain.shape
kernels = tf.transpose(gr.get_tensor_by_name(name_layer),[2,0,1,3])
kernels_shape = kernels.shape
kernels = tf.reshape(kernels, [kernels_shape[0],kernels_shape[1]*kernels_shape[2], kernels_shape[3]])
Bflats = tf.tensordot(Ainv_tf,kernels, axes = [0,1])
Bs = tf.reshape(Bflats,[3,3,kernels_shape[0], kernels_shape[3]]).eval()
ellipts = []
for n_value in range(Bs.shape[2]):
for channel in range(Bs.shape[3]):
B = Bs[:,:, n_value, channel]
ellipts.append(np.min(np.abs(np.array([pT(B,np.array([xi,yi]))
for xi in range(shape[0]) for yi in
range(shape[1])]))));
return np.mean(ellipts)
import time
t = time.time()
ellipt_layer_numpy(name_layer, Ainv)
ellapsed_numpy = time.time()-t
t = time.time()
ellipt_layer_tf(name_layer, Ainv_tf)
ellapsed_tf = time.time()-t
ellapsed_numpy, ellapsed_tf
###Output
_____no_output_____ |
SciPy2016/MTwork/ForwardModeling_noExtension_GKR/Setup MT forward modelling.ipynb | ###Markdown
Build the mesh Design the tensors
hSize,vSize = 25., 10
nrCcore = [15, 8, 6, 5, 4, 2, 2, 2, 2]
hPad = simpeg.Utils.meshTensor([(hSize,10,1.5)])
hx = np.concatenate((hPad[::-1],np.ones(((be-bw)/hSize,))*hSize,hPad))
hy = np.concatenate((hPad[::-1],np.ones(((bn-bs)/hSize,))*hSize,hPad))
airPad = simpeg.Utils.meshTensor([(vSize,13,1.5)])
vCore = np.concatenate([ np.ones(i)*s for i, s in zip(nrCcore,(simpeg.Utils.meshTensor([(vSize,1),(vSize,8,1.3)])))])[::-1]
botPad = simpeg.Utils.meshTensor([(vCore[0],8,-1.5)])
hz = np.concatenate((botPad,vCore,airPad))
# Calculate the x0 point
x0 = np.array([bw-np.sum(hPad),bs-np.sum(hPad),bt-np.sum(vCore)-np.sum(botPad)])
# Make the mesh
meshFor = simpeg.Mesh.TensorMesh([hx,hy,hz],x0)
###Code
# Build the Inversion mesh
# Design the tensors
hSizeI,vSizeI = 25., 10.
nrCcoreI = [12, 6, 4, 4, 3, 3, 3, 2, 1]
hPadI = simpeg.Utils.meshTensor([(hSizeI,10,1.75)])
hxI = np.concatenate((hPadI[::-1],np.ones(((be-bw)/hSizeI,))*hSizeI,hPadI))
hyI = np.concatenate((hPadI[::-1],np.ones(((bn-bs)/hSizeI,))*hSizeI,hPadI))
airPadI = simpeg.Utils.meshTensor([(vSizeI,12,1.75)])
vCoreI = np.concatenate([ np.ones(i)*s for i, s in zip(nrCcoreI,(simpeg.Utils.meshTensor([(vSizeI,1),(vSizeI,8,1.3)])))])[::-1]
botPadI = simpeg.Utils.meshTensor([(vCoreI[0],8,-1.75)])
hzI = np.concatenate((botPadI,vCoreI,airPadI))
# Calculate the x0 point
x0I = np.array([bw-np.sum(hPadI),bs-np.sum(hPadI),bt-np.sum(vCoreI)-np.sum(botPadI)])
# Make the mesh
meshInv = simpeg.Mesh.TensorMesh([hxI,hyI,hzI],x0I)
meshFor = copy.deepcopy(meshInv)
NSEM.Utils.skindepth(1e2,10)
print np.sum(vCoreI)
print np.sum(hPadI)
print np.sum(airPadI), np.sum(botPadI)
print meshFor.nC
print meshFor
# Save the mesh
meshFor.writeVTK('nsmesh_GKRcoarse.vtr',{'id':np.arange(meshFor.nC)})
nsvtr = telluricpy.vtkTools.io.readVTRFile('nsmesh_GKRcoarse.vtr')
nsvtr
topoSurf = telluricpy.vtkTools.polydata.normFilter(telluricpy.vtkTools.io.readVTPFile('../../Geological_model/CDED_Lake_Coarse.vtp'))
activeMod = telluricpy.vtkTools.extraction.extractDataSetWithPolygon(nsvtr,topoSurf)
topoSurf
#telluricpy.vtkTools.io.writeVTUFile('activeModel.vtu',activeMod)
# Get active indieces
activeInd = telluricpy.vtkTools.dataset.getDataArray(activeMod,'id')
# Make the conductivity dictionary
# Note: using the background value for the till, since the extraction gets the ind's below the till surface
geoStructFileDict = {'Till':1e-4,
'PK1':5e-2,
'HK1':1e-3,
'VK':5e-3}
# Loop through
extP = '../../Geological_model/'
geoStructIndDict = {}
for key, val in geoStructFileDict.iteritems():
geoPoly = telluricpy.vtkTools.polydata.normFilter(telluricpy.vtkTools.io.readVTPFile(extP+key+'.vtp'))
modStruct = telluricpy.vtkTools.extraction.extractDataSetWithPolygon(activeMod,geoPoly,extBoundaryCells=True,extInside=True,extractBounds=True)
geoStructIndDict[key] = telluricpy.vtkTools.dataset.getDataArray(modStruct,'id')
# Make the physical prop
sigma = np.ones(meshFor.nC)*1e-8
sigma[activeInd] = 1e-3 # 1e-4 is the background and 1e-3 is the till value
# Add the structure
for key in ['Till','PK1','HK1','VK']:
sigma[geoStructIndDict[key]] = geoStructFileDict[key]
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111)
model = sigma.reshape(meshFor.vnC,order='F')
a = ax.pcolormesh(meshFor.gridCC[:,0].reshape(meshFor.vnC,order='F')[:,20,:],meshFor.gridCC[:,2].reshape(meshFor.vnC,order='F')[:,20,:],np.log10(model[:,20,:]),edgecolor='k')
ax.set_xlim([bw,be])
ax.set_ylim([-0,bt])
ax.grid(which="major")
plt.colorbar(a)
ax.set_aspect("equal")
# Save the model
meshFor.writeVTK('nsmesh_GKRCoarseHKPK1.vtr',{'S/m':sigma})
import numpy as np
# Set up the forward modeling
freq = np.logspace(5,0,31)
np.save('MTfrequencies',freq)
freq
# Find the locations on the surface of the model.
# Get the outer shell of the model
import vtk
actModVTP = telluricpy.vtkTools.polydata.normFilter(telluricpy.vtkTools.extraction.geometryFilt(activeMod))
polyBox = vtk.vtkCubeSource()
polyBox.SetBounds(bw-5.,be+5,bs-5.,bn+5.,bb-5.,bt+5)
polyBox.Update()
# Exract the topo of the model
modTopoVTP = telluricpy.vtkTools.extraction.extractDataSetWithPolygon(actModVTP,telluricpy.vtkTools.polydata.normFilter(polyBox.GetOutput()),extractBounds=False)
telluricpy.vtkTools.io.writeVTPFile('topoSurf.vtp',actModVTP)
# Make the rxLocations file
x,y = np.meshgrid(np.arange(bw+12.5,be,25),np.arange(bs+12.5,bn,25))
xy = np.hstack((x.reshape(-1,1),y.reshape(-1,1)))
# Find the location array
locArr = telluricpy.modelTools.surfaceIntersect.findZofXYOnPolydata(xy,actModVTP) #modTopoVTP)
np.save('MTlocations',locArr)
telluricpy.vtkTools.io.writeVTPFile('MTloc.vtp',telluricpy.dataFiles.XYZtools.makeCylinderPtsVTP(locArr,5,10,10))
# Running the forward modelling on the Cluster.
# Define the forward run in findDiam_MTforward.py
%matplotlib qt
#sys.path.append('/home/gudni/Dropbox/code/python/MTview/')
import interactivePlotFunctions as iPf
# Load the data
mtData = np.load('MTdataStArr_nsmesh_HKPK1Coarse_noExtension.npy')
mtData
iPf.MTinteractiveMap([mtData])
# Looking at the data shows that data below 100Hz is affected by the boundary conditions,
# which makes sense for very conductive conditions as we have.
# Invert data in the 1e5-1e2 range.
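# A sketch of that band selection (assumption: 'freq' is a field of the
# structured array, as it is used elsewhere in this notebook):
mtDataUse = mtData[(mtData['freq'] >= 1e2) & (mtData['freq'] <= 1e5)]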
###Output
_____no_output_____
###Markdown
Run the inversion on the cluster using the inv3d/run1/findDiam_inversion.py
###Code
drecAll = np.load('MTdataStArr_nsmesh_0.npy')
np.unique(drecAll['freq'])[10::]
# Build the Inversion mesh
# Design the tensors
hSizeI,vSizeI = 25., 10.
nrCcoreI = [12, 6, 6, 6, 5, 4, 3, 2, 1]
hPadI = simpeg.Utils.meshTensor([(hSizeI,9,1.5)])
hxI = np.concatenate((hPadI[::-1],np.ones(((be-bw)/hSizeI,))*hSizeI,hPadI))
hyI = np.concatenate((hPadI[::-1],np.ones(((bn-bs)/hSizeI,))*hSizeI,hPadI))
airPadI = simpeg.Utils.meshTensor([(vSizeI,12,1.5)])
vCoreI = np.concatenate([ np.ones(i)*s for i, s in zip(nrCcoreI,(simpeg.Utils.meshTensor([(vSizeI,1),(vSizeI,8,1.3)])))])[::-1]
botPadI = simpeg.Utils.meshTensor([(vCoreI[0],7,-1.5)])
hzI = np.concatenate((botPadI,vCoreI,airPadI))
# Calculate the x0 point
x0I = np.array([bw-np.sum(hPadI),bs-np.sum(hPadI),bt-np.sum(vCoreI)-np.sum(botPadI)])
# Make the mesh
meshInv = simpeg.Mesh.TensorMesh([hxI,hyI,hzI],x0I)
meshInv.writeVTK('nsmesh_HPVK1_inv.vtr',{'id':np.arange(meshInv.nC)})
nsInvvtr = telluricpy.vtkTools.io.readVTRFile('nsmesh_HPVK1_inv.vtr')
activeModInv = telluricpy.vtkTools.extraction.extractDataSetWithPolygon(nsInvvtr,topoSurf,extBoundaryCells=True)
sigma = np.ones(meshInv.nC)*1e-8
indAct = telluricpy.vtkTools.dataset.getDataArray(activeModInv,'id')
sigma[indAct] = 1e-4
meshInv.writeVTK('nsmesh_HPVK1_inv.vtr',{'id':np.arange(meshInv.nC),'S/m':sigma})
from pymatsolver import MumpsSolver
pymatsolver.AvailableSolvers
NSEM.Utils.skindepth(1000,100000)
np.unique(mtData['freq'])[5::]
###Output
_____no_output_____ |
Attention_Based_Machine_Translation.ipynb | ###Markdown
This tutorial is inspired by the Coursera Deep Learning Specialization We will use Faker to generate fake dates
###Code
!pip -q install faker
from faker import Faker
import numpy as np
import random
from babel.dates import format_date
faker = Faker()
faker.seed(5)
np.random.seed(5)
#these are the date formats we are going to generate
FORMATS = ['short','medium','medium','medium','long','long','long','long','long','full','full','full','d MMM YYY','d MMMM YYY',
'd MMMM YYY','d MMMM YYY','d MMMM YYY','d MMMM YYY','dd/MM/YYY','EE d, MMM YYY','EEEE d, MMMM YYY','MMM d, YYY',
'MMMM d, YYY','YYY, d MMM','YYY, d MMMM','EE YYY, d MMMM','EEEE YYY, d MM',]
for format in FORMATS:
print('%s => %s' %(format, format_date(faker.date_object(), format=format, locale='en')))
def random_date():
dt = faker.date_between(start_date = '-500y',end_date='+50y')
try:
date = format_date(dt, format=random.choice(FORMATS), locale='en')
human_readable = date.lower().replace(',', '')
machine_readable = dt.isoformat()
except AttributeError as e:
return None, None
return human_readable, machine_readable
random_date()
human_vocab = set()
machine_vocab = set()
dataset = []
m = 50000
for i in range(m):
hd,md = random_date()
dataset.append((hd,md))
human_vocab.update( tuple(hd) )
machine_vocab.update( tuple(md) )
human_vocab.update(('<pad>','<unk>'))
human_vocab = dict(enumerate(human_vocab))
human_vocab = { v:i for i,v in human_vocab.items() }
machine_vocab.add('<unk>')
machine_vocab = dict(enumerate(machine_vocab))
inv_machine_vocab = { v:i for i,v in machine_vocab.items()}
print(len(dataset),len(human_vocab),len(machine_vocab))
dataset[:10]
#test set
t = 10000
testset= []
for i in range(t):
hd,md = random_date()
testset.append((hd,md))
HUMAN_VOCAB = len(human_vocab)
MACHINE_VOCAB = len(machine_vocab)
Tx = 30
Ty = 10
print( HUMAN_VOCAB, MACHINE_VOCAB )
###Output
36 12
###Markdown
1. Converting Human readable dates to character vectors 2. Converting Machine Dates to character vectors
###Code
def string_to_ohe( string, T, vocab ):
string = string.lower()
arr = []
while len(arr) < len(string):
arr.append( vocab.get( string[len(arr)], vocab['<unk>']) )
while len(arr) < T:
arr.append( vocab['<pad>'] )
onehot = np.zeros( (T,len(vocab)) )
for i in range(T):
onehot[ i, arr[i] ] = 1
return onehot, arr
def output_to_date( out, vocab ):
arr = np.argmax(out,axis=-1)
string = ''
for i in arr:
string += vocab[ i ]
return string
X = []
Y = []
for x,y in dataset:
X.append( string_to_ohe(x, Tx, human_vocab)[0] )
Y.append( string_to_ohe(y, Ty, inv_machine_vocab)[0] )
X,Y = np.array(X), np.array(Y)
X.shape, Y.shape
Xt, Yt = [],[]
for x,y in testset:
Xt.append( string_to_ohe(x, Tx, human_vocab)[0] )
Yt.append( string_to_ohe(y, Ty, inv_machine_vocab)[0] )
Xt,Yt = np.array(Xt), np.array(Yt)
Xt.shape, Yt.shape
###Output
_____no_output_____
###Markdown
Defining Attention Model | Overall | Attention Mechanism ||-------------|------------------------||  |  | * The post-attention LSTM passes $s^{\langle t \rangle}, c^{\langle t \rangle}$ from one time step to the next.* In this model the post-attention LSTM at time $t$ will not take the specific generated $y^{\langle t-1 \rangle}$ as input; it only takes $s^{\langle t\rangle}$ and $c^{\langle t\rangle}$ as input. * We use $a^{\langle t \rangle} = [\overrightarrow{a}^{\langle t \rangle}; \overleftarrow{a}^{\langle t \rangle}]$ to represent the concatenation of the activations of both the forward and backward directions of the pre-attention Bi-LSTM.* The diagram on the right uses a RepeatVector node to copy $s^{\langle t-1 \rangle}$'s value $T_x$ times, and then Concatenation to concatenate $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ to compute $e^{\langle t, t' \rangle}$, which is then passed through a softmax to compute $\alpha^{\langle t, t' \rangle}$. We'll explain how to use RepeatVector and Concatenation in Keras below.
###Code
from keras.layers import RepeatVector, Concatenate, Dense, Dot, Activation
#combines activations generated from BiLSTM with previous state of Post LSTM cell to get attention to be given to each timestep
#heart of attention model
def one_step_attention( a, s_prev ):
x = RepeatVector(Tx)(s_prev) #repeat s_prev Tx times
x = Concatenate(axis=-1)( [ a, x ] ) #concat each copy of s_prev with each timestep hidden state
e = Dense(10, activation='tanh')(x) #pass each concatenated vector through Dense Layer to get intermediate energies
energy = Dense(1, activation='relu')(e) #get timestep's energy
alphas = Activation('softmax')(energy) #convert energy to probabilities i.e. attention weights
context = Dot(axes=1)([alphas,a]) #multiply attention weights and timestep hidden state to get context vector
return context
from keras.layers import Input, Bidirectional, LSTM
from keras.models import Model
n_a = 32 # pre-attention LSTM hidden size; bidirectional, so the concatenated state is 64
n_s = 64 #post attention LSTM state
inp = Input( (Tx, HUMAN_VOCAB ) )
s0 = Input( (n_s,) )
c0 = Input( (n_s,) )
outputs = []
s=s0
c=c0
a = Bidirectional( LSTM( n_a, return_sequences=True ) )(inp) #generate hidden state for every timestep
"https://machinelearningmastery.com/return-sequences-and-return-states-for-lstms-in-keras/"
postLSTM = LSTM( n_s, return_state = True)
output = Dense( MACHINE_VOCAB, activation='softmax') #our final output layer
for _ in range(Ty): #iterate for every output step
context = one_step_attention(a, s) #get context
s,_,c = postLSTM(context, initial_state=[s,c]) #generate cell_state_seq(currently 1), cell_state, memory
out = output(s)
outputs.append(out)
model = Model( [inp,s0,c0], outputs )
from keras.optimizers import Adam
model.compile( optimizer=Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01), loss='categorical_crossentropy', metrics=['accuracy'] )
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
Y = list(Y.swapaxes(0,1))
Yt = list(Yt.swapaxes(0,1))
history = model.fit( [X,s0,c0], Y, epochs=100,
validation_data=([Xt,np.zeros((t, n_s)),np.zeros((t, n_s))],Yt),
batch_size=128, verbose=1)
model.save_weights('attention_weights.h5')
%matplotlib inline
import matplotlib.pyplot as plt
epoch_list = history.epoch
plt.plot(epoch_list, history.history['loss'], label='Train Loss')
plt.plot(epoch_list, history.history['val_loss'], label='Validation Loss')
plt.ylabel('Loss');plt.xlabel('Epoch');plt.title('Loss')
plt.legend(loc="best");plt.grid(color='gray', linestyle='-', linewidth=0.5)
model.load_weights('attention_weights.h5')
def getTranslation(date,model):
date = date.lower().replace(',','')
source = np.array(string_to_ohe(date, Tx, human_vocab)[0])
source = np.expand_dims(source,axis=0)
prediction = np.array(model.predict([source, s0, c0]))
prediction = np.squeeze(prediction.swapaxes(0,1))
return output_to_date(prediction,machine_vocab)
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001',
'1 March 2001','jun 10 2017','11/07/2002']
for example in EXAMPLES:
print(f"{example} -> {getTranslation(example,model)}")
done = False
while not done:
dt = input("Enter Date : ")
print(f"Translation : {getTranslation(dt,model)} Continue('y/n') :",end="")
done = input() == 'n'
###Output
_____no_output_____ |
AAAI/Interpretability/dataset1/second_layer_averaging_with_entropy_001.ipynb | ###Markdown
Generate dataset
###Code
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
color = ['#1F77B4','orange', 'g','brown']
name = [1,2,3,0]
for i in range(10):
if i==3:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3],label="D_"+str(name[i]))
elif i>=4:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3])
else:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[i],label="D_"+str(name[i]))
plt.legend()
x[idx[0]][0], x[idx[5]][5]
desired_num = 6000
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,9)
a = []
for i in range(9):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(a)
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
len(mosaic_list_of_images), mosaic_list_of_images[0]
###Output
_____no_output_____
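A quick sanity check before wrapping the data: plot the nine points of one mosaic and highlight the foreground cell recorded in `fore_idx` (a sketch using only names defined above):

```python
# scatter one mosaic; the red point is the foreground cell
j = 0
pts = mosaic_list_of_images[j]
plt.scatter(pts[:, 0], pts[:, 1], c="gray")
plt.scatter(pts[fore_idx[j], 0], pts[fore_idx[j], 1], c="red",
            label="fg class " + str(mosaic_label[j]))
plt.legend()
plt.show()
```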
###Markdown
load mosaic data
###Code
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list, mosaic_label,fore_idx):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list
self.label = mosaic_label
self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]
batch = 250
msd1 = MosaicDataset(mosaic_list_of_images[0:3000], mosaic_label[0:3000] , fore_idx[0:3000])
train_loader = DataLoader( msd1 ,batch_size= batch ,shuffle=True)
batch = 250
msd2 = MosaicDataset(mosaic_list_of_images[3000:6000], mosaic_label[3000:6000] , fore_idx[3000:6000])
test_loader = DataLoader( msd2 ,batch_size= batch ,shuffle=True)
###Output
_____no_output_____
###Markdown
models
###Code
class Focus_deep(nn.Module):
'''
deep focus network averaged at zeroth layer
input : elemental data
'''
def __init__(self,inputs,output,K,d):
super(Focus_deep,self).__init__()
self.inputs = inputs
self.output = output
self.K = K
self.d = d
self.linear1 = nn.Linear(self.inputs,50, bias=False) #,self.output)
self.linear2 = nn.Linear(50,50 , bias=False)
self.linear3 = nn.Linear(50,self.output, bias=False)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.xavier_normal_(self.linear3.weight)
def forward(self,z):
batch = z.shape[0]
x = torch.zeros([batch,self.K],dtype=torch.float64)
y = torch.zeros([batch,50], dtype=torch.float64) # number of features of output
features = torch.zeros([batch,self.K,50],dtype=torch.float64)
x,y = x.to("cuda"),y.to("cuda")
features = features.to("cuda")
for i in range(self.K):
alp,ftrs = self.helper(z[:,i] ) # self.d*i:self.d*i+self.d
x[:,i] = alp[:,0]
features[:,i] = ftrs
log_x = F.log_softmax(x,dim=1)
x = F.softmax(x,dim=1) # alphas
for i in range(self.K):
x1 = x[:,i]
y = y+torch.mul(x1[:,None],features[:,i]) # self.d*i:self.d*i+self.d
return y , x ,log_x
def helper(self,x):
x = self.linear1(x)
x = F.relu(x)
x = self.linear2(x)
x1 = F.tanh(x)
x = F.relu(x)
x = self.linear3(x)
#print(x1.shape)
return x,x1
class Classification_deep(nn.Module):
'''
input : elemental data
deep classification module data averaged at zeroth layer
'''
def __init__(self,inputs,output):
super(Classification_deep,self).__init__()
self.inputs = inputs
self.output = output
self.linear1 = nn.Linear(self.inputs,50)
#self.linear2 = nn.Linear(6,12)
self.linear2 = nn.Linear(50,self.output)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.zeros_(self.linear2.bias)
def forward(self,x):
x = F.relu(self.linear1(x))
#x = F.relu(self.linear2(x))
x = self.linear2(x)
return x
# torch.manual_seed(12)
# focus_net = Focus_deep(2,1,9,2).double()
# focus_net = focus_net.to("cuda")
# focus_net.linear2.weight.shape,focus_net.linear3.weight.shape
# focus_net.linear2.weight.data[25:,:] = focus_net.linear2.weight.data[:25,:] #torch.nn.Parameter(torch.tensor([last_layer]) )
# (focus_net.linear2.weight[:25,:]== focus_net.linear2.weight[25:,:] )
# focus_net.linear3.weight.data[:,25:] = -focus_net.linear3.weight.data[:,:25] #torch.nn.Parameter(torch.tensor([last_layer]) )
# focus_net.linear3.weight
# focus_net.helper( torch.randn((5,2,2)).double().to("cuda") )
criterion = nn.CrossEntropyLoss()
def my_cross_entropy(x, y,alpha,log_alpha,k):
# log_prob = -1.0 * F.log_softmax(x, 1)
# loss = log_prob.gather(1, y.unsqueeze(1))
# loss = loss.mean()
loss = criterion(x,y)
#alpha = torch.clamp(alpha,min=1e-10)
b = -1.0* alpha * log_alpha
b = torch.mean(torch.sum(b,dim=1))
closs = loss
entropy = b
loss = (1-k)*loss + ((k)*b)
return loss,closs,entropy
def calculate_attn_loss(dataloader,what,where,criter,k):
what.eval()
where.eval()
r_loss = 0
cc_loss = 0
cc_entropy = 0
alphas = []
lbls = []
pred = []
fidices = []
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels,fidx = data
lbls.append(labels)
fidices.append(fidx)
inputs = inputs.double()
inputs, labels = inputs.to("cuda"),labels.to("cuda")
avg,alpha,log_alpha = where(inputs)
outputs = what(avg)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
alphas.append(alpha.cpu().numpy())
#ent = np.sum(entropy(alpha.cpu().detach().numpy(), base=2, axis=1))/batch
# mx,_ = torch.max(alpha,1)
# entropy = np.mean(-np.log2(mx.cpu().detach().numpy()))
# print("entropy of batch", entropy)
#loss = (1-k)*criter(outputs, labels) + k*ent
loss,closs,entropy = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
r_loss += loss.item()
cc_loss += closs.item()
cc_entropy += entropy.item()
alphas = np.concatenate(alphas,axis=0)
pred = np.concatenate(pred,axis=0)
lbls = np.concatenate(lbls,axis=0)
fidices = np.concatenate(fidices,axis=0)
#print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
analysis = analyse_data(alphas,lbls,pred,fidices)
return r_loss/i,cc_loss/i,cc_entropy/i,analysis
def analyse_data(alphas,lbls,predicted,f_idx):
'''
analysis data is created here
'''
batch = len(predicted)
amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
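# acronyms, inferred from the conditions below: FTPT = focus true & prediction true,
# FFPT = focus false & prediction true, FTPF = focus true & prediction false,
# FFPF = focus false & prediction false; amth/alth = max attention >= 0.5 / < 0.5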
for j in range (batch):
focus = np.argmax(alphas[j])
if(alphas[j][focus] >= 0.5):
amth +=1
else:
alth +=1
if(focus == f_idx[j] and predicted[j] == lbls[j]):
ftpt += 1
elif(focus != f_idx[j] and predicted[j] == lbls[j]):
ffpt +=1
elif(focus == f_idx[j] and predicted[j] != lbls[j]):
ftpf +=1
elif(focus != f_idx[j] and predicted[j] != lbls[j]):
ffpf +=1
#print(sum(predicted==lbls),ftpt+ffpt)
return [ftpt,ffpt,ftpf,ffpf,amth,alth]
###Output
_____no_output_____
###Markdown
training
###Code
number_runs = 10
full_analysis =[]
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
k = 0.001
for n in range(number_runs):
print("--"*40)
# instantiate focus and classification Model
torch.manual_seed(n)
where = Focus_deep(2,1,9,2).double()
where.linear2.weight.data[25:,:] = where.linear2.weight.data[:25,:]
where.linear3.weight.data[:,25:] = -where.linear3.weight.data[:,:25]
where = where.double().to("cuda")
ex,_ = where.helper( torch.randn((5,2,2)).double().to("cuda"))
print(ex)
torch.manual_seed(n)
what = Classification_deep(50,3).double()
where = where.to("cuda")
what = what.to("cuda")
# instantiate optimizer
optimizer_where = optim.Adam(where.parameters(),lr =0.001)
optimizer_what = optim.Adam(what.parameters(), lr=0.001)
#criterion = nn.CrossEntropyLoss()
acti = []
analysis_data = []
loss_curi = []
epochs = 2000
# calculate zeroth epoch loss and FTPT values
running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k)
loss_curi.append(running_loss)
analysis_data.append(anlys_data)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
# training starts
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
what.train()
where.train()
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels,_ = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_where.zero_grad()
optimizer_what.zero_grad()
# forward + backward + optimize
avg, alpha,log_alpha = where(inputs)
outputs = what(avg)
my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
# print statistics
running_loss += my_loss.item()
my_loss.backward()
optimizer_where.step()
optimizer_what.step()
#break
running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k)
analysis_data.append(anls_data)
if(epoch % 200==0):
print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.01:
break
print('Finished Training run ' +str(n))
#break
analysis_data = np.array(analysis_data)
FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
full_analysis.append((epoch, analysis_data))
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels,_ = data
images = images.double()
images, labels = images.to("cuda"), labels.to("cuda")
avg, alpha,log_alpha = where(images)
outputs = what(avg)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 3000 test images: %f %%' % ( 100 * correct / total))
print(np.mean(np.array(FTPT_analysis),axis=0)) #[7.42700000e+01 2.44100000e+01 7.33333333e-02 1.24666667e+00]
FTPT_analysis
cnt=1
for epoch, analysis_data in full_analysis:
analysis_data = np.array(analysis_data)
# print("="*20+"run ",cnt,"="*20)
plt.figure(figsize=(6,5))
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0]/30,label="FTPT")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1]/30,label="FFPT")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2]/30,label="FTPF")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3]/30,label="FFPF")
plt.title("Training trends for run "+str(cnt))
plt.grid()
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.legend()
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("percentage train data", fontsize=14, fontweight = 'bold')
plt.savefig(path + "run"+str(cnt)+".png",bbox_inches="tight")
plt.savefig(path + "run"+str(cnt)+".pdf",bbox_inches="tight")
cnt+=1
FTPT_analysis.to_csv(path+"synthetic_zeroth.csv",index=False)
###Output
_____no_output_____ |
src/tutorial.ipynb | ###Markdown
Handwritten Text Recognition using TensorFlow 2.0This tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) into the **raw** folder. Don't change anything in the structure of the dataset, since the scripts were written for the **original structure** of each one. Your project directory will be like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will be like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.0 Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install TensorFlow 2.0 with GPU support.
###Code
!pip install -q tensorflow-gpu==2.1.0-rc2
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
import datetime
import string
# define parameters
source = "iam_words"
arch = "simpleHTR"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
# input_size = (1024, 128, 1)
# max_text_length = 128
input_size = (128, 32, 1)
max_text_length = 32
charset_base = string.printable[:80]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
source: ..\data\iam_words.hdf5
output ..\output\iam_words\simpleHTR
target ..\output\iam_words\simpleHTR\checkpoint_weights.hdf5
charset: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&'()*+,-./:;<
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process (a minimal sketch of the batching idea follows below).
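For intuition, `next_train_batch()` behaves like an endless generator of `(inputs, targets)` batches. A minimal sketch of that idea (illustration only, not the project's implementation):

```python
import numpy as np

def next_batch(x, y, batch_size=16):
    """Yield (inputs, targets) batches forever, reshuffling on every pass."""
    while True:
        order = np.random.permutation(len(x))
        for i in range(0, len(x), batch_size):
            idx = order[i:i + batch_size]
            yield x[idx], y[idx]
```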
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
Train images: 28143
Validation images: 4088
Test images: 10586
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC (sketched below) and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
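For intuition about "the loss by CTC": given frame-wise logits and variable-length labels, CTC marginalizes over all valid alignments. A minimal sketch with TensorFlow's built-in op (the shapes are illustrative assumptions; HTRModel wires this up internally, so this is not the project's code):

```python
import tensorflow as tf

batch, frames, vocab = 2, 32, 84                  # vocab includes the CTC blank
logits = tf.random.normal((batch, frames, vocab))
labels = tf.constant([[5, 2, 9], [7, 1, 0]])      # zero-padded label sequences
label_length = tf.constant([3, 2])                # true lengths before padding
logit_length = tf.fill([batch], frames)

loss = tf.nn.ctc_loss(labels, logits, label_length, logit_length,
                      logits_time_major=False, blank_index=-1)
print(loss)  # one loss value per sample
```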
###Code
from network.model import HTRModel
# create and compile HTRModel
# note: `learning_rate=None` will get architecture default value
model = HTRModel(architecture=arch, input_size=input_size, vocab_size=dtgen.tokenizer.vocab_size)
model.compile(learning_rate=0.001)
# save network summary
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) [(None, 128, 32, 1)] 0
_________________________________________________________________
conv2d (Conv2D) (None, 128, 32, 32) 832
_________________________________________________________________
batch_normalization (BatchNo (None, 128, 32, 32) 128
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 64, 16, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 64, 16, 64) 51264
_________________________________________________________________
batch_normalization_1 (Batch (None, 64, 16, 64) 256
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 32, 8, 64) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 32, 8, 128) 73856
_________________________________________________________________
batch_normalization_2 (Batch (None, 32, 8, 128) 512
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 32, 4, 128) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 32, 4, 128) 147584
_________________________________________________________________
batch_normalization_3 (Batch (None, 32, 4, 128) 512
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 32, 2, 128) 0
_________________________________________________________________
conv2d_4 (Conv2D) (None, 32, 2, 256) 295168
_________________________________________________________________
batch_normalization_4 (Batch (None, 32, 2, 256) 1024
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 32, 1, 256) 0
_________________________________________________________________
reshape (Reshape) (None, 32, 256) 0
_________________________________________________________________
bidirectional (Bidirectional (None, 32, 512) 1050624
_________________________________________________________________
bidirectional_1 (Bidirection (None, 32, 512) 1574912
_________________________________________________________________
dropout (Dropout) (None, 32, 512) 0
_________________________________________________________________
dense (Dense) (None, 32, 83) 42579
=================================================================
Total params: 3,239,251
Trainable params: 3,238,035
Non-trainable params: 1,216
_________________________________________________________________
###Markdown
4 Tensorboard To facilitate visualization of the model's training, you can instantiate Tensorboard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
Train for 1759 steps, validate for 256 steps
Epoch 1/1000
803/1759 [============>.................] - ETA: 50s - loss: 15.8346
###Markdown
6 Predict The predict process is similar to Keras's *predict*; note the `ctc_decode=True` flag, sketched below:
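`ctc_decode=True` means the frame-wise network output is collapsed into label sequences before being returned. The same idea with Keras's decoder (illustrative shapes, not the project's code):

```python
import numpy as np
import tensorflow as tf

y_pred = np.random.random((2, 32, 84)).astype("float32")  # (batch, frames, vocab)
input_length = np.full((2,), 32)                          # frames per sample

decoded, log_prob = tf.keras.backend.ctc_decode(y_pred, input_length, greedy=True)
print(decoded[0].numpy())  # best path per sample, padded with -1
```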
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead (see the sketch after the parameter list). The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
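Under the hood these metrics are normalized edit distances. A minimal sketch of CER and WER (assuming the `editdistance` package rather than the project's `evaluation` module):

```python
import editdistance  # pip install editdistance

def cer(pd, gt):
    return editdistance.eval(pd, gt) / max(len(gt), 1)

def wer(pd, gt):
    return editdistance.eval(pd.split(), gt.split()) / max(len(gt.split()), 1)

print(cer("helo world", "hello world"))  # 1 edit over 11 characters
print(wer("helo world", "hello world"))  # 1 edit over 2 words
```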
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'],
norm_accentuation=False,
norm_punctuation=False)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) into the **raw** folder. Don't change anything in the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process is similar to Keras's *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict The predict process is similar to Keras's *predict*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'])
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://www.transcriptorium.eu/~tsdata/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](https://fki.tic.heia-fr.ch/databases/saint-gall-database)e. [Washington](https://fki.tic.heia-fr.ch/databases/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) into the **raw** folder. Don't change anything in the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print("Found GPU at: {}".format(device_name))
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
source: ../data/bentham.hdf5
output ../output/bentham/flor
target ../output/bentham/flor/checkpoint_weights.hdf5
charset: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
Train images: 8807
Validation images: 1372
Test images: 820
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process is similar to Keras's *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict The predict process is similar to Keras's *predict*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts, ground_truth)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.0This tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) into the **raw** folder. Don't change anything in the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── generator.py │ ├── preproc.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py ├── transform │ ├── bentham.py │ ├── iam.py │ ├── rimes.py │ ├── saintgall.py │ └── washington.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --dataset= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── generator.py │ ├── preproc.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py ├── transform │ ├── bentham.py │ ├── iam.py │ ├── rimes.py │ ├── saintgall.py │ └── washington.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.0 Make sure the jupyter notebook is using GPU mode. Try to use a **Tesla T4** instead of a Tesla K80 (it's faster).
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install TensorFlow 2.0 with GPU support.
###Code
!pip install -q tensorflow-gpu==2.0.0-beta1
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive")
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
# define parameters
dataset = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
hdf5_src = os.path.join("..", "data", f"{dataset}.hdf5")
output_path = os.path.join("..", "output", dataset, arch)
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = "".join([chr(i) for i in range(32, 127)])
print("source:", hdf5_src)
print("output", output_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(hdf5_src=hdf5_src,
batch_size=batch_size,
max_text_length=max_text_length)
print(f"Train images: {dtgen.total_train}")
print(f"Validation images: {dtgen.total_valid}")
print(f"Test images: {dtgen.total_test}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER);* Saving and loading models;* Loading weights into the models, if they exist;* Running the train/predict process using a *generator*.To keep HTRModel dynamic, its parameters are the *input_layer* and *output_layer* from your own network (the default code has Bluche and Puigcerver implementations in **network/architecture.py**).The last parameter is the list of chars you want to work with (default is 96 chars from ASCII).
###Code
import time
from network.model import HTRModel
from network import architecture, callbacks
# get the input_layer, output_layer and optimizer from default network
network_func = getattr(architecture, arch)
ioo = network_func(input_size=input_size, output_size=len(charset_base) + 1)
# initiate and compile the HTRModel
model = HTRModel(inputs=ioo[0], outputs=ioo[1], charset=charset_base)
model.compile(optimizer=ioo[2])
# save network summary
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
checkpoint = "checkpoint_weights.hdf5"
cbs = callbacks.setup(logdir=output_path, hdf5=checkpoint)
model.load_checkpoint(target=os.path.join(output_path, checkpoint))
###Output
_____no_output_____
###Markdown
4 Tensorboard To facilitate visualization of the model's training, you can instantiate Tensorboard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit_generator*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = time.time()
h = model.fit_generator(generator=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.train_steps,
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.valid_steps,
callbacks=cbs,
shuffle=True,
verbose=1)
total_time = time.time() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
train_corpus = "\n".join([
f"Total train images: {dtgen.total_train}",
f"Total validation images: {dtgen.total_valid}",
f"Batch: {batch_size}\n",
f"Total time: {total_time:.8f} sec",
f"Average time per epoch: {(total_time / len(loss)):.8f} sec\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(train_corpus)
print(train_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The predict process is similar to Keras's *predict_generator*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = time.time()
predicts = model.predict_generator(generator=dtgen.next_test_batch(),
steps=dtgen.test_steps,
use_multiprocessing=True,
verbose=1)
predicts = pp.decode_ctc(predicts, charset_base)
total_time = time.time() - start_time
ground_truth = pp.decode_ctc(dtgen.dataset["test"]["gt"], charset_base)
pred_corpus = []
for pd, gt in zip(predicts, ground_truth):
pred_corpus.append(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset["test"]["dt"][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(pred_corpus[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to create your own metrics instead. The function takes three parameters: * predicts and labels (predict_generator output: predicts[0] and predicts[1])* norm_accentuation: calculation with/without accentuation * norm_punctuation: calculation with/without punctuation marks
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=ground_truth,
norm_accentuation=False,
norm_punctuation=False)
eval_corpus = "\n".join([
f"Total test images: {dtgen.total_test}",
f"Total time: {total_time:.8f} sec",
f"Time per item: {(total_time / dtgen.total_test):.8f} sec\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(eval_corpus)
print(eval_corpus)
###Output
_____no_output_____
###Markdown
RedisAI is a Redis module for serving tensors and executing deep learning models. RedisAI takes care of making a runtime for your model, supporting tensor datatypes and managing downstream runtimes built on top of Tensorflow, PyTorch and ONNXRuntime, while Redis itself helps you handle the scale that Redis is known for.Detailed blog: https://medium.com/tensorwerk/redisai-thors-stormbreaker-for-deep-learning-deployment-a3fe7569c559 But before jumping in, let's take a step back and look at the current Deep Learning / Machine Learning world. Deep Learning / Machine Learning - A precursor- Matrices / Tensors- Fundamental Operations- Regression, Classification, Neural Networks & Deep Networks- **Scikit Learn** example- **Tensorflow** example- **PyTorch** example
###Code
import numpy as np
np.random.random((5, 2))
a = np.random.random((5, 7))
b = np.random.random((7, 2))
c = a @ b
print(f'On matrix multiplication: {c.shape}')
a = np.random.random((5, 3))
b = np.random.random((5, 3))
c = a + b
print(f'On addition: {c.shape}')
# Other math operations, activation functions etc
###Output
_____no_output_____
###Markdown
Linear Regression with Scikit Learn
###Code
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_boston
boston = load_boston()
x, y = boston.data, boston.target
print(boston.DESCR)
(x.shape, y.shape)
testindex = 25
testarray = x[testindex]
testtarget = y[testindex]
for i in range(len(boston.feature_names)):
print(f'{boston.feature_names[i]:<10}: {testarray[i]}')
testtarget
model = LinearRegression()
model.fit(x, y)
model.predict(testarray.reshape(1, -1))
# needed later
sklearn_model = model
###Output
_____no_output_____
###Markdown
Object Detection with Tensorflow Load graph and required variables
###Code
import tensorflow as tf
# download the model from https://app.box.com/s/ottxnjxc1ev9wlqiw0gmsb87r9xzjkyd
frozen_graph = "../assets/yolo.pb"
with tf.gfile.GFile(frozen_graph, "rb") as f:
restored_graph_def = tf.GraphDef()
restored_graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
restored_graph_def,
input_map=None,
return_elements=None,
name="")
inputs = graph.get_tensor_by_name('input_1:0')
image_shape = graph.get_tensor_by_name('input_image_shape:0')
boxes = graph.get_tensor_by_name('concat_11:0')
classes = graph.get_tensor_by_name('concat_13:0')
###Output
_____no_output_____
###Markdown
Load image
###Code
from PIL import Image, ImageDraw
image = '../assets/nightking.jpg'
img_jpg = Image.open(image)
###Output
_____no_output_____
###Markdown
preprocess
###Code
np_img = np.array(img_jpg)
img = np_img.astype(np.float32)
img = np.expand_dims(img, axis=0)
img /= 255.0
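# note: there is no resize step here -- the demo assumes the source image already matches the 416x416 network input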
img.shape
###Output
_____no_output_____
###Markdown
Run model
###Code
with tf.Session(graph=graph) as sess:
sess.run([tf.global_variables_initializer()])
ret_b, ret_c = sess.run([boxes, classes], feed_dict={inputs: img, image_shape: np.array((416, 416))})
shape = np_img.shape
###Output
_____no_output_____
###Markdown
Post process
###Code
new_shape = 416
def post_process(classes, boxes, shapes=(416, 416)):
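    # undo the letterbox: the network saw a new_shape x new_shape padded/scaled image,
    # so map each predicted box back to the original image's coordinate frame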
pad_x = max(shapes[0] - shapes[1], 0) * (new_shape / max(shapes))
pad_y = max(shapes[1] - shapes[0], 0) * (new_shape / max(shapes))
unpad_h = new_shape - pad_y
unpad_w = new_shape - pad_x
for ind, class_val in enumerate(classes):
top, left, bottom, right = boxes[ind]
top = ((top.astype('int32') - pad_y // 2) / unpad_h) * shapes[0]
left = ((left.astype('int32') - pad_x // 2) / unpad_w) * shapes[1]
bottom = ((bottom.astype('int32') - pad_y // 2) / unpad_h) * shapes[0]
right = ((right.astype('int32') - pad_x // 2) / unpad_w) * shapes[1]
yield left, top, right, bottom
ret_b.dtype
###Output
_____no_output_____
###Markdown
Process output
###Code
draw = ImageDraw.Draw(img_jpg)
for left, top, right, bottom in post_process(ret_c, ret_b, shape):
draw.rectangle(((left, top), (right, bottom)), outline='green')
from IPython.display import display
display(img_jpg)
###Output
_____no_output_____
###Markdown
Chatbot with PyTorch Importing torch
###Code
import torch
###Output
_____no_output_____
###Markdown
Utilities, helper constants etc.
###Code
import json
import re
voc_file = '../assets/voc.json'
with open(voc_file) as f:
voc_content = json.load(f)
w2i = voc_content['word2index']
i2w = voc_content['index2word']
PAD_token = 0
SOS_token = 1
EOS_token = 2
max_len = 30
def normalize_string(s):
s = s.lower()
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def str2indices(sentence):
return [[el for el in [w2i.get(word) for word in sentence.split()] if el is not None] + [EOS_token]]
def indices2str(indices):
return ' '.join([i2w[str(ind)] for ind in indices])
###Output
_____no_output_____
###Markdown
Encoder & Decoder using PyTorch JIT
###Code
encoder = torch.jit.load('../assets/encoder.pt')
decoder = torch.jit.load('../assets/decoder.pt')
###Output
_____no_output_____
###Markdown
Processing input
###Code
import numpy as np
d_input = torch.tensor(SOS_token, dtype=torch.int64).reshape(1, 1)
mysentence = "Are you crazy?"
indices = str2indices(normalize_string(mysentence))
array = torch.from_numpy(np.array(indices, dtype=np.long).transpose())
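# transpose to (seq_len, 1); the encoder apparently expects time-major input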
length = torch.from_numpy(np.array([len(array)], dtype=np.long))
###Output
_____no_output_____
###Markdown
Running encoder & decoder
###Code
e_out, hidden = encoder(array, length)
decoder_out = []
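# greedy decoding: feed the best token from each step back into the decoder until EOS (or max_len)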
for i in range(max_len):
out, hidden = decoder(d_input, hidden, e_out)
spanned_out = out.reshape(1, -1)
ind = spanned_out.argmax(1).item()
if ind == EOS_token:
break
d_input = torch.tensor(ind, dtype=torch.int64).reshape(1, 1)
if ind == PAD_token:
continue
decoder_out.append(ind)
###Output
_____no_output_____
###Markdown
Processing output
###Code
print(indices2str(decoder_out))
###Output
_____no_output_____
###Markdown
What do we cover today? Getting Started with RedisAI

We'll start with installing RedisAI. Setting it up on a bare machine is a few lines of commands, and even for scaling it up to millions of requests per second, a couple of config changes should be good enough.

You can find the examples at https://github.com/RedisAI/redisai-examples

A quick introduction about the features of RedisAI??

Setting up a new machine

```bash
sudo apt update
sudo apt install -y build-essential tcl libjemalloc-dev git cmake unzip
sudo ufw allow 6379
sudo ufw allow 26379
```

Installing Redis

```bash
curl -O http://download.redis.io/redis-stable.tar.gz
tar xzvf redis-stable.tar.gz
cd redis-stable
make
sudo make install
cd ~
rm redis-stable.tar.gz
```

Building RedisAI module

```bash
git clone https://github.com/RedisAI/RedisAI.git
cd RedisAI
bash get_deps.sh cpu
mkdir build
cd build
cmake -DDEPS_PATH=../deps/install ..
make
cd ..
```

However ...

```bash
docker run -p 6379:6379 -it --rm redisai/redisai
```

Client Libraries

redis-cli

```bash
# Setting a tensor using redis-cli
AI.TENSORSET foo FLOAT 2 2 VALUES 1 2 3 4
```

Python Redis client

```python
# Setting a tensor using python Redis client
import redis
r = redis.Redis(host='localhost', port=6379)
r.execute_command('AI.TENSORSET', 'foo', 'FLOAT', 2, 2, 'VALUES', 1, 2, 3, 4)
```

Python RedisAI client

```python
# Setting a tensor using Python RedisAI client
import redisai as rai
con = rai.Client(host='localhost', port=6379)
foo = rai.Tensor(rai.DType.float, [2, 2], [1, 2, 3, 4])
con.tensorset('foo', foo)
```

NodeJS Redis client

```node
// Setting a tensor using NodeJS Redis client
var Redis = require('ioredis')
let redis = new Redis({ parser: 'javascript' });
redis.call('AI.TENSORSET','foo','FLOAT',2,2,'VALUES',1,2,3,4)
```

Go Redis client

```go
// Setting a tensor using GoLang Redis client
package main

import "github.com/go-redis/redis"

func main() {
    client := redis.NewClient(&redis.Options{
        Addr:     "localhost:6379",
        Password: "",
    })
    client.Do("AI.TENSORSET","foo","FLOAT",2,2,"VALUES",1,2,3,4)
}
```
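Note that all five snippets do exactly the same thing: each issues one AI.TENSORSET command with a key (`foo`), a datatype (`FLOAT`), a shape (`2 2`) and the values, so you can pick whichever client stack you already use.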
###Code
import redisai as rai
con = rai.Client(host='localhost', port=6379)
foo = rai.Tensor(rai.DType.float, [2, 2], [1, 2, 3, 4])
con.tensorset('foo', foo)
###Output
_____no_output_____
###Markdown
A quick glimpse into redisai python client
###Code
import redisai
###Output
_____no_output_____
###Markdown
ValueTensor and BlobTensor
###Code
con = redisai.Client(host='localhost', port=6379)
foo = rai.Tensor(rai.DType.float, [2, 2], [1, 2, 3, 4])
con.tensorset('foo', foo)
import numpy as np
random_array = np.random.random((3, 4))
foo = redisai.BlobTensor.from_numpy(random_array)
con.tensorset('foo', foo)
###Output
_____no_output_____
###Markdown
Modelset & Scriptset

```python
con.modelset('whaaaaaaaaaat?')
con.scriptset('whaaaaaaaaat again??')
```
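In practice, `modelset` takes a key name, backend, device and the serialized model; for TensorFlow graphs you also pass the input/output node names. A hedged sketch of the shape of such a call (the keyword names follow the hints commented in the YOLO cell further down; the key is a placeholder):

```python
# illustrative only -- 'my_tf_model' is a placeholder key
pb_blob = open('../assets/yolo.pb', 'rb').read()
con.modelset('my_tf_model', redisai.Backend.tf, redisai.Device.cpu, pb_blob,
             input=['input_1', 'input_image_shape'],
             output=['concat_11', 'concat_13'])
```

ml2rt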
###Code
import ml2rt
# when converting, ml2rt can infer the ONNX tensor type from a sample input,
# e.g. prototype=x[0].reshape(1, -1).astype(np.float32)  # shape (1, 13), dtype float32
# initial_types=[ml2rt.utils.guess_onnx_tensortype(x[0].reshape(1, -1).astype(np.float32))]
ml2rt.save_sklearn(sklearn_model, '../assets/boston.onnx')
dir(ml2rt)
sklearn_model = ml2rt.load_model('../assets/boston.onnx')
con.modelset('sklearn', redisai.Backend.onnx, redisai.Device.cpu, sklearn_model)
con.modelrun('Not now!!')
script_string = '''
def add_two(a, b):
return a + b
'''
con.scriptset('script', redisai.Device.cpu, script_string)
tensor = redisai.BlobTensor.from_numpy(np.array([1, 2]))
con.tensorset('a', tensor)
con.tensorset('b', tensor)
con.scriptrun('script', 'add_two', input=['a', 'b'], output='c')
ret = con.tensorget('c')
ret
ret = con.tensorget('c', as_type=redisai.BlobTensor)
ret.to_numpy()
###Output
_____no_output_____
###Markdown
Enough of that!! Let's run through our examples-------------------------------------------------------------------------- Linear regression with scikit-learn
###Code
sklearn_model = ml2rt.load_model('../assets/boston.onnx')
con.modelset('sklearn', redisai.Backend.onnx, redisai.Device.cpu, sklearn_model)
tensor = redisai.BlobTensor.from_numpy(x[testindex].reshape(1, -1).astype(np.float32))
con.tensorset('input', tensor)
con.modelrun('sklearn', input='input', output='out')
con.tensorget('out').value
###Output
_____no_output_____
###Markdown
Object Detection with Tensorflow
###Code
yolomodel = ml2rt.load_model('../assets/yolo.pb')
# input=['input_1', 'input_image_shape'], output=['concat_11', 'concat_13']
con.modelset('yolo', redisai.Backend.tf, redisai.Device.cpu, yolomodel)
image = '../assets/nightking.jpg'
img_jpg = Image.open(image)
script = '''
def normalize(img):
img /= 255
return img
def pre_process(img):
img = img.float()
img = img.unsqueeze(0)
normalize(img)
return img
'''
# A common error point
con.scriptset('script', redisai.Device.cpu, script)
tensor = redisai.BlobTensor.from_numpy(np.array(img_jpg))
img_new_size = redisai.Tensor(redisai.DType.float, (2,), (416, 416))
con.tensorset('image', tensor)
con.tensorset('input_size', img_new_size)
con.scriptrun('script', 'pre_process', 'image', 'processed_image')
con.tensorget('processed_image', as_type=redisai.BlobTensor)
con.modelrun('yolo', input=['processed_image', 'input_size'], output=['boxes', 'classes'])
boxes = con.tensorget('boxes', as_type=redisai.BlobTensor).to_numpy()
classes = con.tensorget('classes', as_type=redisai.BlobTensor).to_numpy()
# the drawing context must exist before it is used in the loop
draw = ImageDraw.Draw(img_jpg)
coordinates = post_process(classes, boxes)
for left, top, right, bottom in coordinates:
    draw.rectangle(((left, top), (right, bottom)), outline='green')
display(img_jpg)
###Output
_____no_output_____
###Markdown
Chatbot with PyTorch
###Code
encoder = ml2rt.load_model('../assets/encoder.pt')
decoder = ml2rt.load_model('../assets/decoder.pt')
mysentence = "I don't like this people"
indices = str2indices(normalize_string(mysentence))
np_array = np.array(indices, dtype=np.long).transpose()
array = redisai.BlobTensor.from_numpy(np_array)
length = redisai.BlobTensor.from_numpy(np.array([len(np_array)], dtype=np.long))
d_input = redisai.Tensor(value=SOS_token, dtype=redisai.DType.int64, shape=(1, 1))
con.modelset('encoder', redisai.Backend.torch, redisai.Device.cpu, encoder)
con.modelset('decoder', redisai.Backend.torch, redisai.Device.cpu, decoder)
con.tensorset('sentence', array)
con.tensorset('length', length)
con.tensorset('d_input', d_input)
con.modelrun('encoder', input=['sentence', 'length'], output=['e_out', 'hidden'])
decoder_out = []
for i in range(max_len):
# Don't have to carry around `hidden` and `e_out`
con.modelrun('decoder', input=['d_input', 'hidden', 'e_out'], output=['out', 'hidden'])
out = con.tensorget('out', as_type=redisai.BlobTensor).to_numpy()
spanned_out = out.reshape(1, -1)
ind = spanned_out.argmax().item()
if ind == EOS_token:
break
d_input = redisai.Tensor(value=ind, dtype=redisai.DType.int64, shape=(1, 1))
con.tensorset('d_input', d_input)
if ind == PAD_token:
continue
decoder_out.append(ind)
indices2str(decoder_out)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) into the **raw** folder. Don't change anything in the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the Jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
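# string.printable[:95] = digits + letters + punctuation + space (drops the control whitespace \t\n\r\x0b\x0c)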
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To make the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch, input_size=input_size, vocab_size=dtgen.tokenizer.vocab_size)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
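# get_callbacks() wires checkpoint saving to the given checkpoint path and logging to logdir (read by TensorBoard in section 4)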
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Tensorboard To make it easier to visualize the model's training, you can launch TensorBoard. **Note**: All data is saved in the output folder.
###Code
%load_ext tensorboard
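# --reload_interval is in seconds: TensorBoard re-reads the logdir every 5 minutes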
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
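# next_train_batch()/next_valid_batch() are generators, so steps_per_epoch/validation_steps tell Keras how many batches make one pass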
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The predict process is similar to Keras *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]  # ground truth labels come out of the HDF5 file as bytes
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=ground_truth)
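# A possible variant (untested sketch): the two flag names below come from the
# parameter list in the description above and are assumed to be booleans that
# normalize away accents and punctuation before computing the metrics.
# evaluate_norm = evaluation.ocr_metrics(predicts=predicts,
#                                        ground_truth=ground_truth,
#                                        norm_accentuation=True,
#                                        norm_punctuation=True)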
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets you can use:a. [Bentham](http://www.transcriptorium.eu/~tsdata/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](https://fki.tic.heia-fr.ch/databases/saint-gall-database)e. [Washington](https://fki.tic.heia-fr.ch/databases/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print("Found GPU at: {}".format(device_name))
###Output
Found GPU at: /device:GPU:0
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/HTR/src/"
!ls -l
###Output
Mounted at ./gdrive
/content/gdrive/My Drive/Colab Notebooks/HTR/src
total 37
drwx------ 2 root root 4096 Jul 7 03:27 data
-rw------- 1 root root 9074 Jul 7 01:22 main.py
drwx------ 2 root root 4096 Jul 7 03:27 network
-rw------- 1 root root 16138 Jul 6 00:24 tutorial.ipynb
drwx------ 2 root root 4096 Jul 13 02:27 wandb
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the Jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
# data sources the previous checkpoint was trained on
data_sources = ['washington']
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 12
batch_size = 64
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
# choose whether to train a new model from scratch or fine-tune the previous checkpoint
new_model = int(input('Would you like to train a new model, or update the previous weights? (0:UPDATE , 1:NEW)'))
if new_model: # new model: train from scratch on the current source only
data_sources = [source]
output_path = os.path.join("..", "output", source, arch)
checkpoint_path = os.path.join(output_path, "checkpoint_weights.hdf5")
else: # update: fine-tune the checkpoint trained on the previous data sources
data_sources.append(source)
output_path = os.path.join("..", "output", "_".join(data_sources), arch)
checkpoint_path = os.path.join("..", "output", "_".join(data_sources[:-1]), arch, "checkpoint_weights.hdf5")
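# e.g. in the recorded run below, weights load from ../output/washington/flor/ and results go to ../output/washington_bentham/flor/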
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("data sources of the upcoming model :", *data_sources)
print("source:", source_path)
print("output", output_path)
print("checkpoint path", checkpoint_path)
print("charset:", charset_base)
###Output
Would you like to train a new model, or update the previous weights? (0:UPDATE , 1:NEW)0
data sources of the upcoming model : washington bentham
source: ../data/bentham.hdf5
output ../output/washington_bentham/flor
checkpoint path ../output/washington/flor/checkpoint_weights.hdf5
charset: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
Train images: 8807
Validation images: 1372
Test images: 820
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To make the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=checkpoint_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=checkpoint_path, verbose=1)
###Output
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) [(None, 1024, 128, 1)] 0
_________________________________________________________________
conv2d_6 (Conv2D) (None, 512, 64, 16) 160
_________________________________________________________________
p_re_lu_6 (PReLU) (None, 512, 64, 16) 16
_________________________________________________________________
batch_normalization_6 (Batch (None, 512, 64, 16) 112
_________________________________________________________________
full_gated_conv2d_5 (FullGat (None, 512, 64, 16) 4640
_________________________________________________________________
conv2d_7 (Conv2D) (None, 512, 64, 32) 4640
_________________________________________________________________
p_re_lu_7 (PReLU) (None, 512, 64, 32) 32
_________________________________________________________________
batch_normalization_7 (Batch (None, 512, 64, 32) 224
_________________________________________________________________
full_gated_conv2d_6 (FullGat (None, 512, 64, 32) 18496
_________________________________________________________________
conv2d_8 (Conv2D) (None, 256, 16, 40) 10280
_________________________________________________________________
p_re_lu_8 (PReLU) (None, 256, 16, 40) 40
_________________________________________________________________
batch_normalization_8 (Batch (None, 256, 16, 40) 280
_________________________________________________________________
full_gated_conv2d_7 (FullGat (None, 256, 16, 40) 28880
_________________________________________________________________
dropout_3 (Dropout) (None, 256, 16, 40) 0
_________________________________________________________________
conv2d_9 (Conv2D) (None, 256, 16, 48) 17328
_________________________________________________________________
p_re_lu_9 (PReLU) (None, 256, 16, 48) 48
_________________________________________________________________
batch_normalization_9 (Batch (None, 256, 16, 48) 336
_________________________________________________________________
full_gated_conv2d_8 (FullGat (None, 256, 16, 48) 41568
_________________________________________________________________
dropout_4 (Dropout) (None, 256, 16, 48) 0
_________________________________________________________________
conv2d_10 (Conv2D) (None, 128, 4, 56) 21560
_________________________________________________________________
p_re_lu_10 (PReLU) (None, 128, 4, 56) 56
_________________________________________________________________
batch_normalization_10 (Batc (None, 128, 4, 56) 392
_________________________________________________________________
full_gated_conv2d_9 (FullGat (None, 128, 4, 56) 56560
_________________________________________________________________
dropout_5 (Dropout) (None, 128, 4, 56) 0
_________________________________________________________________
conv2d_11 (Conv2D) (None, 128, 4, 64) 32320
_________________________________________________________________
p_re_lu_11 (PReLU) (None, 128, 4, 64) 64
_________________________________________________________________
batch_normalization_11 (Batc (None, 128, 4, 64) 448
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 128, 2, 64) 0
_________________________________________________________________
reshape_1 (Reshape) (None, 128, 128) 0
_________________________________________________________________
bidirectional_2 (Bidirection (None, 128, 256) 198144
_________________________________________________________________
dense_2 (Dense) (None, 128, 256) 65792
_________________________________________________________________
bidirectional_3 (Bidirection (None, 128, 256) 296448
_________________________________________________________________
dense_3 (Dense) (None, 128, 104) 26728
=================================================================
Total params: 825,592
Trainable params: 824,312
Non-trainable params: 1,280
_________________________________________________________________
###Markdown
4 Training The training process is similar to Keras *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
! pip install wandb
import wandb
from wandb.keras import WandbCallback
# 1. Start a new run
wandb.init(project='OCR', entity='ali_kfp')
# 2. Save model inputs and hyperparameters
config = wandb.config
config.learning_rate = 0.01
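# Note: this only logs a value to W&B; the model above was compiled with learning_rate=0.001, so 0.01 here does not affect training.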
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=[WandbCallback(), *callbacks], # unpack: get_callbacks() returns a list, so avoid nesting it
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
Requirement already satisfied: wandb in /usr/local/lib/python3.7/dist-packages (0.10.33)
Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.8.1)
Requirement already satisfied: promise<3,>=2.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.3)
Requirement already satisfied: shortuuid>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.0.1)
Requirement already satisfied: GitPython>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.1.18)
Requirement already satisfied: Click!=8.0.0,>=7.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (7.1.2)
Requirement already satisfied: docker-pycreds>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (0.4.0)
Requirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (5.4.8)
Requirement already satisfied: sentry-sdk>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.3.0)
Requirement already satisfied: subprocess32>=3.5.3 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.5.4)
Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from wandb) (3.13)
Requirement already satisfied: pathtools in /usr/local/lib/python3.7/dist-packages (from wandb) (0.1.2)
Requirement already satisfied: requests<3,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (2.23.0)
Requirement already satisfied: configparser>=3.8.1 in /usr/local/lib/python3.7/dist-packages (from wandb) (5.0.2)
Requirement already satisfied: six>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (1.15.0)
Requirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.7/dist-packages (from wandb) (3.17.3)
Requirement already satisfied: typing-extensions>=3.7.4.0; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (3.7.4.3)
Requirement already satisfied: gitdb<5,>=4.0.1 in /usr/local/lib/python3.7/dist-packages (from GitPython>=1.0.0->wandb) (4.0.7)
Requirement already satisfied: urllib3>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from sentry-sdk>=0.4.0->wandb) (1.24.3)
Requirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from sentry-sdk>=0.4.0->wandb) (2021.5.30)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (2.10)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.0.0->wandb) (3.0.4)
Requirement already satisfied: smmap<5,>=3.0.1 in /usr/local/lib/python3.7/dist-packages (from gitdb<5,>=4.0.1->GitPython>=1.0.0->wandb) (4.0.0)
###Markdown
5 Predict The predict process is similar to Keras *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts, ground_truth)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Tutorial
###Code
from webscraper import GlassdoorWebScraper
###Output
_____no_output_____
###Markdown
I wrote a few docstrings detailing: GlassdoorWebScraper - chromedriver installation URL. - Glassdoor job page URL. - Design of GlassdoorWebScraper. - Table of contents of all functions in GlassdoorWebScraper. - All functions and their parameters, with explanations. - Inspiration. - Method Resolution Order (MRO). - __init__ - All attributes.
###Code
help(GlassdoorWebScraper)
###Output
Help on class GlassdoorWebScraper in module webscraper:
class GlassdoorWebScraper(elements.ConfigElements, elements.WebScrapingElements)
| GlassdoorWebScraper(keyword, PATH='C:\\Program Files (x86)\\chromedriver.exe')
|
| A GlassdoorWebScraper obj will be able to configure filters and webscrape.
|
| Ensure that your chromedriver corresponds correctly to your current
| Google chrome version here:
|
| https://sites.google.com/a/chromium.org/chromedriver/downloads
|
|
|
| Here is the same URL with keyword="data scientist":
|
| https://www.glassdoor.com/Job/jobs.htm?sc.keyword="data scientist"
| &locT=C&locId=1147401&locKeyword=San%20Francisco,%20CA&jobType=all&
| fromAge=-1&minSalary=0&includeNoSalaryJobs=true&radius=100&cityId=
| -1&minRating=0.0&industryId=-1&sgocId=-1&seniorityType=all&companyId=
| -1&employerSizes=0&applicationType=0&remoteWorkType=0
|
|
|
| Design:
|
| - locators.py and elements.py are split into 2 components:
| filter configuration and webscraping.
| - The GlassdoorWebScraper class uses the low level functions
| of elements.py to create higher level functions.
| - GlassdoorWebScraper also inherits from elements.py to give
| access to the lower level functions.
|
|
|
| Table of Contents:
|
| - Basic Utility Functions
| - update keyword and URL
| - set implicit wait
| - get
| - close
| - Filter Configuration Functions
| - clear filters
| - init filters
| - init change filters
| - include no salary data
| - change *
| > keyword
| > location
| > jobtype
| > postdate
| > salary
| > city
| > industry
| > job function
| > seniority label
| > company
| > company size
| > Easy Apply Only/Work From Home Only
| > rating
| > sortby
| - Webscrape Function
|
|
|
| Functions:
|
| update_keyword_and_URL(keyword)
| Updates the keyword and URL simultaneously.
|
| set_implicitly_wait(implicitly_wait_time)
| Set the global implicit wait time.
|
| get(implicitly_wait_time=5, set_implicitly_wait=True)
| Creates a webdriver, maximizes window, sets the implicit wait time
| (which defaults to 5) if set_implicitly_wait is true, then
| finally opens the URL.
|
| close()
| Closes the current tab. This function is a wrapper just for
| convenience.
|
| change_keyword_to(keyword)
| Enter keyword into the keyword search bar and return.
|
| change_location_to(location)
| Enter location into the location search bar and return.
|
| init_filters(_filter=None)
| Initialize all filters if _filter is None else initialize _filter.
|
| reset_salary_slider(is_both=True, is_left=True)
| Reset salary slider. is_both decides if both sliders should be reset
| and is_left decides if the left or the right one should be reset
| (in the case that is_both is False).
|
| init_change_filters(filter_type)
| Initialize a filter, then print out possible filter options,
| then change the filter to specified input.
|
| change_jobtype_to(jobtype)
| Change to a specified jobtype filter option.
|
| change_postdate_to(postdate)
| Change to a specified postdate filter option.
|
| include_no_salary_data(include)
| The boolean include dictates whether or not the checkbox is checked.
|
| change_salary_to(begin_salary, end_salary)
| The salary range is in the form [a, b]. a is the begin_salary and is a string
| (e.g. "125K" where the K represents thousands). b is the end_salary and
| is also a string. include_no_salary_data defaults to True meaning
| it will by default include data with no salary.
|
| change_radius_to(radius)
| Change to a specified radius filter option.
|
| change_cityid_to(cityid)
| Change to a specified cityid filter option.
|
| change_industry_to(industry)
| Change to a specified industry filter option.
|
| change_jobfunction_to(job_function)
| Change to a specified job function filter option.
|
| change_senioritylabel_to(seniority_label)
| Change to a specified seniority label filter option.
|
| change_company_to(company)
| Change to a specified company filter option.
|
| change_companysize_to(company_size)
| Change to a specified company size filter option.
|
| easy_apply_work_home(is_eao, will_apply)
| If is_eao is true, then select the Easy Apply Only label button else
| select the Work From Home Only label button. Then, if will_apply is true,
| then apply.
|
| change_rating_to(rating)
| Change the rating.
|
| clear_filters()
| Clears all filters.
|
| sort_by(sort_type)
| Changes the "Most Relevant" dropdown (sortby) filter to a specified filter option.
|
| scrape_jobs(n_jobs)
| Webscrape jobs. n_jobs determines the size of the dataset.
|
|
| This project was created with inspiration from:
|
| https://github.com/arapfaik/scraping-glassdoor-selenium
|
| Method resolution order:
| GlassdoorWebScraper
| elements.ConfigElements
| elements.WebScrapingElements
| builtins.object
|
| Methods defined here:
|
| __init__(self, keyword, PATH='C:\\Program Files (x86)\\chromedriver.exe')
| The following attributes can be accessed and changed but it is advised not to do so directly.
|
| All attributes of a GlassdoorWebScraper obj include:
|
| self.PATH:
| The path to your chromedriver.exe.
|
|
| self.keyword:
| The keyword initialized by the user.
|
|
| self.URL_part_1:
| The first part of the Glassdoor URL.
|
|
| self.URL_part_2:
| The second part of the Glassdoor URL.
|
|
| self.URL:
| The concatenation of self.URL_part_1, self.keyword, and self.URL_part_2
| in that exact order.
|
|
| self.driver:
| The Selenium webdriver. Only created when the user creates a GlassdoorWebScraper obj and
| calls the get() method.
|
|
| self.filters:
| A dictionary of dictionaries and lists. It contains all the configurable filters
| of the current opened webpage. Only created when the init_configs() method is called.
|
|
| self.get_join_filters:
| A dictionary of dictionaries. The outer dict has keys for each filter. These keys correspond
| to dictionary values that hold: name, get fn, join fn, is_salary, and is_more. Name is the name
| of the filter. Get fn is the get locator function for that filter. Join fn is the function used to
| regex simplify and concatenate the preprocessed filter options corresponding to a filter. is_salary
| checks if the filter is the salary filter. is_more checks if the filter is under the more dropdown.
|
| change_cityid_to(self, cityid)
|
| change_company_to(self, company)
|
| change_companysize_to(self, company_size)
|
| change_industry_to(self, industry)
|
| change_jobfunction_to(self, job_function)
|
| change_jobtype_to(self, jobtype)
|
| change_keyword_to(self, keyword)
| # Change keyword (occupation).
|
| change_location_to(self, location)
|
| change_postdate_to(self, postdate)
|
| change_radius_to(self, radius)
|
| change_rating_to(self, rating)
|
| change_salary_to(self, begin_salary, end_salary)
| # Note: Glassdoor.com has a weird inconsistency with how the
| # salary filter is initialized and used (the salary ranges themselves
| # change as well depending on clicking the apply, or moving the sliders
| # left or right when they are already at the edges!).
| # Note: change_salary_to() works fine for the bell curve
| # histogram, but might take a few clear_filters() and init_filters("salaries")
| # to work somewhat consistently for the chaotic histogram.
|
| change_senioritylabel_to(self, seniority_label)
|
| clear_filters(self)
|
| close(self)
|
| easy_apply_work_home(self, is_eao, will_apply)
|
| get(self, implicitly_wait_time=5, set_implicitly_wait=True)
|
| include_no_salary_data(self, include)
|
| init_change_filter(self, filter_type)
| # A streamlined wrapper function to chain together common methods:
| # initialize a filter, print the viable filter options,
| # and change to the specified filter option(s).
| # Note: If you use this function to change "salaries",
| # then it will reset all filters and then initialize the salary filter.
| # This design choice was because there are certain
| # page loadups that have a weird looking salary histogram.
| # Note: Default to init_filters("salaries") and change_salary_to()
| # if the histogram is chaotic (this function doesn't work well with the
| # weird features of the chaotic histogram).
|
| init_filters(self, _filter=None)
| # Initialize all filter configurations and allows for initializing specific filters.
| # Note: filters change when other filters are updated.
| # Note: Make sure to glance the self.filters attribute and call init_filters() on
| # the filter that will be next changed.
|
| reset_salary_slider(self, is_both=True, is_left=True)
|
| scrape_jobs(self, n_jobs)
|
| set_implicitly_wait(self, implicitly_wait_time)
| # Sets the implicit wait time.
| # Note: Implicit wait time is set for life of the Webdriver obj once declared;
| # this means the get() function must be called again before setting a new
| # implicit wait time.
|
| sort_by(self, sort_type)
| # The sort by dropdown seems to be a little bugged.
| # It changes the order of the job listings which implies that it works
| # yet the checkmark for the dropdown (there is a checkmark next to the user's selected dropdown
| # choice) stays on "Most Relevant" regardless of what option ("Most Relevant" or "Most Recent")
| # the user chooses. Additionally, it is unsure whether or not the actual dropdown button
| # should change when one selects "Most Relevant" or "Most Recent". That could possibly
| # be bugged too.
| # Note: since this "filter" isn't a part of the DKFilters tag, it will not follow
| # the general pipeline for DKFilters for flexibility.
|
| update_keyword_and_URL(self, keyword)
|
| ----------------------------------------------------------------------
| Methods inherited from elements.ConfigElements:
|
| change_filter_to(self, name, choice, is_more=False)
| # Changes a filter option to.
| # Note: change_filter_to only applies to filters
| # in the self.get_join_filters attribute.
|
| clear_and_search(self, search, keyword)
| # Clear a search bar and return a keyword.
|
| click_more_dropdown(self, n_clicks=1)
| # Because the more dropdown filters don't close properly, this small
| # function is aimed at simply closing that dropdown.
| # It also doubles as a more dropdown clicker, as it takes an
| # n_clicks argument.
|
| get_and_parse_filters(self, filter_type_list, join_filters)
| Gets filter text and parses it; then, it calls a join_filters function.
|
| Parameters
| ----------
| filter_type_list : str
| A string that contains all the possible filter options for a given filter
| in an unparsed manner. A filter option is defined as: jobtype/full_time where
| "full_time" is a filter option of the filter "jobtype".
| join_filters : fn
| A function for a filter that will perform the concatenation
| of strings at the end of this get_and_parse_filters() method. This function
| varies depending on what special characters exist in the unparsed string
| "filter_type_list" and also on whether or not the filter options for a
| filter will have counts.
|
| Returns
| -------
| type
| Returns a dict or a list.
| describe : dict or list
| If the filter options for a filter
| contains counts a dict is returned (where the parsed
| filter options are keys and the values are the counts)
| else it will return a list of the parsed filter options.
|
| Examples
| --------
| input : "Full-time (4722)
| Part-time (482)"
| output : {"full_time": 4722, "part_time": 482}
|
| input : "5 Miles
| 10 Miles"
| output : ["5_miles", "10_miles"]
|
| get_clear_filter_span(self)
| # Gets DKFilters -> clear filter -> span.
|
| get_dropdown_ul(self)
| # Gets the primary dropdown -> ul.
|
| get_entire_filter(self)
|
| get_filters_by_type(self, filter_type)
| # Accesses the self.filters dictionary by key "filter_type"
| # and returns a list of keys (if the corresponding value to "filter_type"
| # is a dict), else it returns a list.
|
| get_filters_cityids(self)
|
| get_filters_companies(self)
|
| get_filters_companyratings_stars_divs(self)
| # Gets the company rating filter -> stars -> all divs.
|
| get_filters_companysizes(self)
|
| get_filters_eaowfho(self, is_eao)
|
| get_filters_eaowfho_label(self, is_eao, eaowfho=None)
| # Gets the EAO or WFHO filter's -> label.
|
| get_filters_industries(self)
|
| get_filters_jobfunctions(self)
|
| get_filters_jobtypes(self)
|
| get_filters_minsalaries(self)
|
| get_filters_minsalaries_applybutton(self)
| # Gets the minsalary -> apply button.
|
| get_filters_minsalaries_checkbox(self)
| # Gets the minsalary -> checkbox.
|
| get_filters_minsalaries_checkbox_label(self)
| # Gets the minsalary -> checkbox -> include no salary data label.
|
| get_filters_postdates(self)
|
| get_filters_radii(self)
|
| get_filters_senioritylabels(self)
|
| get_filters_sortby(self)
| # Gets the main col -> sortby filter.
|
| get_histogram_labels_header(self)
| # Gets histogram labels header.
|
| get_keyword_search(self)
|
| get_left_slider(self)
| # Gets the minsalary -> left slider.
|
| get_location_search(self)
|
| get_main_body(self)
|
| get_main_body_sortby_dropdown(self)
| # Gets the main body -> sortby dropdown.
|
| get_main_body_sortby_dropdown_ul(self)
| # Gets the main body -> sortby dropdown -> ul.
|
| get_main_body_sortby_dropdown_ul_li(self)
| # Gets the main body -> sortby dropdown -> ul -> all li.
|
| get_main_col(self)
|
| get_more_dropdown(self)
|
| get_primary_dropdown(self)
|
| get_primary_dropdown_histogram_container_all_div(self)
| # Gets the minsalary -> histogram container -> all divs.
|
| get_right_slider(self)
| # Gets the minsalary -> right slider.
|
| get_search_button(self)
|
| get_ul_all_li(self)
| # Gets the primary dropdown -> ul -> all li.
|
| init_filter(self, name, get_filters_, join_filters_=None, is_salary=False, is_more=False)
| This function initializes a single filter.
|
| Parameters
| ----------
| name : str
| Name of the filter.
| get_filters_ : fn
| The function that gets a certain filter.
| join_filters_ : fn, optional
| The join function for a certain filter.
| This is optional as some filters don't use a join filters function
| like salary range.
| is_salary : bool
| True if the filter passed in is the salary range filter (it
| is a special filter that requires special initialization).
| is_more : bool
| True if the filter is under the "More" dropdown.
|
| Returns
| -------
| type
| NoneType
| describe
| This function simply initializes all filter options and doesn't
| return anything.
|
| init_sortby(self)
| # Initialize the "Most Relevant" dropdown "filter".
| # This one is under the main body of the page rather than the
| # main group of filters and thus it is initialized separately
| # as it is not necessarily a filter.
|
| initialize_salary_bins(self)
| # Initializes the possible salary bins for filter minsalary.
|
| join_filters_cityids(self, cityids, cityids_counts)
|
| join_filters_companies(self, companies, companies_counts)
|
| join_filters_companysizes(self, companysizes, companysizes_counts)
|
| join_filters_industries(self, industries, industries_counts)
|
| join_filters_jobfunctions(self, jobfunctions, jobfunctions_counts)
|
| join_filters_jobtypes(self, jobtypes, jobtypes_counts)
|
| join_filters_postdates(self, postdates, postdates_counts)
|
| join_filters_radii(self, radius)
|
| join_filters_senioritylabels(self, senioritylabels, senioritylabels_counts)
|
| join_filters_sortby(self, sortbys)
| # Join filters function for sortby (not used in the regular filter pipeline).
|
| move_slider(self, slider, idx, current_idx, difference)
| # Moves left and right sliders for filter minsalary.
|
| regex_parse_salary(self, header)
|
| reset_salary_base_fn(self, slider, idx, key_fn, is_both=False)
|
| wait_until_element(self, locator, seconds_before_timeout=10)
|
| ----------------------------------------------------------------------
| Data descriptors inherited from elements.ConfigElements:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Data and other attributes inherited from elements.ConfigElements:
|
| seconds_before_timeout = 10
|
| ----------------------------------------------------------------------
| Methods inherited from elements.WebScrapingElements:
|
| close_popup(self)
|
| get_jdcol(self)
|
| get_jobinfo1(self)
|
| get_jobinfo2(self, is_insights=True)
|
| get_jobinfo3(self)
|
| get_jobinfo4(self)
|
| get_joblistings(self)
|
| get_page_count(self)
|
| get_page_nav(self)
###Markdown
Functions
###Code
# Initialize a GlassdoorWebScraper obj.
PATH = "C:\Program Files (x86)\chromedriver.exe" # The path to your chromedriver.
gd_scraper = GlassdoorWebScraper(keyword="data scientist", PATH=PATH) # Replace "data scientist" with your keyword.
# Initialize a gd_scraper.driver,
# sets an implicit wait of 5 seconds (configurable),
# maximizes window,
# gets gd_scraper.URL.
gd_scraper.get(implicitly_wait_time=5, set_implicitly_wait=True)
###Output
_____no_output_____
###Markdown
```python
# In case you set set_implicitly_wait=False, you can set it yourself like this (only call it after get()):
gd_scraper.set_implicitly_wait(implicitly_wait_time=5)
```

Attributes
###Code
gd_scraper.PATH # The path to your chromedriver.
gd_scraper.URL_part_1 # First part of the URL.
gd_scraper.keyword # Your keyword.
gd_scraper.URL_part_2 # Second part of the URL.
gd_scraper.URL # The full URL with keyword.
gd_scraper.driver # The driver, only instantiated after you call get() and reinstantiated every subsequent time get() is called.
gd_scraper.filters # A dictionary of all possible filters and their filter options.
# Initially empty.
# This attribute is useless to the user, however
# it may showcase all the stateful filters:
# gd_scraper.get_join_filters.keys().
gd_scraper.get_join_filters # A dictionary of configurations/functions for all stateful filters.
# I used this hardcoded configuration for better streamlining the filtering process.
###Output
_____no_output_____
###Markdown
Functions Continued

```python
# This will update the keyword and URL simultaneously,
# but it will only apply the next time you call get().
gd_scraper.update_keyword_and_URL("business analyst")
```
###Code
# Initialize all possible stateful filters.
# Note: should be called right after get().
# Note: you can specify a _filter however,
# make sure this _filter is a stateful filter (i.e. in gd_scraper.get_join_filters.keys())
# and make sure it is on the webpage.
# For more info on stateful filters refer to:
# diagrams/filters.eddx or
# diagrams/filters.png.
gd_scraper.init_filters(_filter=None)
gd_scraper.filters # All filters are initialized.
###Output
_____no_output_____
###Markdown
+ Alternatively, you can initialize just one stateful filter instead of all of them (this is much faster and is usually done when you want to edit a single stateful filter without looking at the options for the others: you initialize only the filter you need to change and avoid wasting time initializing the rest).
###Code
# For example,
test_scraper = GlassdoorWebScraper("data scientist")
print(test_scraper.get_join_filters.keys())
test_scraper.get()
test_scraper.init_filters("postdates")
test_scraper.filters
test_scraper.change_postdate_to("last_3_days")
test_scraper.close()
###Output
_____no_output_____
###Markdown
+ However, if you do not need to check which filters are on the webpage (and which are stateful filters in gd_scraper.get_join_filters.keys()), you can simply call gd_scraper.init_filters() to initialize all possible stateful filters. Then you can call gd_scraper.filters.keys() to list all possible stateful filters.
###Code
# Check out all the change functions in GlassdoorWebScraper in
# diagrams/structure.eddx or diagrams/structure.png.
# Note: Before calling a change function
# for a filter, make sure it is on the webpage
# and it is stateful (i.e. in gd_scraper.get_join_filters.keys()),
# and it has been initialized immediately before.
# Here is an example:
print(gd_scraper.filters.keys(), end="\n\n")
print(gd_scraper.filters["jobtypes"], end="\n\n")
print(gd_scraper.filters["jobtypes"].keys())
gd_scraper.change_jobtype_to("full_time")
###Output
dict_keys(['jobtypes', 'postdates', 'salaries', 'radii', 'cityids', 'industries', 'job_functions', 'seniority_labels', 'companies', 'company_sizes', 'sortbys'])
{'all_job_types': 4867, 'full_time': 4665, 'part_time': 97, 'contract': 19, 'internship': 41, 'temporary': 11, 'entry_level': 34}
dict_keys(['all_job_types', 'full_time', 'part_time', 'contract', 'internship', 'temporary', 'entry_level'])
###Markdown
+ **Note**: Always initialize a filter before changing it.

```python
# For example, if you want to change postdate after you changed jobtype, do this:
print(gd_scraper.filters.keys())
gd_scraper.init_filters("postdates")
print(gd_scraper.filters["postdates"].keys())
gd_scraper.change_postdate_to(your_input)
```

```python
# change_salary_to() takes 2 inputs: begin_salary and end_salary.
# begin_salary corresponds to "left_slider" and end_salary corresponds to "right_slider".
print(gd_scraper.filters.keys())
gd_scraper.init_filters("salaries")
print(gd_scraper.filters["salaries"])
gd_scraper.change_salary_to(begin_salary, end_salary)
```

```python
# If the salary filter dropdown has a checkbox for including or excluding data with
# no salary info, then this function is applicable and it works the same way as
# easy_apply_work_home() below.
gd_scraper.include_no_salary_data(include=bool)
```

```python
# If you want to reset the salary sliders, you may call this function.
# is_both decides if both sliders will be reset (if True, is_left's value doesn't matter).
# If is_both is False, is_left decides whether you reset the left or the right slider.
gd_scraper.reset_salary_slider(is_both=True, is_left=True)
```
###Code
# First and Second stateless filters.
# This one configures the Easy Apply Only
# and the Work From Home Only.
# is_eao decides whether you edit the Easy Apply Only or
# the Work From Home Only.
# will_apply decides if you want it on or off.
# e.g. if it is on, and you call this function to set it on,
# it will stay on.
gd_scraper.easy_apply_work_home(is_eao=True, will_apply=True)
# Second stateless filter.
gd_scraper.change_rating_to(3) # ratings go from 1-4.
gd_scraper.scrape_jobs(1) # Specify the number of joblistings you want to scrape.
gd_scraper.clear_filters() # Clears all filters.
# Closes the webpage,
# called after you are finished with everything.
gd_scraper.close()
###Output
_____no_output_____
###Markdown
+ With these top-level functions, I believe they can be wrapped in yet another function to create easy automated pipelines for configuring and webscraping joblistings from Glassdoor.com. Additional Notes + sort_by() controls the "Most Relevant" dropdown. This is not part of the main filters (not in gd_scraper.get_join_filters) but it is still a stateful filter. Thus, to work with this filter, you can do this:

```python
gd_scraper.init_filters("sortbys")
print(gd_scraper.filters["sortbys"])
gd_scraper.sort_by(your_input)
```

+ Because you have to initialize before changing any filter, I devised a function called init_change_filter() that initializes your chosen filter, then prompts you for input to change that filter while printing out the possible filter options. Note: This only works for stateful filters. The 3 stateless filters do not work with this function.
###Code
gd_scraper.get()
# init_change_filter() can be called at any given point
# whether you have initialized all filters or just a few or
# none.
gd_scraper.init_change_filter("jobtypes")
###Output
Your options for filter jobtypes are :
----------------------------
jobtypes: {'all_job_types': 1345, 'full_time': 1269, 'part_time': 18, 'contract': 37, 'internship': 6, 'temporary': 15}
Enter a filter option: full_time
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets you can use:a. [Bentham](http://www.transcriptorium.eu/~tsdata/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](https://fki.tic.heia-fr.ch/databases/saint-gall-database)e. [Washington](https://fki.tic.heia-fr.ch/databases/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source= --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print("Found GPU at: {}".format(device_name))
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the Jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of the batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class is **HTRModel()**, developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To make the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process is similar to Keras *fit()*. After training, the information (epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict The predict process is similar to Keras *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() returns the predictions together with their probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]
total_time = datetime.datetime.now() - start_time
# write the predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have the `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
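For instance, a minimal sketch of scoring while normalizing away accentuation and punctuation differences (flag names per the list above; `evaluation` is imported in the next cell):

```python
# sketch: normalized evaluation; assumes `predicts` and `ground_truth`
# from the cells above are still in scope
evaluate_norm = evaluation.ocr_metrics(predicts, ground_truth,
                                       norm_accentuation=True,
                                       norm_punctuation=True)
```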
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts, ground_truth)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Dataset downloading and pre-processingThis notebook illustrates how to download and pre-process the MDA collision datasets.It uses the first session as an example. SetupBefore starting, copy your Kaggle API token to the `src/` directory.If you don't already have an API token, go to your Kaggle profile and click the _Create New API Token_ button under the **API** section:The browser will prompt you to download a file named `kaggle.json`; save it to this project's `src/` directory. DownloadTo download Session 1 to the `data` subdirectory under this project's directory, run the code below:
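As a quick, hypothetical sanity check (not part of the project code), you can confirm the token is in place before downloading:

```python
# hypothetical check: confirm the Kaggle API token was saved alongside
# this notebook in src/
from pathlib import Path

token = Path("kaggle.json")
assert token.exists(), "kaggle.json not found - see the Setup section above"
```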
###Code
from datasets import Download
download_01 = Download.SESSIONS[0]
download_01.download()
###Output
_____no_output_____
###Markdown
Pre-processingRun the code below to parse the downloaded files into a dataset organized by trial:
###Code
dataset_01 = download_01.load()
dataset_01.save()
###Output
_____no_output_____
###Markdown
The dataset is stored under `data/sessions/`. VisualizationThe `viewers.VideoGrid` class can be used to generate a video containing all videos of a trial in a grid pattern for convenient visualization. Additionally, the borders between individual videos are colored red or blue depending on whether the arm was within collision range of the obstacle in the corresponding frames.
###Code
from viewers import VideoGrid
#import logging
#logging.disable(logging.WARNING)
grid = VideoGrid('../data/sessions/2020-10-05/01')
grid.save('../data/sessions/2020-10-05/01/collage.mp4')
###Output
_____no_output_____
###Markdown
After the collated video has been generated, it can be displayed with:
###Code
from IPython.display import Video
Video('../data/sessions/2020-10-05/01/collage.mp4', width=1024)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the project code from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source=<dataset_name> --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, such as input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing the batches for the train/validation/test processes.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict processes using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch, input_size=input_size, vocab_size=dtgen.tokenizer.vocab_size)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Tensorboard To make it easier to follow the model's training, you can launch TensorBoard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the information (number of epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() returns the predictions together with their probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# write the predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have the `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
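A minimal sketch of the same call with the normalization flags enabled (flag names per the list above; `evaluation` is imported in the next cell):

```python
# sketch: evaluate while ignoring accentuation and punctuation differences
evaluate_norm = evaluation.ocr_metrics(predicts=predicts,
                                       ground_truth=dtgen.dataset['test']['gt'],
                                       norm_accentuation=True,
                                       norm_punctuation=True)
```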
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'])
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
RISE Camp 2018: Introduction to FlorWelcome to RISE Camp 2018 and the Flor demo! By now you've probably had at least a little experience doing some ML development: writing modeling code, running training data through that code to generate models, and scoring the resulting models on test data. Have you ever found yourself asking questions like these: - "Why does this input data look wrong? How was it generated?"- "What changed about my model since yesterday?"- "Which test data have I already used?"- "Where did I store that model that worked well last week?"- "If I change this code, what will be affected?"Flor is a system for managing the machine learning lifecycle, helping ensure that questions like these can be easily answered with a minimum of intrusion into the development lifecycle. Flor enables data scientists to capture the full *context* of their ML pipelines -- including versions of data, code and models, hyperparameter tuning and tracking, data lineage and more.Flor's underlying model captures ML workflows as directed acyclic graphs (DAGs) of Actions (code invocations), Artifacts (files of various kinds), and Literals (parameters), and allows developers to experiment with different configurations quickly by running multi-trial experiments. The purpose of this notebook is to give you hands-on experience using Flor to navigate through different parts of the ML lifecycle.As you work through this notebook, you will learn:* How to define/use experiments, literals, artifacts and actions.* How to run experiments with different configurations.* How to compare models with past versions in order to select the best model.We will be working on a sentiment analysis task for a movie ratings dataset. Our goal is to build a model that predicts whether a movie review is positive or negative based on its text.**Data science is a collaborative activity - we encourage you to work with those around you and ask questions!** ImportsBefore we get started, run the cell below to load standard Python libraries for ML, along with Flor.
###Code
#General imports
import pandas as pd
import numpy as np
import nltk
import json
import time
import random
import scipy.sparse
import flor
#Pre-processing imports
from stop_words import get_stop_words
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
#Model training and testing imports
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import average_precision_score, recall_score, precision_recall_fscore_support
###Output
_____no_output_____
###Markdown
Now let's tell Flor about this notebook we're working in.
###Code
# If the notebook name has not already been set, you are able to set the name in code.
flor.setNotebookName('tutorial.ipynb')
###Output
_____no_output_____
###Markdown
We're going to be modifying our code as we go, so let's get Jupyter to reload code immediately after we modify it.
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Preprocessing DataData preparation and feature engineering are important aspects of nearly every ML exercise. Before building our model, we will define a pipeline to pre-process the text of the reviews we're training and testing on. We have used the following techniques to pre-process the reviews:* *Removal of Stop Words*: removing noisy common words that we'll ignore like "the" and "and"* *Stemming*: keeping just the canonical "stem" of inflected words (e.g. "coding" -> "code", "coded" -> "code", etc.)* *Lemmatization*: using grammar rules to do even better canonicalization than stemming
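For orientation, a minimal, illustrative sketch of these three steps (the project's actual pipeline lives in preprocess.py, and NLTK resources such as `punkt` and `wordnet` must be downloaded first):

```python
# illustrative only: stop-word removal, stemming and lemmatization
from stop_words import get_stop_words
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize

tokens = word_tokenize("the actors were coding better movies")
tokens = [t for t in tokens if t not in get_stop_words("en")]  # drops "the", "were"
stems = [PorterStemmer().stem(t) for t in tokens]              # "coding" -> "code"
lemmas = [WordNetLemmatizer().lemmatize(t) for t in tokens]    # "movies" -> "movie"
```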
###Code
# Run me
from preprocess import preprocessing
###Output
_____no_output_____
###Markdown
Train Test SplitWe have created a function to split our data into train/test sets. Since we would like this to be a Flor function, we must wrap it with the @flor.func decorator so it can be referenced by Flor actions. **Please navigate to the florfunctions.py file and wrap the `traintest_split` function with the Flor decorator.**
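For reference, a hedged sketch of what the wrapped function could look like (the real signature and body live in florfunctions.py):

```python
# sketch only: @flor.func registers the function so Flor actions can call it
import flor

@flor.func
def traintest_split(data_x, data_y, **kwargs):
    """Split the cleaned features/labels into train and test artifacts."""
    ...
```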
###Code
# Run me
from florfunctions import traintest_split
###Output
_____no_output_____
###Markdown
Model Training and EvaluationNow, we can create a Flor function to train and evaluate a model to classify reviews into rating buckets. **Please navigate to the florfunctions.py file and complete the `train_test` function; fill in the Random Forest model with an n_estimators parameter of 5.**
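As a sketch, the requested model line inside `train_test` might look like this (variable names assumed):

```python
# illustrative: Random Forest with the requested n_estimators=5
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=5)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
```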
###Code
from florfunctions import train_test
###Output
_____no_output_____
###Markdown
Experiment SetupFinally, we will define our Flor experiment using the Flor functions we created above. Proceed through the following cells and run the experiment.
###Code
ex = flor.Experiment('risecamp_demo').__enter__()
data = ex.artifact('data.json', 'data_loc')
do_preproc = ex.action(preprocessing, [data,])
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', parent=do_preproc)
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', parent=do_preproc)
ex.__exit__()
data_x.pull(utag='second_preproc')
###Output
Data Preprocessing
--- 197.00618505477905 seconds ---
###Markdown
Next, we need to load the data we want to analyze. We can load the data by creating **artifacts**, which are pointers to data we want. In this case, we have already generated cleaned data from a previous experiment run; we can retrieve the cleaned data by referencing the tag of the particular run.
###Code
# Defines preprocessing action and resulting intermediary artifacts
#TODO: double check syntax
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', utag="first")
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', utag="first")
###Output
_____no_output_____
###Markdown
Now that we have our data points, we need to perform a train/test split. Using the `traintest_split` function we imported earlier, let's create a Flor action as well as the intermediary artifacts generated by the split.
###Code
#traintest_split is the function to run, data_x and data_y are arguments
do_split = ex.action(traintest_split, [data_x, data_y])
#artifacts have a pointer (filename), internal name, and (optional) parent
X_train = ex.artifact('x_train.npz', 'X_train', do_split)
X_test = ex.artifact('x_test.npz', 'X_test', do_split)
y_train = ex.artifact('y_train.json', 'y_train', do_split)
y_test = ex.artifact('y_test.json', 'y_test', do_split)
###Output
_____no_output_____
###Markdown
Lastly, we can specify the hyperparameter with a **literal**, an explicit value stored in Flor, and create an action for our `train_test` function and an artifact for our result. We can wrap up the experiment and close it with `__exit__()`.
###Code
hyperparameter = ex.literal(v = 5, name="hyperparameters")
#Define the model training and evaluation action and final artifacts
do_test = ex.action(train_test, [X_train, X_test, y_train, y_test, hyperparameter])
report = ex.artifact('report.csv', 'report', do_test)
flor.Experiment("risecamp_demo").__exit__()
###Output
_____no_output_____
###Markdown
PullFinally, we are ready to run the experiment! We can do so by running `pull()` on our output artifacts. Before doing this, however, it is helpful to use `plot()` to generate a florplan, a graph representation of the artifact's lineage.
###Code
report.plot()
#Run the experiment
report.pull("first_pull")
###Output
_____no_output_____
###Markdown
Hyperparameter TuningFlor makes it convenient to run models using different hyperparameters and track the results. In the `train_test` function we created, notice that we pass in `hyperparameters` in addition to the train and test data. These hyperparameters will allow us to tune our model and track results with ease; let's define them in our experiment setup.Notice that the Random Forest Classifier contains `n_estimators` as a hyperparameter. We would like to tune this hyperparameter and track model performance. In order to specify the hyperparameters, we must make a `literalForEach` within our experiment. **Fill in the `literalForEach` with values 5, 50 and 75 within the experiment below.**
###Code
#Note: We have copied the same experiment below for convenience.
#We can also create flor experiments using a context manager.
# Create a context manager for the experiment and is named 'risecamp_demo'
with flor.Experiment('risecamp_demo') as ex:
ex.groundClient('git') #use "git" for grit and "ground" for ground
# Defines artifacts
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', utag='first')
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', utag='first')
#Define split action and resulting output artifacts
do_split = ex.action(traintest_split, [data_x, data_y])
X_train = ex.artifact('x_train.npz', 'X_train', do_split)
X_test = ex.artifact('x_test.npz', 'X_test', do_split)
y_train = ex.artifact('y_train.json', 'y_train', do_split)
y_test = ex.artifact('y_test.json', 'y_test', do_split)
#Define the hyperparameters for the models
random_forest_Nestimators = ex.literal(v = 5, name="hyperparameters")
# random_forest_Nestimators = ex.literalForEach(v=[5, 50, 75], name="hyperparameters", default=50) #SOLUTION
#Define the model training and evaluation action and final artifacts
do_test = ex.action(train_test, [X_train, X_test, y_train, y_test, random_forest_Nestimators])
report = ex.artifact('report.csv', 'report', do_test)
report.plot()
#Run the experiment
report.pull(utag="hyperparameter_tuning")
###Output
_____no_output_____
###Markdown
Peeking at ResultsAfter running the model with different hyperparameters above, we are able to peek at our output artifact, containing precision and recall metrics for the different models. Run the following cell - **which hyperparameter yields the best model?**
###Code
#Run me
report.peek() #Depends on dataframe
###Output
_____no_output_____
###Markdown
Building a Better ModelNow that you have some experience using flor, let's try using a different model to see if we can improve the results. Some of the classifiers we recommend trying are the Multilayer Perceptron Classifier, Naive Bayes Classifier, and K-neighbors Classifier.After implementing your model of choice in the `train_test` function in **florfunctions.py**, run the cells below to reimport the function and run the experiment.
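For example, a hedged sketch of swapping in the Multilayer Perceptron inside `train_test` (the `hidden_layer_sizes` value is illustrative):

```python
# illustrative alternative classifier for train_test
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=(2,), max_iter=500)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
```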
###Code
from florfunctions import train_test
#Note: We have copied the same experiment below for convenience.
#We can also create flor experiments using a context manager.
# Create a context manager for the experiment and is named 'risecamp_demo'
with flor.Experiment('risecamp_demo') as ex:
ex.groundClient('git') #use "git" for grit and "ground" for ground
# Defines artifacts
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', utag='first')
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', utag='first')
#Define split action and resulting output artifacts
do_split = ex.action(traintest_split, [data_x, data_y])
X_train = ex.artifact('x_train.npz', 'X_train', do_split)
X_test = ex.artifact('x_test.npz', 'X_test', do_split)
y_train = ex.artifact('y_train.json', 'y_train', do_split)
y_test = ex.artifact('y_test.json', 'y_test', do_split)
#Define the hyperparameters for the models
#hyperparameter = ex.literal(v = 5, name="hyperparameters")
random_forest_Nestimators = ex.literalForEach(v=[5, 50, 75], name="hyperparameters", default=50) #SOLUTION
#MLP_hidden_layer_size = ex.literalForEach(v=[(1, ), (2, ), (3, )], name="hyperparameters", default=(2, ))
#Define the model training and evaluation action and final artifacts
#change to MLP_hidden_layer_size
do_test = ex.action(train_test, [X_train, X_test, y_train, y_test, random_forest_Nestimators])
#for naive bayes
#do_test = ex.action(train_test, [X_train, X_test, y_train, y_test])
report = ex.artifact('report.csv', 'report', do_test)
report.pull(utag="improved_model")
report.peek()
###Output
_____no_output_____
###Markdown
Data MishapsIt turns out, the data that we have been working with had not been cleaned optimally to begin with. In fact, we can observe the exact cleaning process in the `preprocessing` function within florfunctions.py. We can re-clean the raw data by adding another Flor action and intermediate artifacts, as sketched below. Fortunately, however, we already ran an experiment with more optimal preprocessing. We can check out the artifacts by using `utag = 'second'`.
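A hedged sketch of that re-cleaning step, mirroring the preprocessing action defined at the start of this notebook:

```python
# sketch: re-run preprocessing as a Flor action that produces new
# intermediate artifacts (same wiring as the earlier experiment cell)
do_preproc = ex.action(preprocessing, [data,])
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', parent=do_preproc)
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', parent=do_preproc)
```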
###Code
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', utag='second')
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', utag='second')
###Output
_____no_output_____
###Markdown
Let's see how the new data impacts our results.
###Code
#Note: We have copied the same experiment below for convenience.
# Create a context manager for the experiment and is named 'risecamp_demo'
with flor.Experiment('risecamp_demo') as ex:
ex.groundClient('git') #use "git" for grit and "ground" for ground
# Defines artifacts
data_x = ex.artifact('data_clean_X.json', 'intermediate_X', utag='second')
data_y = ex.artifact('data_clean_y.json', 'intermediate_y', utag='second')
#Define split action and resulting output artifacts
do_split = ex.action(traintest_split, [data_x, data_y])
X_train = ex.artifact('x_train.npz', 'X_train', do_split)
X_test = ex.artifact('x_test.npz', 'X_test', do_split)
y_train = ex.artifact('y_train.json', 'y_train', do_split)
y_test = ex.artifact('y_test.json', 'y_test', do_split)
#Define the hyperparameters for the models
#hyperparameter = ex.literal(v = 5, name="hyperparameters")
random_forest_Nestimators = ex.literalForEach(v=[5, 50, 75], name="hyperparameters", default=50) #SOLUTION
#MLP_hidden_layer_size = ex.literalForEach(v=[(1, ), (2, ), (3, )], name="hyperparameters", default=(2, ))
#Define the model training and evaluation action and final artifacts
#change to MLP_hidden_layer_size
do_test = ex.action(train_test, [X_train, X_test, y_train, y_test, random_forest_Nestimators])
#for naive bayes
#do_test = ex.action(train_test, [X_train, X_test, y_train, y_test])
report = ex.artifact('report.csv', 'report', do_test)
report.pull(utag="better_data")
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.0This tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the project code from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source=<dataset_name> --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.0 Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install TensorFlow 2.0 with GPU support.
###Code
!pip install -q tensorflow-gpu==2.1.0-rc2
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, such as input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing the batches for the train/validation/test processes.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict processes using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
# note: `learning_rate=None` will get architecture default value
model = HTRModel(architecture=arch, input_size=input_size, vocab_size=dtgen.tokenizer.vocab_size)
model.compile(learning_rate=0.001)
# save network summary
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Tensorboard To make it easier to follow the model's training, you can launch TensorBoard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the information (number of epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() returns the predictions together with their probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# write the predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have the `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'],
norm_accentuation=False,
norm_punctuation=False)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the project code from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source=<dataset_name> --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, such as input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing the batches for the train/validation/test processes.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict processes using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
# note: `learning_rate=None` will get architecture default value
model = HTRModel(architecture=arch, input_size=input_size, vocab_size=dtgen.tokenizer.vocab_size)
model.compile(learning_rate=0.001)
# save network summary
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Tensorboard To make it easier to follow the model's training, you can launch TensorBoard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the information (number of epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() returns the predictions together with their probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# write the predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have the `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'],
norm_accentuation=False,
norm_punctuation=False)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the project code from GitHub and extract the chosen dataset (or all of them, if you prefer) into the **raw** folder. Don't change the structure of the datasets, since the scripts were written for their **original structure**. Your project directory will look like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create a virtual environment and install the dependencies with Python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute in the **src** folder:> ```python main.py --source=<dataset_name> --transform```Your data will be preprocessed and encoded, then saved in the **data** folder. Now your project directory will look like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders to the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the Jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *"Colab Notebooks/handwritten-text-recognition/src/"* is the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables.Set the main configuration parameters, such as input size, batch size, number of epochs and the list of characters. This keeps the notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing the batches for the train/validation/test processes.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating the model with the Handwritten Text Recognition flow, which computes the loss function by CTC and decodes the output to calculate the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict processes using a *generator*.To keep HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Tensorboard To make it easier to follow the model's training, you can launch TensorBoard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the information (number of epochs and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'])
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
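###Markdown
The parameter list above also mentions `norm_accentuation` and `norm_punctuation`. A hedged sketch of enabling them — assuming they are boolean keyword arguments, as the description suggests (check `data/evaluation.py` for the exact signature):
###Code
# Assumed usage of the normalization flags; metrics would then be computed
# ignoring accentuation and punctuation differences.
evaluate_norm = evaluation.ocr_metrics(predicts=predicts,
                                       ground_truth=dtgen.dataset['test']['gt'],
                                       norm_accentuation=True,
                                       norm_punctuation=True)
print("CER/WER/SER (normalized):", evaluate_norm)
###Output
_____no_output_____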
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) in the **raw** folder. Don't change anything of the structure of the dataset, since the scripts were made from the **original structure** of them. Your project directory will be like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create virtual environment and install the dependencies with python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute on **src** folder:> ```python main.py --source= --transform```Your data will be preprocess and encode, creating and saving in the **data** folder. Now your project directory will be like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders in the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print("Found GPU at: {}".format(device_name))
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating a model with the Handwritten Text Recognition flow, which calculates the loss function by CTC and decodes the output to compute the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process is similar to Keras's *fit()*. After training, the summary information (epoch count and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts, ground_truth)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) in the **raw** folder. Don't change anything of the structure of the dataset, since the scripts were made from the **original structure** of them. Your project directory will be like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create virtual environment and install the dependencies with python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute on **src** folder:> ```python main.py --source= --transform```Your data will be preprocess and encode, creating and saving in the **data** folder. Now your project directory will be like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders in the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating a model with the Handwritten Text Recognition flow, which calculates the loss function by CTC and decodes the output to compute the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
# create and compile HTRModel
model = HTRModel(architecture=arch,
input_size=input_size,
vocab_size=dtgen.tokenizer.vocab_size,
beam_width=10,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process is similar to Keras's *fit()*. After training, the summary information (epoch count and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
ground_truth = [x.decode() for x in dtgen.dataset['test']['gt']]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, ground_truth):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(ground_truth[i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
6 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts, ground_truth)
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Handwritten Text Recognition using TensorFlow 2.xThis tutorial shows how you can use the project [Handwritten Text Recognition](https://github.com/arthurflor23/handwritten-text-recognition) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets in HDF5. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [Bentham](http://transcriptorium.eu/datasets/bentham-collection/)b. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)c. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)d. [Saint Gall](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/saint-gall-database)e. [Washington](http://www.fki.inf.unibe.ch/databases/iam-historical-document-database/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset (or all if you prefer) in the **raw** folder. Don't change anything of the structure of the dataset, since the scripts were made from the **original structure** of them. Your project directory will be like this:```.├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```After that, create virtual environment and install the dependencies with python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 HDF5 filesNow, you'll run the *transform* function from **main.py**. For this, execute on **src** folder:> ```python main.py --source= --transform```Your data will be preprocess and encode, creating and saving in the **data** folder. Now your project directory will be like this:```.├── data│ ├── bentham.hdf5│ ├── iam.hdf5│ ├── rimes.hdf5│ ├── saintgall.hdf5│ └── washington.hdf5├── raw│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── preproc.py │ ├── reader.py │ ├── similar_error_analysis.py ├── main.py ├── network │ ├── architecture.py │ ├── layers.py │ ├── model.py └── tutorial.ipynb```Then upload the **data** and **src** folders in the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Now, we'll install and switch to TensorFlow 2.x.
###Code
!pip install -q tensorflow-gpu
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print(f"Found GPU at: {device_name}")
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/handwritten-text-recognition/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/handwritten-text-recognition/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, like input size, batch size, number of epochs and list of characters. This keeps the jupyter notebook compatible with **main.py**:* **dataset**: "bentham", "iam", "rimes", "saintgall", "washington"* **arch**: network to run: "bluche", "puigcerver", "flor"* **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bentham"
arch = "flor"
epochs = 1000
batch_size = 16
# define paths
source_path = os.path.join("..", "data", f"{source}.hdf5")
output_path = os.path.join("..", "output", source, arch)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define input size, number max of chars per line and list of valid chars
input_size = (1024, 128, 1)
max_text_length = 128
charset_base = string.printable[:95]
print("source:", source_path)
print("output", output_path)
print("target", target_path)
print("charset:", charset_base)
###Output
_____no_output_____
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=charset_base,
max_text_length=max_text_length)
print(f"Train images: {dtgen.size['train']}")
print(f"Validation images: {dtgen.size['valid']}")
print(f"Test images: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 HTRModel Class The third class, **HTRModel()**, was developed to be easy to use and to abstract the complicated flow of an HTR system. It's responsible for:* Creating a model with the Handwritten Text Recognition flow, which calculates the loss function by CTC and decodes the output to compute the HTR metrics (CER, WER and SER);* Saving and loading the model;* Loading weights into the model (train/infer);* Running the train/predict process using a *generator*.To keep the HTRModel dynamic, its parameters are the *architecture*, *input_size* and *vocab_size*.
###Code
from network.model import HTRModel
input_size = (1024, 128, 1)
# create and compile HTRModel
model = HTRModel(architecture='flor', input_size=input_size, vocab_size=97)
model.compile(learning_rate=0.001)
model.summary('/data2/pavan/handwritten-text-recognition/output/bentham/flor/', "summary.txt")
# get default callbacks and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target='/data2/pavan/handwritten-text-recognition/output/bentham/flor/checkpoint_weights.hdf5')
# callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
Model: "model_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) [(None, 1024, 128, 1)] 0
_________________________________________________________________
conv2d_36 (Conv2D) (None, 512, 64, 16) 160
_________________________________________________________________
p_re_lu_36 (PReLU) (None, 512, 64, 16) 16
_________________________________________________________________
batch_normalization_36 (Batc (None, 512, 64, 16) 112
_________________________________________________________________
full_gated_conv2d_30 (FullGa (None, 512, 64, 16) 4640
_________________________________________________________________
conv2d_37 (Conv2D) (None, 512, 64, 32) 4640
_________________________________________________________________
p_re_lu_37 (PReLU) (None, 512, 64, 32) 32
_________________________________________________________________
batch_normalization_37 (Batc (None, 512, 64, 32) 224
_________________________________________________________________
full_gated_conv2d_31 (FullGa (None, 512, 64, 32) 18496
_________________________________________________________________
conv2d_38 (Conv2D) (None, 256, 16, 40) 10280
_________________________________________________________________
p_re_lu_38 (PReLU) (None, 256, 16, 40) 40
_________________________________________________________________
batch_normalization_38 (Batc (None, 256, 16, 40) 280
_________________________________________________________________
full_gated_conv2d_32 (FullGa (None, 256, 16, 40) 28880
_________________________________________________________________
dropout_18 (Dropout) (None, 256, 16, 40) 0
_________________________________________________________________
conv2d_39 (Conv2D) (None, 256, 16, 48) 17328
_________________________________________________________________
p_re_lu_39 (PReLU) (None, 256, 16, 48) 48
_________________________________________________________________
batch_normalization_39 (Batc (None, 256, 16, 48) 336
_________________________________________________________________
full_gated_conv2d_33 (FullGa (None, 256, 16, 48) 41568
_________________________________________________________________
dropout_19 (Dropout) (None, 256, 16, 48) 0
_________________________________________________________________
conv2d_40 (Conv2D) (None, 128, 4, 56) 21560
_________________________________________________________________
p_re_lu_40 (PReLU) (None, 128, 4, 56) 56
_________________________________________________________________
batch_normalization_40 (Batc (None, 128, 4, 56) 392
_________________________________________________________________
full_gated_conv2d_34 (FullGa (None, 128, 4, 56) 56560
_________________________________________________________________
dropout_20 (Dropout) (None, 128, 4, 56) 0
_________________________________________________________________
conv2d_41 (Conv2D) (None, 128, 4, 64) 32320
_________________________________________________________________
p_re_lu_41 (PReLU) (None, 128, 4, 64) 64
_________________________________________________________________
batch_normalization_41 (Batc (None, 128, 4, 64) 448
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 128, 2, 64) 0
_________________________________________________________________
reshape_6 (Reshape) (None, 128, 128) 0
_________________________________________________________________
bidirectional_12 (Bidirectio (None, 128, 256) 198144
_________________________________________________________________
dense_12 (Dense) (None, 128, 256) 65792
_________________________________________________________________
bidirectional_13 (Bidirectio (None, 128, 256) 296448
_________________________________________________________________
dense_13 (Dense) (None, 128, 98) 25186
=================================================================
Total params: 824,050
Trainable params: 822,770
Non-trainable params: 1,280
_________________________________________________________________
###Markdown
4 Tensorboard To facilitate visualization of the model's training, you can launch TensorBoard. **Note**: all data is saved in the output folder.
###Code
%load_ext tensorboard
%tensorboard --reload_interval=300 --logdir={output_path}
from PIL import Image
import cv2
# read a sample line image in grayscale (flag 0 = cv2.IMREAD_GRAYSCALE)
x = cv2.imread('/data2/pavan/Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow/src/data/8.jpg', 0)
print(x.shape)
# x = Image.open('/data2/pavan/Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow/src/data/8.jpg')
# cv2.imshow opens a desktop window, so it only works on a local session (use cv2_imshow in Colab)
cv2.imshow('sam', x)
cv2.waitKey()
cv2.destroyAllWindows()
# model.predict()
###Output
_____no_output_____
###Markdown
5 Training The training process is similar to Keras's *fit()*. After training, the summary information (epoch count and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
val_loss = h.history['val_loss']
min_val_loss = min(val_loss)
min_val_loss_i = val_loss.index(min_val_loss)
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
t_corpus = "\n".join([
f"Total train images: {dtgen.size['train']}",
f"Total validation images: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Total epochs: {len(loss)}",
f"Best epoch {min_val_loss_i + 1}\n",
f"Training loss: {loss[min_val_loss_i]:.8f}",
f"Validation loss: {min_val_loss:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
6 Predict The prediction process is similar to Keras's *predict()*:
###Code
from data import preproc as pp
from google.colab.patches import cv2_imshow
start_time = datetime.datetime.now()
# predict() function will return the predicts with the probabilities
predicts, _ = model.predict(x=dtgen.next_test_batch(),
steps=dtgen.steps['test'],
ctc_decode=True,
verbose=1)
# decode to string
predicts = [dtgen.tokenizer.decode(x[0]) for x in predicts]
total_time = datetime.datetime.now() - start_time
# mount predict corpus file
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
for pd, gt in zip(predicts, dtgen.dataset['test']['gt']):
lg.write(f"TE_L {gt}\nTE_P {pd}\n")
for i, item in enumerate(dtgen.dataset['test']['dt'][:10]):
print("=" * 1024, "\n")
cv2_imshow(pp.adjust_to_see(item))
print(dtgen.dataset['test']['gt'][i])
print(predicts[i], "\n")
###Output
_____no_output_____
###Markdown
7 Evaluate The evaluation process is more manual. Here we have `ocr_metrics`, but feel free to implement other metrics instead. The function takes four parameters: * predicts* ground_truth* norm_accentuation (calculation with/without accentuation)* norm_punctuation (calculation with/without punctuation marks)
###Code
from data import evaluation
evaluate = evaluation.ocr_metrics(predicts=predicts,
ground_truth=dtgen.dataset['test']['gt'])
e_corpus = "\n".join([
f"Total test images: {dtgen.size['test']}",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics:",
f"Character Error Rate: {evaluate[0]:.8f}",
f"Word Error Rate: {evaluate[1]:.8f}",
f"Sequence Error Rate: {evaluate[2]:.8f}"
])
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____
###Markdown
Spelling Correction using TensorFlow 2.xThis tutorial shows how you can use the project [Spelling Correction](https://github.com/arthurflor23/text-corretion) in your Google Colab. 1 Localhost EnvironmentWe'll make sure you have the project in your Google Drive with the datasets folders. If you already have structured files in the cloud, skip this step. 1.1 DatasetsThe datasets that you can use:a. [BEA2019](https://www.cl.cam.ac.uk/research/nl/bea2019st/)b. [Bentham](http://www.transcriptorium.eu/~tsdata/)c. [CoNLL13](https://www.comp.nus.edu.sg/~nlp/conll13st.html)d. [CoNLL14](https://www.comp.nus.edu.sg/~nlp/conll14st.html)e. [Google](https://ai.google/research/pubs/pub41880)f. [IAM](http://www.fki.inf.unibe.ch/databases/iam-handwriting-database)g. [Rimes](http://www.a2ialab.com/doku.php?id=rimes_database:start)h. [Saint Gall](https://fki.tic.heia-fr.ch/databases/saint-gall-database)i. [Washington](https://fki.tic.heia-fr.ch/databases/washington-database) 1.2 Raw folderOn localhost, download the code project from GitHub and extract the chosen dataset in the **raw** folder. Don't change anything of the structure of the dataset, since the scripts were made from the **original structure** of them. Your project directory will be like this:```.├── raw│ ├── bea2019│ │ ├── json│ │ ├── json_to_m2.py│ │ ├── licence.wi.txt│ │ ├── license.locness.txt│ │ ├── m2│ │ └── readme.txt│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── conll13│ │ ├── m2scorer│ │ ├── original│ │ ├── README│ │ ├── revised│ │ └── scripts│ ├── conll14│ │ ├── alt│ │ ├── noalt│ │ ├── README│ │ └── scripts│ ├── google│ │ ├── europarl-v6.cs│ │ ├── europarl-v6.de│ │ ├── europarl-v6.en│ │ ├── europarl-v6.es│ │ ├── europarl-v6.fr│ │ ├── news.2007.cs.shuffled│ │ ├── news.2007.de.shuffled│ │ ├── news.2007.en.shuffled│ │ ├── news.2007.es.shuffled│ │ ├── news.2007.fr.shuffled│ │ ├── news.2008.cs.shuffled│ │ ├── news.2008.de.shuffled│ │ ├── news.2008.en.shuffled│ │ ├── news.2008.es.shuffled│ │ ├── news.2008.fr.shuffled│ │ ├── news.2009.cs.shuffled│ │ ├── news.2009.de.shuffled│ │ ├── news.2009.en.shuffled│ │ ├── news.2009.es.shuffled│ │ ├── news.2009.fr.shuffled│ │ ├── news.2010.cs.shuffled│ │ ├── news.2010.de.shuffled│ │ ├── news.2010.en.shuffled│ │ ├── news.2010.es.shuffled│ │ ├── news.2010.fr.shuffled│ │ ├── news.2011.cs.shuffled│ │ ├── news.2011.de.shuffled│ │ ├── news.2011.en.shuffled│ │ ├── news.2011.es.shuffled│ │ ├── news.2011.fr.shuffled│ │ ├── news-commentary-v6.cs│ │ ├── news-commentary-v6.de│ │ ├── news-commentary-v6.en│ │ ├── news-commentary-v6.es│ │ └── news-commentary-v6.fr│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── __init__.py │ ├── preproc.py │ └── reader.py ├── main.py ├── tool │ ├── __init__.py │ ├── seq2seq.py │ ├── statistical.py │ └── transformer.py └── tutorial.ipynb```After that, create virtual environment and install the dependencies with python 3 and pip:> ```python -m venv .venv && source .venv/bin/activate```> ```pip install -r requirements.txt``` 1.3 Dataset foldersNow, you'll run the *transform* function from **main.py**. 
For this, execute on **src** folder:> ```python main.py --source= --transform```Your data will be preprocess and encode, creating and saving in the **data** folder. Now your project directory will be like this:```.├── data│ ├── bea2019.txt│ ├── bentham.txt│ ├── conll13.txt│ ├── conll14.txt│ ├── google.txt│ ├── iam.txt│ ├── rimes.txt│ ├── saintgall.txt│ └── washington.txt├── raw│ ├── bea2019│ │ ├── json│ │ ├── json_to_m2.py│ │ ├── licence.wi.txt│ │ ├── license.locness.txt│ │ ├── m2│ │ └── readme.txt│ ├── bentham│ │ ├── BenthamDatasetR0-GT│ │ └── BenthamDatasetR0-Images│ ├── conll13│ │ ├── m2scorer│ │ ├── original│ │ ├── README│ │ ├── revised│ │ └── scripts│ ├── conll14│ │ ├── alt│ │ ├── noalt│ │ ├── README│ │ └── scripts│ ├── google│ │ ├── europarl-v6.cs│ │ ├── europarl-v6.de│ │ ├── europarl-v6.en│ │ ├── europarl-v6.es│ │ ├── europarl-v6.fr│ │ ├── news.2007.cs.shuffled│ │ ├── news.2007.de.shuffled│ │ ├── news.2007.en.shuffled│ │ ├── news.2007.es.shuffled│ │ ├── news.2007.fr.shuffled│ │ ├── news.2008.cs.shuffled│ │ ├── news.2008.de.shuffled│ │ ├── news.2008.en.shuffled│ │ ├── news.2008.es.shuffled│ │ ├── news.2008.fr.shuffled│ │ ├── news.2009.cs.shuffled│ │ ├── news.2009.de.shuffled│ │ ├── news.2009.en.shuffled│ │ ├── news.2009.es.shuffled│ │ ├── news.2009.fr.shuffled│ │ ├── news.2010.cs.shuffled│ │ ├── news.2010.de.shuffled│ │ ├── news.2010.en.shuffled│ │ ├── news.2010.es.shuffled│ │ ├── news.2010.fr.shuffled│ │ ├── news.2011.cs.shuffled│ │ ├── news.2011.de.shuffled│ │ ├── news.2011.en.shuffled│ │ ├── news.2011.es.shuffled│ │ ├── news.2011.fr.shuffled│ │ ├── news-commentary-v6.cs│ │ ├── news-commentary-v6.de│ │ ├── news-commentary-v6.en│ │ ├── news-commentary-v6.es│ │ └── news-commentary-v6.fr│ ├── iam│ │ ├── ascii│ │ ├── forms│ │ ├── largeWriterIndependentTextLineRecognitionTask│ │ ├── lines│ │ └── xml│ ├── rimes│ │ ├── eval_2011│ │ ├── eval_2011_annotated.xml│ │ ├── training_2011│ │ └── training_2011.xml│ ├── saintgall│ │ ├── data│ │ ├── ground_truth│ │ ├── README.txt│ │ └── sets│ └── washington│ ├── data│ ├── ground_truth│ ├── README.txt│ └── sets└── src ├── data │ ├── evaluation.py │ ├── generator.py │ ├── __init__.py │ ├── preproc.py │ └── reader.py ├── main.py ├── tool │ ├── __init__.py │ ├── seq2seq.py │ ├── statistical.py │ └── transformer.py └── tutorial.ipynb```Then upload the **data** and **src** folders in the same directory in your Google Drive. 2 Google Drive Environment 2.1 TensorFlow 2.x Make sure the jupyter notebook is using GPU mode.
###Code
!nvidia-smi
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
raise SystemError("GPU device not found")
print("Found GPU at: {}".format(device_name))
###Output
_____no_output_____
###Markdown
2.2 Google Drive Mount your Google Drive partition.**Note:** *\"Colab Notebooks/spelling-correction/src/\"* was the directory where you put the project folders, specifically the **src** folder.
###Code
from google.colab import drive
drive.mount("./gdrive", force_remount=True)
%cd "./gdrive/My Drive/Colab Notebooks/spelling-correction/src/"
!ls -l
###Output
_____no_output_____
###Markdown
After mounting, you can see the list of files in the project folder. 3 Set Python Classes 3.1 Environment First, let's define our environment variables. Set the main configuration parameters, such as dataset, method, number of epochs and batch size. This keeps the jupyter notebook compatible with **main.py**:* **dataset**: * **``bea2019``**, **``bentham``**, **``conll13``**, **``conll14``**, **``google``**, **``iam``**, **``rimes``**, **``saintgall``**, **``washington``*** **mode**: * neural network: **``luong``**, **``bahdanau``**, **``transformer``** * statistical (localhost only): **``similarity``**, **``norvig``**, **``symspell``*** **epochs**: number of epochs* **batch_size**: size of each batch
###Code
import os
import datetime
import string
# define parameters
source = "bea2019"
mode = "luong"
epochs = 1000
batch_size = 64
# define paths
data_path = os.path.join("..", "data")
source_path = os.path.join(data_path, f"{source}.txt")
output_path = os.path.join("..", "output", source, mode)
target_path = os.path.join(output_path, "checkpoint_weights.hdf5")
os.makedirs(output_path, exist_ok=True)
# define number max of chars per line and list of valid chars
max_text_length = 128
charset_base = string.printable[:95]
charset_special = """ÀÁÂÃÄÅÇÈÉÊËÌÍÎÏÑÒÓÔÕÖÙÚÛÜÝàáâãäåçèéêëìíîïñòóôõöùúûüý"""
print("output", output_path)
print("target", target_path)
print("charset:", charset_base + charset_special)
###Output
_____no_output_____
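###Markdown
A quick check of what `string.printable[:95]` actually contains: the digits, ASCII letters, punctuation, and the space character (the last five entries of `string.printable` are control whitespace, which are excluded):
###Code
import string
# The 95-character base charset is exactly digits + letters + punctuation + space.
assert string.printable[:95] == string.digits + string.ascii_letters + string.punctuation + " "
print(len(charset_base), "base characters +", len(charset_special), "accented characters")
###Output
_____no_output_____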
###Markdown
3.2 DataGenerator Class The second class is **DataGenerator()**, responsible for:* Loading the dataset partitions (train, valid, test);* Managing batches for the train/validation/test process.
###Code
from data.generator import DataGenerator
dtgen = DataGenerator(source=source_path,
batch_size=batch_size,
charset=(charset_base + charset_special),
max_text_length=max_text_length)
print(f"Train sentences: {dtgen.size['train']}")
print(f"Validation sentences: {dtgen.size['valid']}")
print(f"Test sentences: {dtgen.size['test']}")
###Output
_____no_output_____
###Markdown
3.3 Neural Network Model In this step, the model is created/loaded and the default callbacks are set up.
###Code
from data import preproc as pp, evaluation as ev
from tool.seq2seq import Seq2SeqAttention
from tool.transformer import Transformer
if mode == "transformer":
# disable one hot encode (seq2seq) to use transformer model
dtgen.one_hot_process = False
model = Transformer(dtgen.tokenizer,
num_layers=6,
units=512,
d_model=256,
num_heads=8,
dropout=0.1,
stop_tolerance=20,
reduce_tolerance=15)
else:
model = Seq2SeqAttention(dtgen.tokenizer,
mode,
units=512,
dropout=0.2,
stop_tolerance=20,
reduce_tolerance=15)
model.compile(learning_rate=0.001)
model.summary(output_path, "summary.txt")
# get default callbacks list and load checkpoint weights file (HDF5) if exists
model.load_checkpoint(target=target_path)
callbacks = model.get_callbacks(logdir=output_path, checkpoint=target_path, verbose=1)
###Output
_____no_output_____
###Markdown
4 Training The training process uses *fit_generator()* to keep memory usage manageable. After training, the summary information (epoch count and minimum loss) is saved.
###Code
# to calculate total and average time per epoch
start_time = datetime.datetime.now()
h = model.fit(x=dtgen.next_train_batch(),
epochs=epochs,
steps_per_epoch=dtgen.steps['train'],
validation_data=dtgen.next_valid_batch(),
validation_steps=dtgen.steps['valid'],
callbacks=callbacks,
shuffle=True,
verbose=1)
total_time = datetime.datetime.now() - start_time
loss = h.history['loss']
accuracy = h.history['accuracy']
val_loss = h.history['val_loss']
val_accuracy = h.history['val_accuracy']
time_epoch = (total_time / len(loss))
total_item = (dtgen.size['train'] + dtgen.size['valid'])
best_epoch_index = val_loss.index(min(val_loss))
t_corpus = "\n".join([
f"Total train sentences: {dtgen.size['train']}",
f"Total validation sentences: {dtgen.size['valid']}",
f"Batch: {dtgen.batch_size}\n",
f"Total epochs: {len(accuracy)}",
f"Total time: {total_time}",
f"Time per epoch: {time_epoch}",
f"Time per item: {time_epoch / total_item}\n",
f"Best epoch {best_epoch_index + 1}",
f"Training loss: {loss[best_epoch_index]:.8f}",
f"Training accuracy: {accuracy[best_epoch_index]:.8f}\n",
f"Validation loss: {val_loss[best_epoch_index]:.8f}",
f"Validation accuracy: {val_accuracy[best_epoch_index]:.8f}"
])
with open(os.path.join(output_path, "train.txt"), "w") as lg:
lg.write(t_corpus)
print(t_corpus)
###Output
_____no_output_____
###Markdown
5 Predict and Evaluate Since the goal is to correct text, the metrics (CER, WER and SER) are calculated before and after the correction. The prediction process also uses *predict_generator()*:
###Code
start_time = datetime.datetime.now()
predicts = model.predict(x=dtgen.next_test_batch(), steps=dtgen.steps['test'], verbose=1)
predicts = [pp.text_standardize(x) for x in predicts]
total_time = datetime.datetime.now() - start_time
# calculate metrics (before and after)
old_metric, new_metric = ev.ocr_metrics(ground_truth=dtgen.dataset['test']['gt'],
data=dtgen.dataset['test']['dt'],
predict=predicts)
# generate report
e_corpus = "\n".join([
f"Total test sentences: {dtgen.size['test']}\n",
f"Total time: {total_time}",
f"Time per item: {total_time / dtgen.size['test']}\n",
f"Metrics (before):",
f"Character Error Rate: {old_metric[0]:.8f}",
f"Word Error Rate: {old_metric[1]:.8f}",
f"Sequence Error Rate: {old_metric[2]:.8f}\n",
f"Metrics (after):",
f"Character Error Rate: {new_metric[0]:.8f}",
f"Word Error Rate: {new_metric[1]:.8f}",
f"Sequence Error Rate: {new_metric[2]:.8f}"
])
p_corpus = []
for i in range(dtgen.size['test']):
p_corpus.append(f"GT {dtgen.dataset['test']['gt'][i]}")
p_corpus.append(f"DT {dtgen.dataset['test']['dt'][i]}")
p_corpus.append(f"PD {predicts[i]}\n")
# write report
with open(os.path.join(output_path, "predict.txt"), "w") as lg:
lg.write("\n".join(p_corpus))
print("\n".join(p_corpus[:30]))
with open(os.path.join(output_path, "evaluate.txt"), "w") as lg:
lg.write(e_corpus)
print(e_corpus)
###Output
_____no_output_____ |
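###Markdown
To make the reported metrics concrete: CER is the edit distance between a prediction and its ground truth, divided by the ground-truth length. A minimal self-contained sketch of that idea — not the project's implementation in `data/evaluation.py`:
###Code
def levenshtein(a: str, b: str) -> int:
    """Classic dynamic-programming edit distance between two strings."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                  # deletion
                            curr[j - 1] + 1,              # insertion
                            prev[j - 1] + (ca != cb)))    # substitution
        prev = curr
    return prev[-1]

gt_text, pd_text = "the quick brown fox", "the quick brwn fox"
cer = levenshtein(pd_text, gt_text) / len(gt_text)
print(f"Character Error Rate: {cer:.8f}")  # one dropped character over 19 characters
###Output
_____no_output_____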
notebook/Chapter_04.ipynb | ###Markdown
--- Chapter 4: Linear Algebra --- VectorA vector, in my understanding, is a one-dimensional list of numbers in which each index carries the same semantics.\begin{equation}\vec{v_1} = \begin{bmatrix}2 \\ 1 \end{bmatrix} =\begin{bmatrix}v_1 \\ v_2 \end{bmatrix}=\begin{bmatrix}v_i \\ v_j \end{bmatrix}=\begin{bmatrix}v_x \\ v_y \end{bmatrix}\end{equation}\begin{equation}\vec{v_2} = \begin{bmatrix}4 \\ -3 \\2 \end{bmatrix}=\begin{bmatrix}v_1 \\ v_2 \\ v_3 \end{bmatrix} =\begin{bmatrix}v_i \\ v_j \\ v_k \end{bmatrix} =\begin{bmatrix}v_x \\ v_y \\ v_z \end{bmatrix} \end{equation}\begin{equation}\overrightarrow{grades} = \begin{bmatrix}exam_1 \\ exam_2 \\ exam_3 \\ exam_4 \end{bmatrix}=\begin{bmatrix}95 \\ 80 \\ 75 \\ 62 \end{bmatrix} \end{equation}A simple built-in ``list`` would suffice, but most of the time we use numpy. In this chapter the book leads us to build our own vector data type from scratch. Create the data type ``Vector`` from ``typing`` using ``List``. We will use ``Vector`` to define function arguments and return values. ``height_weight_age`` is a vector storing height, weight, and age, while ``grades`` stores one person's scores from 4 exams.
###Code
from typing import List
Vector = List[float]
height_weight_age = [70, # inches,
170, # pounds,
40 ] # years
grades = [95, # exam1
80, # exam2
75, # exam3
62 ] # exam4
###Output
_____no_output_____
###Markdown
Add/SubtractTo add/subtract vectors, add/subtract the corresponding elements together. The vectors must have the same dimension.\begin{equation}\vec{v} = \begin{bmatrix}3 \\ -1 \end{bmatrix} \;\;\; \vec{s} = \begin{bmatrix}2 \\ 4 \end{bmatrix}\end{equation} \begin{equation}\begin{aligned}\vec{t}= \vec{v} + \vec{s} &= \begin{bmatrix}v_x + s_x \\ v_y + s_y \end{bmatrix} \\ &= \begin{bmatrix}3 +2 \\ -1 + 4 \end{bmatrix} \\ &= \begin{bmatrix}5 \\ 3\end{bmatrix} \\ \end{aligned}\tag{Vector plus Vector}\label{Vector plus Vector}\end{equation}
###Code
def add(v: Vector, w: Vector) -> Vector:
"""Adds corresponding elements"""
assert len(v) == len(w), "vectors must be the same length"
return [v_i + w_i for v_i, w_i in zip(v, w)]
assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
def subtract(v: Vector, w: Vector) -> Vector:
"""Subtracts corresponding elements"""
assert len(v) == len(w), "vectors must be the same length"
return [v_i - w_i for v_i, w_i in zip(v, w)]
assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]
grades2 = [45, # exam1
50, # exam2
47, # exam3
42 ] # exam4
add(grades,grades2), subtract(grades,grades2)
###Output
_____no_output_____
###Markdown
Sum of VectorsAn interesting technique for asserting and summing in Python style: ``all()`` to validate the sizes, and a list comprehension to sum the list of vectors instead of looping over calls to ``add``.
###Code
def vector_sum(vectors: List[Vector]) -> Vector:
"""Sums all corresponding elements"""
# Check that vectors is not empty
assert vectors, "no vectors provided!"
# Check the vectors are all the same size
num_elements = len(vectors[0])
assert all(len(v) == num_elements for v in vectors), "different sizes!"
# the i-th element of the result is the sum of every vector[i]
return [sum(vector[i] for vector in vectors)
for i in range(num_elements)]
assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
###Output
_____no_output_____
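###Markdown
As a cross-check (not from the book), the same element-wise sum can be written with ``zip(*...)``, which regroups the vectors by position; note that, unlike ``vector_sum``, ``zip`` would silently truncate vectors of unequal length instead of asserting:
###Code
def vector_sum_zip(vectors: List[Vector]) -> Vector:
    """Element-wise sum via zip(*...); equivalent to vector_sum for same-size vectors"""
    assert vectors, "no vectors provided!"
    return [sum(column) for column in zip(*vectors)]

assert vector_sum_zip([[1, 2], [3, 4], [5, 6], [7, 8]]) == vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
###Output
_____no_output_____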
###Markdown
Vector scalar multiplication and meanA vector can be multiplied by a scalar as described below.\begin{equation}\begin{aligned}\lambda\vec{v}&= \lambda \times \begin{bmatrix}v_x \\ v_y \end{bmatrix} = \begin{bmatrix}\lambda\times v_x \\ \lambda\times v_y \end{bmatrix} \\ \vec{v_2}&= 3 \times \vec{v_1} = 3 \times \begin{bmatrix}v_x \\ v_y \end{bmatrix} = \begin{bmatrix}3\times 2 \\ 3\times 1 \end{bmatrix} = \begin{bmatrix}6 \\ 3 \end{bmatrix} \\ &= \vec{v_1} + \vec{v_1} + \vec{v_1} = \begin{bmatrix}v_x \\ v_y \end{bmatrix} + \begin{bmatrix}v_x \\ v_y \end{bmatrix} + \begin{bmatrix}v_x \\ v_y \end{bmatrix} = \begin{bmatrix} 2+2+2 \\ 1+1+1 \end{bmatrix} = \begin{bmatrix}6 \\ 3 \end{bmatrix}\end{aligned}\tag{Scalar Multiply}\label{Scalar Multiply}\end{equation}
###Code
def scalar_multiply(c: float, v: Vector) -> Vector:
"""Multiplies every element by c"""
return [c * v_i for v_i in v]
assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]
def vector_mean(vectors: List[Vector]) -> Vector:
"""Computes the element-wise average"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]
###Output
_____no_output_____
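###Markdown
Applying ``vector_mean`` to the two exam-score vectors defined earlier gives the per-exam average across both sets of grades:
###Code
# ([95, 80, 75, 62] + [45, 50, 47, 42]) / 2, element-wise
assert vector_mean([grades, grades2]) == [70.0, 65.0, 61.0, 52.0]
###Output
_____no_output_____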
###Markdown
Dot product and sum of squaresThe dot product of two vectors is defined by the equations below; note that the dot product is commutative.\begin{equation}\begin{aligned}\vec{v} \cdot \vec{s} = \vec{s} \cdot \vec{v} = \mathbf{v}^\mathbf{T}\mathbf{s} &=\sum_{i=0}^{i=n} {v_i \cdot s_i} \\ &= (v_{1} \cdot s_{1}) + (v_{2} \cdot s_{2}) ... + \; (v_{n} \cdot s_{n}) \\ &= (v_{x} \cdot s_{x}) + (v_{y} \cdot s_{y}) ... + \; (v_{n} \cdot s_{n}) \\ &= (3 \cdot 2 ) + (-1 \cdot 4) \\ &= (3 \times 2) + (-1 \times 4) \\&= 2 \\ \end{aligned}\tag{Dot Product}\label{Dot Product}\end{equation} The dot product of a vector with itself is equivalent to the sum of squares of all its elements.\begin{equation}\begin{aligned}\vec{v} \cdot \vec{v} &=\sum_{i=0}^{i=n} {v_i \cdot v_i} \\&= (v_{1} \cdot v_{1}) + (v_{2} \cdot v_{2}) ... + \; (v_{n} \cdot v_{n}) \\ &= (1 \cdot 1) + (2 \cdot 2) + (3 \cdot 3) \\ &= (1 \times 1) + (2 \times 2) + (3 \times 3) \\&= (1^2) + (2^2) + (3^2) \\&= 14 \\ \end{aligned}\tag{Sum of Squares}\label{Sum of Squares}\end{equation}
###Code
def dot(v: Vector, w: Vector) -> float:
"""Computes v_1 * w_1 + ... + v_n * w_n"""
assert len(v) == len(w), "vectors must be same length"
return sum(v_i * w_i for v_i, w_i in zip(v, w))
assert dot([1, 2, 3], [4, 5, 6]) == 32 # 1 * 4 + 2 * 5 + 3 * 6
def sum_of_squares(v: Vector) -> float:
"""Returns v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
assert sum_of_squares([1, 2, 3]) == 14 # 1 * 1 + 2 * 2 + 3 * 3
###Output
_____no_output_____
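###Markdown
As noted at the start of the chapter, numpy is what we would normally use; a quick cross-check of ``dot`` and ``sum_of_squares`` against it:
###Code
import numpy as np

# numpy computes the same dot product, and dot(v, v) is the sum of squares
assert np.dot([1, 2, 3], [4, 5, 6]) == dot([1, 2, 3], [4, 5, 6]) == 32
assert np.dot([1, 2, 3], [1, 2, 3]) == sum_of_squares([1, 2, 3]) == 14
###Output
_____no_output_____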
###Markdown
Vector magnitude and DistancesThe magnitude of a vector is the square root of the sum of squares of all its elements. \begin{equation}\begin{aligned}\|\vec{v}\| &= \sqrt{v_{1}^{2} + v_{2}^{2} ... + v_{n}^{2}} \\\|\vec{v}\| &= \sqrt{v_{1}^{2} + v_{2}^{2}} \\ &= \sqrt{3^{2} + 4^{2}} \\ &= \sqrt{25} \; = 5 \end{aligned} \tag{Magnitude}\label{Magnitude}\end{equation} Calculate the distance between vectors (or coordinate points) in the Cartesian system: $$\vec{v}=\begin{bmatrix}3 \\ 4 \end{bmatrix} \;\;\; \vec{w} = \begin{bmatrix}8 \\ 6 \end{bmatrix} \\ \\$$\begin{equation}\begin{aligned}Square\;Distance(\vec{v},\vec{w}) &= \sum_{i=0}^{i=n} {(v_i - w_i)^2} \\ &=(v_{1} - w_{1})^{2} + (v_{2} - w_{2})^{2} \\ &=(3 - 8)^{2} + (4 -6)^{2} \\ &=(-5)^{2} + (-2)^{2} \\ &=29 \end{aligned} \tag{Square Distance}\label{Square Distance}\end{equation} \begin{equation}\begin{aligned}Distance(\vec{v},\vec{w}) &= \sqrt{ \sum_{i=0}^{i=n} {(v_i - w_i)^2}} \\ &= \sqrt{ (v_{1} - w_{1})^{2} + (v_{2} - w_{2})^{2}} \\ &= \sqrt{ (3 - 8)^{2} + (4 -6)^{2}} \\ &= \sqrt{(-5)^{2} + (-2)^{2}} \\ &= \sqrt{29} \; \approx 5.385\end{aligned} \tag{Distance}\label{Distance}\end{equation}
###Code
import math
def magnitude(v: Vector) -> float:
"""Returns the magnitude (or length) of v"""
return math.sqrt(sum_of_squares(v)) # math.sqrt is square root function
assert magnitude([3, 4]) == 5
def squared_distance(v: Vector, w: Vector) -> float:
"""Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
return sum_of_squares(subtract(v, w))
print(squared_distance([3, 4],[8,6]))
def distance(v: Vector, w: Vector) -> float:
"""Computes the distance between v and w"""
return math.sqrt(squared_distance(v, w))
print(distance([3, 4],[8,6]))
def distance(v: Vector, w: Vector) -> float: # type: ignore
return magnitude(subtract(v, w))
print(distance([3, 4],[8,6]))
###Output
29
5.385164807134504
5.385164807134504
###Markdown
--- MatrixTwo-dimensional row and column data. \begin{align*}A_{M\times N} &=\begin{bmatrix} a_{11} & a_{12} & \dots & a_{1N} \\ a_{21} & a_{22} & \dots & a_{2N} \\ \vdots & \vdots & \ddots & \vdots \\ a_{M1} & a_{M2} & \dots & a_{MN}\end{bmatrix} \\ \\A_{3\times3} &= \begin{bmatrix} v_1 & v_2 & v_3 \\ v_4 & v_5 & v_6 \\ v_7 & v_8 & v_9 \end{bmatrix} = \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \end{bmatrix} \end{align*} Note that if one dimension equals one, the matrix can be considered a vector. If the number of rows is fixed to one, we call it a **row vector**, and if the number of columns is fixed to one, we call it a **column vector**.\begin{align*}\text{Column vector} &= \begin{bmatrix}2 \\ 1 \\ 3 \end{bmatrix} \\\text{Row vector} &= \begin{bmatrix}2 & 1 & 3 \end{bmatrix} \end{align*}A column vector can be **transposed** to a row vector and vice versa. A one-by-one matrix with a single element may be considered a **scalar**, isn't it?In Python one usually uses **numpy**, but a simple ``list`` (or typed ``List``) can also implement a matrix; the book introduces the ``Matrix`` type alias built from ``List``.
###Code
# Another type alias
Matrix = List[List[float]]
A = [[1, 2, 3], # A has 2 rows and 3 columns
[4, 5, 6]]
B = [[1, 2], # B has 3 rows and 2 columns
[3, 4],
[5, 6]]
ROW_VECTOR = [[1,2,3]]
COL_VECTOR = [[1],[2],[3]]
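# a minimal transpose sketch (assumption: the matrix is a well-formed list of
# equal-length rows); it turns the column vector above into the row vector
def transpose(M: Matrix) -> Matrix:
    """Swaps the rows and columns of M"""
    return [[M[i][j] for i in range(len(M))]
            for j in range(len(M[0]))]

assert transpose(COL_VECTOR) == ROW_VECTOR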
###Output
_____no_output_____
###Markdown
Matrix DimensionThe dimension of a matrix is determined by the shape of the matrix list (or numpy array).
###Code
from typing import Tuple
def shape(A: Matrix) -> Tuple[int, int]:
"""Returns (# of rows of A, # of columns of A)"""
num_rows = len(A)
num_cols = len(A[0]) if A else 0 # number of elements in first row
return num_rows, num_cols
assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3) # 2 rows, 3 columns
def get_row(A: Matrix, i: int) -> Vector:
"""Returns the i-th row of A (as a Vector)"""
return A[i] # A[i] is already the ith row
def get_column(A: Matrix, j: int) -> Vector:
"""Returns the j-th column of A (as a Vector)"""
return [A_i[j] # jth element of row A_i
for A_i in A] # for each row A_i
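# hedged checks on the accessors, using the matrix A defined above
assert get_row(A, 0) == [1, 2, 3]
assert get_column(A, 1) == [2, 5]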
print(shape(A))
print(shape(B))
print('Row vector ',ROW_VECTOR, ' shape ',shape(ROW_VECTOR))
print('Col vector ',COL_VECTOR, ' shape ',shape(COL_VECTOR))
###Output
(2, 3)
(3, 2)
Row vector [[1, 2, 3]] shape (1, 3)
Col vector [[1], [2], [3]] shape (3, 1)
###Markdown
Identity MatrixA matrix whose diagonal elements ($a_{1 1},a_{2 2}, \dots a_{n n}$) are one and whose remaining entries are all zero is called the **identity matrix**.\begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix}The function ``make_matrix(num_rows, num_cols, entry_fn)`` accepts the number of rows and columns along with a ``Callable[[int, int], float]`` and returns a matrix. The function ``identity_matrix(n: int)`` returns an $N\times N$ identity matrix; notice that a ``lambda`` is passed to ``make_matrix`` as the callable object.
###Code
from typing import Callable
def make_matrix(num_rows: int,
num_cols: int,
entry_fn: Callable[[int, int], float]) -> Matrix:
"""
Returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)
"""
return [[entry_fn(i, j) # given i, create a list
for j in range(num_cols)] # [entry_fn(i, 0), ... ]
for i in range(num_rows)] # create one list for each i
def identity_matrix(n: int) -> Matrix:
"""Returns the n x n identity matrix"""
return make_matrix(n, n, lambda i, j: 1 if i == j else 0)
assert identity_matrix(5) == [[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]]
###Output
_____no_output_____
###Markdown
The remaining code from the book shows how matrices can represent the data from chapter one.
###Code
data = [[70, 170, 40],
[65, 120, 26],
[77, 250, 19],
# ....
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# user 0 1 2 3 4 5 6 7 8 9
#
friend_matrix = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
assert friend_matrix[0][2] == 1, "0 and 2 are friends"
assert friend_matrix[0][8] == 0, "0 and 8 are not friends"
# only need to look at one row
friends_of_five = [i
for i, is_friend in enumerate(friend_matrix[5])
if is_friend]
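# hedged check: row 5 of friend_matrix marks users 4, 6 and 7 as friends of user 5
assert friends_of_five == [4, 6, 7]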
import numpy as np
a = np.array([1, 2, 3])
display(a)
display(a.transpose())
display(a.dot(a.transpose()))
a.shape = (3,1)
display(a)
display(a.transpose())
display(a.dot(a.transpose()))
###Output
_____no_output_____ |
SoftSensingTransformer_public.ipynb | ###Markdown
Baseline models for SST paper* autoencoder* vwmhqae* LSTM 1. SST model
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
from math import exp
import time
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Layer, Dropout
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import pytz
from datetime import datetime
import os
import csv
import sys
from scipy.stats import mode
import matplotlib.pyplot as plt
# hyper parameters
reg_kernal = 1e-4
reg_activity = None  # activity regularization disabled; defined so the model_name string below can reference it
kernel_regularizer=tf.keras.regularizers.l2(reg_kernal)
#activity_regularizer=tf.keras.regularizers.l2(reg_activity)
#kernel_regularizer=None
activity_regularizer=None
earlystop_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0.0001,
patience=100,restore_best_weights=True)
variance_on = False
single_meas = False
# hyper parameters
num_layers = 3
d_model=128
dff = 128
num_heads = 1
dropout_rate = 0.5
lr = 1e-4
lr_factor = 0.5
warmup = 4000
epochs = 1000
batch_size = 2048
wafer_col = 'substrate'
model_name = 'layer_%s_dff_%s_head_%s_kernel_%s_act_%s_drop_%s_epoch_%s_batch_%s_dmodel_%s_lrf_%s'%(num_layers,
dff, num_heads, reg_kernal, reg_activity, dropout_rate, epochs, batch_size, d_model,lr_factor)
if variance_on:
model_name += '_vw'
model_name += '_embed'
model_name
###Output
_____no_output_____
###Markdown
read and process the data
###Code
tool_set = 'P1'
grouping = 'time_step'
experiment = 'SST'
np_path = 'npdata/%s/'%(tool_set)
X_train = np.load(np_path+'X_train.npy')
X_val = np.load(np_path+'X_val.npy')
X_test = np.load(np_path+'X_test.npy')
label_train = np.load(np_path+'label_train.npy')
label_val = np.load(np_path+'label_val.npy')
label_test = np.load(np_path+'label_test.npy')
label_cols = np.array(range(label_train.shape[1]//2))
X_train = tf.convert_to_tensor(X_train, dtype=tf.float32)
X_val = tf.convert_to_tensor(X_val, dtype=tf.float32)
X_test = tf.convert_to_tensor(X_test, dtype=tf.float32)
label_train = tf.convert_to_tensor(label_train, dtype=tf.float32)
label_val = tf.convert_to_tensor(label_val, dtype=tf.float32)
label_test = tf.convert_to_tensor(label_test, dtype=tf.float32)
print(X_train.shape, X_val.shape, X_test.shape)
print(label_train.shape, label_val.shape, label_test.shape)
###Output
(194042, 2, 817) (34122, 2, 817) (27326, 2, 817)
(194042, 22) (34122, 22) (27326, 22)
###Markdown
class weight
###Code
class_weight = np.array([label_train.shape[0]/label_train.shape[1]/label_train.numpy()[:,i].sum() for i in range(label_train.shape[1])])
class_weight
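# sanity check (assumption): one inverse-frequency weight per label column,
# so rare classes contribute more to the loss
assert class_weight.shape[0] == label_train.shape[1]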
def loss_func(w):
def inner_loss(y,y_pred):
loss = 0
weights = w
for i in range(len(weights)//2):
y_i = y[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
#y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_i = tf.reduce_sum(loss_category*w_i)
loss += loss_i
return loss
return inner_loss
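# note: this weighted cross-entropy is a plain reference implementation; the SST
# model below instead computes its loss inside CustomMultiLossLayer and is
# compiled with loss=None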
###Output
_____no_output_____
###Markdown
The transformer model
###Code
# positional encoding
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
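# hedged shape check: batch dim of 1, `position` rows, `d_model` features
assert positional_encoding(50, d_model).shape == (1, 50, d_model)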
# masking
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq[:,:,-1], 1), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
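# hedged check: the look-ahead mask is strictly upper triangular (1 = masked)
assert np.array_equal(create_look_ahead_mask(3).numpy(),
                      [[0., 1., 1.], [0., 0., 1.], [0., 0., 0.]])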
# attention
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
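# hedged sanity check: with identical keys the attention spreads uniformly,
# so every weight along the last axis is 1/seq_len_k
_o, _w = scaled_dot_product_attention(tf.ones((1, 1, 2)), tf.ones((1, 3, 2)),
                                      tf.ones((1, 3, 2)), None)
assert np.allclose(_w.numpy(), 1.0 / 3.0)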
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model,activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)
self.wk = tf.keras.layers.Dense(d_model,activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)
self.wv = tf.keras.layers.Dense(d_model,activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)
self.dense = tf.keras.layers.Dense(d_model,activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
# feed forward
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model,
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer) # (batch_size, seq_len, d_model)
])
#encoder
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
# x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
###Output
_____no_output_____
###Markdown
Training Variance weighted model
###Code
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs, **kwargs):
self.nb_outputs = nb_outputs # nb_outputs = 2*#meas_steps
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=variance_on)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, y_true, y_pred, weights):
loss = 0
# cross-entropy part
for i in range(self.nb_outputs//2):
y_i = y_true[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
# y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_category*=w_i
vw = self.log_vars[2*i:2*i+2]
p1, p2 = K.exp(-vw[0][0]*2), K.exp(-vw[1][0]*2)
loss_i = p1*loss_category[0] + vw[0][0] + p2*loss_category[1] + vw[1][0]
# sigma = self.log_vars[i][0]
# loss_i = K.exp(-sigma*2)*loss_category[0] + sigma + K.exp(-sigma*2)*loss_category[1] + sigma
# loss_i = K.exp(-2*sigma)*tf.reduce_sum(loss_category) + sigma
# loss_i += (p1+p2)*1e-10
loss += loss_i
return loss
def call(self, inputs, weights):
y_true = inputs[0]
y_pred = inputs[1]
loss = self.multi_loss(y_true, y_pred, weights)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return y_pred
def get_sst_model(inp_dim1, inp_dim2, out_dim, weights, num_layers, d_model, num_heads, dff,
input_vocab_size, target_vocab_size, pe_input, rate):
inp = Input(shape=(inp_dim1, inp_dim2,))
tokenizer = Encoder(num_layers, d_model, num_heads, dff,
input_vocab_size, pe_input, rate)
#final_layer = Dense(target_vocab_size, activation='sigmoid',
# activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)
final_layer = tf.keras.Sequential([Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
tf.keras.layers.Dense(target_vocab_size, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)])
enc_padding_mask = create_padding_mask(inp)
embed = tf.keras.layers.Dense(d_model)(inp)
enc_output = tokenizer(embed, True, enc_padding_mask) # (batch_size, inp_seq_len, d_model)
dec_output = layers.GlobalAveragePooling1D()(enc_output) # to do: change to flatten (need to support masking)
y_pred = final_layer(dec_output)
y_true = Input(shape=(out_dim,), name='y_true')
out = CustomMultiLossLayer(nb_outputs=out_dim)([y_true, y_pred],weights)
return Model([inp, y_true], out)
# learning rate decay
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=warmup):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) * lr_factor
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
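# hedged sanity check: the schedule warms the learning rate up, so the value at
# step `warmup` should exceed the value at step 1
_lrs = learning_rate(tf.constant([1.0, float(warmup)]))
assert float(_lrs[0]) < float(_lrs[1])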
inp_dim1 = X_train.shape[1]
inp_dim2 = X_train.shape[2]
out_dim = label_train.shape[1]
#input_vocab_size = d_model
#target_vocab_size
transformer = get_sst_model(
inp_dim1 = inp_dim1,
inp_dim2 = inp_dim2,
out_dim = out_dim,
weights = class_weight,
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
input_vocab_size=d_model,
target_vocab_size=out_dim,
pe_input=1000,
rate=dropout_rate)
transformer.compile(optimizer=optimizer, loss=None)
#transformer.compile(optimizer=tf.keras.optimizers.Adam(lr=lr), loss=None)
%%time
hist = transformer.fit([X_train, label_train],label_train,
epochs=epochs,
batch_size = batch_size,
verbose=1,
callbacks=[earlystop_callback],
validation_data=([X_val,label_val], label_val))
# plot the loss
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment)
plt.show()
print('weights:', [np.exp(-K.get_value(log_var[0])) for log_var in transformer.layers[-1].log_vars])
transformer.summary()
###Output
weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
Model: "model_2"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_4 (InputLayer) [(None, 2, 817)] 0
__________________________________________________________________________________________________
tf_op_layer_strided_slice_2 (Te [(None, 2)] 0 input_4[0][0]
__________________________________________________________________________________________________
tf_op_layer_Equal_1 (TensorFlow [(None, 2)] 0 tf_op_layer_strided_slice_2[0][0]
__________________________________________________________________________________________________
tf_op_layer_Cast_1 (TensorFlowO [(None, 2)] 0 tf_op_layer_Equal_1[0][0]
__________________________________________________________________________________________________
dense_52 (Dense) (None, 2, 128) 104704 input_4[0][0]
__________________________________________________________________________________________________
tf_op_layer_strided_slice_3 (Te [(None, 1, 1, 2)] 0 tf_op_layer_Cast_1[0][0]
__________________________________________________________________________________________________
encoder_2 (Encoder) (None, 2, 128) 298752 dense_52[0][0]
__________________________________________________________________________________________________
global_average_pooling1d_1 (Glo (None, 128) 0 encoder_2[0][0]
__________________________________________________________________________________________________
y_true (InputLayer) [(None, 22)] 0
__________________________________________________________________________________________________
sequential_14 (Sequential) (None, 22) 35862 global_average_pooling1d_1[0][0]
__________________________________________________________________________________________________
custom_multi_loss_layer_2 (Cust (None, 22) 22 y_true[0][0]
sequential_14[0][0]
==================================================================================================
Total params: 439,340
Trainable params: 439,318
Non-trainable params: 22
__________________________________________________________________________________________________
###Markdown
Evaluation
###Code
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib.pyplot as plt
def plot_roc_cur(fper, tper, title):
plt.plot(fper, tper, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-' + title)
plt.legend()
# plt.savefig(plots_folder + 'roc.png')
plt.show()
def evaluate(classifier, X, label, label_cols, model_name, split_name, plot=False, save=True):
thresh = 0.5
    label_pred = classifier.predict([X,label])  # use the passed-in model, not the global
label_true = label
# softmax for pos/neg predictions
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
print(split_name, ': the overall accuracy is:', sum(sum(prob.numpy()>thresh))/sum(sum(label.numpy()>0)))
print(split_name, ': the overall recall is:', sum(sum(prob.numpy()>thresh)[1::2])/sum(sum(label.numpy()>0)[1::2]))
meas_steps = label_cols
result = pd.DataFrame(index=meas_steps, columns = ['model', 'split', 'tp','tn','fn','fp','tpr','fpr','min_dis', 'auc'])
for i in range(len(meas_steps)):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[pos!=0].numpy()
neg = neg[neg!=0].numpy()
tp = sum(pos>thresh)
fn = sum(pos<thresh)
tn = sum(neg>thresh)
fp = sum(neg<thresh)
tpr = tp/(tp+fn+1e-9)
fpr = fp/(fp+tn+1e-9)
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
if len(pos) and len(neg):
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
min_dis = np.sqrt(fper**2 + (1-tper)**2).min()
else:
min_dis = None
auc = None
result.iloc[i] = [model_name,split_name,tp,tn,fn,fp,tpr,fpr,min_dis,auc]
if plot:
plot_roc_cur(fper, tper, meas_steps[i])
display(result)
%%time
evaluate(transformer, X_train, label_train, label_cols, model_name, 'train', plot=False)
evaluate(transformer, X_val, label_val, label_cols, model_name, 'val', plot=False)
evaluate(transformer, X_test, label_test, label_cols, model_name, 'test', plot=False)
###Output
test : the overall accuracy is: 0.7289602963678827
test : the overall recall is: 0.5856443719412724
###Markdown
2. AutoEncoder
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
from math import exp
import time
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Layer, Dropout
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import pytz
from datetime import datetime
import os
import csv
import sys
from scipy.stats import mode
import matplotlib.pyplot as plt
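# note: the list below rebinds the name `layers`, shadowing tensorflow.keras.layers
# imported above; later cells therefore use tf.keras.layers explicitly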
layers = [512,256,128]
n_layer = len(layers)
dff = 128
dropout_rate = 0.5
model_name = 'SAE' + str(layers)
earlystop_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0.0001,
patience=100,restore_best_weights=True)
reg_kernal, reg_activity = 1e-4,1e-4
kernel_regularizer=tf.keras.regularizers.l2(reg_kernal)
activity_regularizer=tf.keras.regularizers.l2(reg_activity)
#kernel_regularizer=None
activity_regularizer=None
lr = 0.005
epochs = 1000
batch_size = 2048
model_name
###Output
_____no_output_____
###Markdown
read and process the data
###Code
experiment = 'SAE'
X_train = tf.reshape(X_train, [X_train.shape[0], X_train.shape[1]*X_train.shape[2]])
X_val = tf.reshape(X_val, [X_val.shape[0], X_val.shape[1]*X_val.shape[2]])
X_test = tf.reshape(X_test, [X_test.shape[0], X_test.shape[1]*X_test.shape[2]])
print(X_train.shape, X_val.shape, X_test.shape)
print(label_train.shape, label_val.shape, label_test.shape)
class_weight = np.array([label_train.shape[0]/label_train.shape[1]/label_train.numpy()[:,i].sum() for i in range(label_train.shape[1])])
class_weight
class Autoencoder(Model):
def __init__(self, latent_dim, input_dim):
super(Autoencoder, self).__init__()
self.latent_dim = latent_dim
self.encoder = tf.keras.Sequential(tf.keras.layers.Dense(latent_dim,activation='sigmoid'))
self.decoder = tf.keras.Sequential(tf.keras.layers.Dense(input_dim,activation='sigmoid'))
def call(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
for i in range(n_layer):
input_dim = X_train.shape[1]
latent_dim = layers[i]
autoencoder = Autoencoder(latent_dim, input_dim)
autoencoder.compile(loss=losses.MeanSquaredError(), optimizer=tf.keras.optimizers.Adam(lr=lr))
hist = autoencoder.fit(X_train, X_train,
epochs=1000,
batch_size = 2048,
verbose=0,
callbacks=[earlystop_callback],
validation_data=(X_val, X_val))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment + 'layer%s'%(i))
    #plt.savefig(plots_folder + 'training and validation loss')  # plots_folder is not defined in this notebook
plt.show()
X_train, X_val = autoencoder.encoder.predict(X_train),autoencoder.encoder.predict(X_val)
X_test = autoencoder.encoder.predict(X_test)
print(X_train.shape)
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs, **kwargs):
self.nb_outputs = nb_outputs # nb_outputs = 2*#meas_steps
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=variance_on)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, y_true, y_pred, weights):
loss = 0
# cross-entropy part
for i in range(self.nb_outputs//2):
y_i = y_true[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
# y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_category*=w_i
vw = self.log_vars[2*i:2*i+2]
p1, p2 = K.exp(-vw[0][0]*2), K.exp(-vw[1][0]*2)
loss_i = p1*loss_category[0] + vw[0][0] + p2*loss_category[1] + vw[1][0]
# sigma = self.log_vars[i][0]
# loss_i = K.exp(-sigma*2)*loss_category[0] + sigma + K.exp(-sigma*2)*loss_category[1] + sigma
# loss_i = K.exp(-2*sigma)*tf.reduce_sum(loss_category) + sigma
# loss_i += (p1+p2)*1e-10
loss += loss_i
return loss
def call(self, inputs, weights):
y_true = inputs[0]
y_pred = inputs[1]
loss = self.multi_loss(y_true, y_pred, weights)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return y_pred
def get_classifier_model(inp_dim, out_dim, weights, dff, rate):
inp = Input(shape=(inp_dim,))
final_layer = tf.keras.Sequential([Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
tf.keras.layers.Dense(out_dim, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)])
y_pred = final_layer(inp)
y_true = Input(shape=(out_dim,), name='y_true')
out = CustomMultiLossLayer(nb_outputs=out_dim)([y_true, y_pred],weights)
return Model([inp, y_true], out)
classifier = get_classifier_model(inp_dim = X_train.shape[1], out_dim=label_train.shape[1],
weights=class_weight, dff=dff, rate=dropout_rate)
classifier.compile(loss=None, optimizer=tf.keras.optimizers.Adam(lr=lr))
hist = classifier.fit([X_train, label_train],label_train,
epochs=epochs,
batch_size = batch_size,
verbose=0,
callbacks=[earlystop_callback],
validation_data=([X_val,label_val], label_val))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment)
plt.show()
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib.pyplot as plt
def plot_roc_cur(fper, tper, title):
plt.plot(fper, tper, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-' + title)
plt.legend()
# plt.savefig(plots_folder + 'roc.png')
plt.show()
def evaluate(classifier, X, label, label_cols, model_name, split_name, plot=False, save=True):
thresh = 0.5
label_pred = classifier.predict([X,label])
label_true = label
# softmax for pos/neg predictions
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
print(split_name, ': the overall accuracy is:', sum(sum(prob.numpy()>thresh))/sum(sum(label.numpy()>0)))
print(split_name, ': the overall recall is:', sum(sum(prob.numpy()>thresh)[1::2])/sum(sum(label.numpy()>0)[1::2]))
meas_steps = label_cols
result = pd.DataFrame(index=meas_steps, columns = ['model', 'split', 'tp','tn','fn','fp','tpr','fpr','min_dis', 'auc'])
for i in range(len(meas_steps)):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[pos!=0].numpy()
neg = neg[neg!=0].numpy()
tp = sum(pos>thresh)
fn = sum(pos<thresh)
tn = sum(neg>thresh)
fp = sum(neg<thresh)
tpr = tp/(tp+fn+1e-9)
fpr = fp/(fp+tn+1e-9)
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
if len(pos) and len(neg):
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
min_dis = np.sqrt(fper**2 + (1-tper)**2).min()
else:
min_dis = None
auc = None
result.iloc[i] = [model_name,split_name,tp,tn,fn,fp,tpr,fpr,min_dis,auc]
if plot:
plot_roc_cur(fper, tper, meas_steps[i])
display(result)
evaluate(classifier, X_train, label_train, label_cols, model_name, 'train', plot=False)
evaluate(classifier, X_val, label_val, label_cols, model_name, 'val', plot=False)
evaluate(classifier, X_test, label_test, label_cols, model_name, 'test', plot=False)
###Output
train : the overall accuracy is: 0.7914223243819027
train : the overall recall is: 0.6443925233644859
###Markdown
3. QAE
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
from math import exp
import time
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Layer, Dropout
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import pytz
from datetime import datetime
import os
import csv
import sys
from scipy.stats import mode
import matplotlib.pyplot as plt
layers = [512,256,128]
n_layer = len(layers)
dff = 128
dropout_rate = 0.5
model_name = 'QAE' + str(layers)
activation='sigmoid'
earlystop_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0.0001,
patience=100,restore_best_weights=True)
reg_kernal, reg_activity = 1e-4,1e-4
kernel_regularizer=tf.keras.regularizers.l2(reg_kernal)
activity_regularizer=tf.keras.regularizers.l2(reg_activity)
#kernel_regularizer=None
activity_regularizer=None
lr = 0.005
epochs = 1000
batch_size = 2048
model_name
###Output
_____no_output_____
###Markdown
read and process the data
###Code
experiment = 'QAE'
np_path = 'npdata/%s/'%(tool_set)
X_train = np.load(np_path+'X_train.npy')
X_val = np.load(np_path+'X_val.npy')
X_test = np.load(np_path+'X_test.npy')
label_train = np.load(np_path+'label_train.npy')
label_val = np.load(np_path+'label_val.npy')
label_test = np.load(np_path+'label_test.npy')
label_cols = np.array(range(label_train.shape[1]//2))
X_train = tf.convert_to_tensor(X_train, dtype=tf.float32)
X_val = tf.convert_to_tensor(X_val, dtype=tf.float32)
X_test = tf.convert_to_tensor(X_test, dtype=tf.float32)
label_train = tf.convert_to_tensor(label_train, dtype=tf.float32)
label_val = tf.convert_to_tensor(label_val, dtype=tf.float32)
label_test = tf.convert_to_tensor(label_test, dtype=tf.float32)
X_train = tf.reshape(X_train, [X_train.shape[0], X_train.shape[1]*X_train.shape[2]])
X_val = tf.reshape(X_val, [X_val.shape[0], X_val.shape[1]*X_val.shape[2]])
X_test = tf.reshape(X_test, [X_test.shape[0], X_test.shape[1]*X_test.shape[2]])
print(X_train.shape, X_val.shape, X_test.shape)
print(label_train.shape, label_val.shape, label_test.shape)
class_weight = np.array([label_train.shape[0]/label_train.shape[1]/label_train.numpy()[:,i].sum() for i in range(label_train.shape[1])])
class_weight
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs=2, **kwargs):
self.nb_outputs = nb_outputs # nb_outputs = 2*#meas_steps+1
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=variance_on)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, ys_true, ys_pred, weights):
assert len(ys_true) == 2 and len(ys_pred) == 2
loss = 0
# 1 for mse and 2 for crossentropy
y1_true, y2_true = ys_true
y1_pred, y2_pred = ys_pred
assert y2_pred.shape[1] == self.nb_outputs-1 and y2_true.shape[1] == self.nb_outputs-1
# mse part
log_var1 = self.log_vars[0] # log_var = log(sigma^2)
p1 = K.exp(-log_var1[0]*2)
loss += 0.5*p1 * tf.keras.losses.mean_squared_error(y1_true, y1_pred) + log_var1[0]
# cross-entropy part
for i in range((self.nb_outputs-1)//2):
y_i = y2_true[:,2*i:2*i+2]
y_pred_i = y2_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
vw = self.log_vars[2*i+1:2*i+3]
p1, p2 = K.exp(-vw[0][0]*2), K.exp(-vw[1][0]*2),
#y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_category*=w_i
loss_i = p1*loss_category[0] + vw[0][0] + p2*loss_category[1] + vw[1][0]
loss += loss_i
return loss
#for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
# precision = K.exp(-log_var[0])
# loss += K.sum(precision * (y_true - y_pred)**2. + log_var[0], -1)
#return K.mean(loss)
def call(self, x, inputs, weights):
ys_true = inputs[:2]
ys_pred = inputs[2:]
loss = self.multi_loss(ys_true, ys_pred, weights)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return x
class CustomMultiLossLayer2(Layer):
def __init__(self, nb_outputs=2, **kwargs):
self.nb_outputs = nb_outputs # nb_outputs = 2*#meas_steps
self.is_placeholder = True
super(CustomMultiLossLayer2, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=variance_on)]
super(CustomMultiLossLayer2, self).build(input_shape)
def multi_loss(self, y_true, y_pred, weights):
loss = 0
# cross-entropy part
for i in range(self.nb_outputs//2):
y_i = y_true[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
vw = self.log_vars[2*i:2*i+2]
p1, p2 = K.exp(-vw[0][0]*2), K.exp(-vw[1][0]*2),
#y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_category*=w_i
loss_i = p1*loss_category[0] + vw[0][0] + p2*loss_category[1] + vw[1][0]
loss += loss_i
return loss
def call(self, inputs,weights):
y_true = inputs[0]
y_pred = inputs[1]
loss = self.multi_loss(y_true, y_pred, weights)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return y_pred
def get_qae_model(nb_features, Q, D1, D2, weights):
inp = Input(shape=(Q,), name='inp')
x = Dense(nb_features, activation=activation)(inp)
#x = Dropout(0.2)(x)
y1_pred = Dense(D1,activation='sigmoid')(x)
y2_pred = Dense(D2,activation='sigmoid')(x)
# y1_true = Input(shape=(D1,), name='y1_true')
y2_true = Input(shape=(D2,), name='y2_true')
out = CustomMultiLossLayer(nb_outputs=D2+1)(x, [inp, y2_true, y1_pred, y2_pred],weights)
return Model([inp, y2_true], out)
def get_classifier_model(inp_dim, out_dim, weights, dff, rate):
inp = Input(shape=(inp_dim,))
final_layer = tf.keras.Sequential([Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
tf.keras.layers.Dense(out_dim, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)])
y_pred = final_layer(inp)
y_true = Input(shape=(out_dim,), name='y_true')
out = CustomMultiLossLayer2(nb_outputs=out_dim)([y_true, y_pred],weights)
return Model([inp, y_true], out)
%%time
for i in range(n_layer):
nb_features = layers[i]
Q = X_test.shape[1]
D1 = Q
D2 = label_test.shape[1]
K.clear_session()
qae = get_qae_model(nb_features, Q, D1, D2,class_weight)
qae.compile(optimizer=tf.keras.optimizers.Adam(lr=lr), loss=None)
hist = qae.fit([X_train, label_train],
epochs=epochs,
batch_size = batch_size,
verbose=0,
callbacks=[earlystop_callback],
validation_data=([X_val, label_val], None))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment)
plt.show()
X_train = qae.predict([X_train, label_train], batch_size=batch_size)
X_val = qae.predict([X_val, label_val], batch_size=batch_size)
X_test = qae.predict([X_test, label_test], batch_size=batch_size)
print('training set shapes:', X_train.shape, label_train.shape)
classifier = get_classifier_model(inp_dim = X_train.shape[1], out_dim=label_train.shape[1],weights=class_weight, dff=dff, rate=dropout_rate)
classifier.compile(loss=None, optimizer=tf.keras.optimizers.Adam(lr=lr))
hist = classifier.fit([X_train, label_train],label_train,
epochs=epochs,
batch_size = batch_size,
verbose=0,
callbacks=[earlystop_callback],
validation_data=([X_val,label_val], label_val))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment)
plt.show()
evaluate(classifier, X_train, label_train, label_cols, model_name, 'train', plot=False)
evaluate(classifier, X_val, label_val, label_cols, model_name, 'val', plot=False)
evaluate(classifier, X_test, label_test, label_cols, model_name, 'test', plot=False)
###Output
train : the overall accuracy is: 0.7787436228065258
train : the overall recall is: 0.7703271028037383
###Markdown
4. LSTM
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
from math import exp
import time
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Layer, Dropout
from tensorflow.keras.initializers import Constant
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import pytz
from datetime import datetime
import os
import csv
import sys
from scipy.stats import mode
import matplotlib.pyplot as plt
layers = [512,256,128]
n_layer = len(layers)
dff = 128
dropout_rate = 0.5
model_name = 'LSTM' + str(layers)
earlystop_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0.0001,
patience=100,restore_best_weights=True)
reg_kernal, reg_activity = 1e-4,1e-4
kernel_regularizer=tf.keras.regularizers.l2(reg_kernal)
activity_regularizer=tf.keras.regularizers.l2(reg_activity)
#kernel_regularizer=None
activity_regularizer=None
lr = 0.005
epochs = 1000
batch_size = 2048
model_name
###Output
_____no_output_____
###Markdown
read and process the data
###Code
experiment = 'LSTM'
np_path = 'npdata/%s/'%(tool_set)
X_train = np.load(np_path+'X_train.npy')
X_val = np.load(np_path+'X_val.npy')
X_test = np.load(np_path+'X_test.npy')
label_train = np.load(np_path+'label_train.npy')
label_val = np.load(np_path+'label_val.npy')
label_test = np.load(np_path+'label_test.npy')
label_cols = np.array(range(label_train.shape[1]//2))
X_train = tf.convert_to_tensor(X_train, dtype=tf.float32)
X_val = tf.convert_to_tensor(X_val, dtype=tf.float32)
X_test = tf.convert_to_tensor(X_test, dtype=tf.float32)
label_train = tf.convert_to_tensor(label_train, dtype=tf.float32)
label_val = tf.convert_to_tensor(label_val, dtype=tf.float32)
label_test = tf.convert_to_tensor(label_test, dtype=tf.float32)
print(X_train.shape, X_val.shape, X_test.shape)
print(label_train.shape, label_val.shape, label_test.shape)
class_weight = np.array([label_train.shape[0]/label_train.shape[1]/label_train.numpy()[:,i].sum() for i in range(label_train.shape[1])])
class_weight
def loss_func(w):
def inner_loss(y,y_pred):
loss = 0
weights = w
for i in range(len(weights)//2):
y_i = y[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
#y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_i = tf.reduce_sum(loss_category*w_i)
loss += loss_i
return loss
return inner_loss
class CustomMultiLossLayer(Layer):
def __init__(self, nb_outputs, **kwargs):
self.nb_outputs = nb_outputs # nb_outputs = 2*#meas_steps
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=variance_on)]
super(CustomMultiLossLayer, self).build(input_shape)
def multi_loss(self, y_true, y_pred, weights):
loss = 0
# cross-entropy part
for i in range(self.nb_outputs//2):
y_i = y_true[:,2*i:2*i+2]
y_pred_i = y_pred[:,2*i:2*i+2]
w_i = weights[2*i:2*i+2]
# y_pred_i /= tf.reduce_sum(y_pred_i, axis=1, keepdims=True)
y_pred_i = tf.nn.softmax(y_pred_i,axis=1)
loss_category = -tf.reduce_mean(y_i*tf.math.log(tf.clip_by_value(y_pred_i,1e-10,1.0)),axis=0)
loss_category*=w_i
vw = self.log_vars[2*i:2*i+2]
p1, p2 = K.exp(-vw[0][0]*2), K.exp(-vw[1][0]*2)
loss_i = p1*loss_category[0] + vw[0][0] + p2*loss_category[1] + vw[1][0]
# sigma = self.log_vars[i][0]
# loss_i = K.exp(-sigma*2)*loss_category[0] + sigma + K.exp(-sigma*2)*loss_category[1] + sigma
# loss_i = K.exp(-2*sigma)*tf.reduce_sum(loss_category) + sigma
# loss_i += (p1+p2)*1e-10
loss += loss_i
return loss
def call(self, inputs, weights):
y_true = inputs[0]
y_pred = inputs[1]
loss = self.multi_loss(y_true, y_pred, weights)
self.add_loss(loss, inputs=inputs)
# We won't actually use the output.
return y_pred
def get_classifier_model(inp_dim1, inp_dim2, out_dim, weights, dff, rate):
inp = Input(shape=(inp_dim1,inp_dim2))
final_layer = tf.keras.Sequential([tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(dff)),
Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
Dense(dff, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer),
tf.keras.layers.Dropout(rate),
tf.keras.layers.Dense(out_dim, activation='sigmoid',
activity_regularizer=activity_regularizer, kernel_regularizer=kernel_regularizer)])
y_pred = final_layer(inp)
y_true = Input(shape=(out_dim,), name='y_true')
out = CustomMultiLossLayer(nb_outputs=out_dim)([y_true, y_pred],weights)
return Model([inp, y_true], out)
classifier = get_classifier_model(inp_dim1 = X_train.shape[1],inp_dim2 = X_train.shape[2], out_dim=label_train.shape[1],
weights=class_weight, dff=dff, rate=dropout_rate)
classifier.compile(loss=None, optimizer=tf.keras.optimizers.Adam(lr=lr))
hist = classifier.fit([X_train, label_train],label_train,
epochs=epochs,
batch_size = batch_size,
verbose=0,
callbacks=[earlystop_callback],
validation_data=([X_val,label_val], label_val))
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='val')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
#plt.yscale('log')
plt.title(experiment)
#plt.savefig(plots_folder + 'training and validation loss')  # plots_folder is not defined in this notebook
plt.show()
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib.pyplot as plt
def plot_roc_cur(fper, tper, title):
plt.plot(fper, tper, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-' + title)
plt.legend()
# plt.savefig(plots_folder + 'roc.png')
plt.show()
def evaluate(classifier, X, label, label_cols, model_name, split_name, plot=False, save=True):
thresh = 0.5
label_pred = classifier.predict([X,label])
label_true = label
# softmax for pos/neg predictions
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
print(split_name, ': the overall accuracy is:', sum(sum(prob.numpy()>thresh))/sum(sum(label.numpy()>0)))
print(split_name, ': the overall recall is:', sum(sum(prob.numpy()>thresh)[1::2])/sum(sum(label.numpy()>0)[1::2]))
meas_steps = label_cols
result = pd.DataFrame(index=meas_steps, columns = ['model', 'split', 'tp','tn','fn','fp','tpr','fpr','min_dis', 'auc'])
for i in range(len(meas_steps)):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[pos!=0].numpy()
neg = neg[neg!=0].numpy()
tp = sum(pos>thresh)
fn = sum(pos<thresh)
tn = sum(neg>thresh)
fp = sum(neg<thresh)
tpr = tp/(tp+fn+1e-9)
fpr = fp/(fp+tn+1e-9)
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
if len(pos) and len(neg):
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
min_dis = np.sqrt(fper**2 + (1-tper)**2).min()
else:
min_dis = None
auc = None
result.iloc[i] = [model_name,split_name,tp,tn,fn,fp,tpr,fpr,min_dis,auc]
if plot:
plot_roc_cur(fper, tper, meas_steps[i])
display(result)
evaluate(classifier, X_train, label_train, label_cols, model_name, 'train', plot=False)
evaluate(classifier, X_val, label_val, label_cols, model_name, 'val', plot=False)
evaluate(classifier, X_test, label_test, label_cols, model_name, 'test', plot=False)
###Output
train : the overall accuracy is: 0.8242137130683411
train : the overall recall is: 0.6967289719626168
###Markdown
5. Plot ROC
###Code
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tool = 'P1'
def plot_roc_cur(fper, tper, title):
plt.plot(fper, tper, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-' + title)
plt.legend()
# plt.savefig(plots_folder + 'roc.png')
plt.show()
def evaluate(classifier, X, label, label_cols, model_name, split_name, plot=False, save=True):
thresh = 0.5
label_pred = classifier.predict([X,label])
label_true = label
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
print(split_name, ': the overall accuracy is:', sum(sum(prob.numpy()>thresh))/sum(sum(label.numpy()>0)))
print(split_name, ': the overall recall is:', sum(sum(prob.numpy()>thresh)[1::2])/sum(sum(label.numpy()>0)[1::2]))
meas_steps = [ms.split(':')[0] for ms in label_cols][::2]
result = pd.DataFrame(index=meas_steps, columns = ['model', 'split', 'tp','tn','fn','fp','tpr','fpr','min_dis', 'auc', 'data_path'])
for i in range(len(meas_steps)):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[label_true[:,2*i+1]!=0].numpy()
neg = neg[label_true[:,2*i]!=0].numpy()
tp = sum(pos>thresh)
fn = sum(pos<thresh)
tn = sum(neg>thresh)
fp = sum(neg<thresh)
tpr = tp/(tp+fn+1e-9)
fpr = fp/(fp+tn+1e-9)
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
if len(pos) and len(neg):
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
min_dis = np.sqrt(fper**2 + (1-tper)**2).min()
else:
min_dis = None
auc = None
result.iloc[i] = [model_name,split_name,tp,tn,fn,fp,tpr,fpr,min_dis,auc,np_path]
if plot:
plot_roc_cur(fper, tper, meas_steps[i])
display(result)
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-Compare:%s'%(tool))
for model in ['SST', 'LSTM', 'QAE']:
results_folder = '/%s/'%(model)
label_true = np.load(results_folder+'labels_test.np.npy')
label_pred = np.load(results_folder+'predictions_test.np.npy')
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
i = 3
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[label_true[:,2*i+1]!=0]
neg = neg[label_true[:,2*i]!=0]
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
if model == 'QAE':
model = 'VWMHQAE'
if model == 'LSTM':
model = 'Bi-LSTM'
plt.plot(fper, tper, label=model+':' + "%.2f"%(auc))
plt.legend()
plt.savefig('roc-%s.png'%(tool))
plt.show()
from sklearn.metrics import roc_curve,roc_auc_score
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tool = 'P1'
def plot_roc_cur(fper, tper, title):
plt.plot(fper, tper, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC-' + title)
plt.legend()
# plt.savefig(plots_folder + 'roc.png')
plt.show()
def evaluate(classifier, X, label, label_cols, model_name, split_name, plot=False, save=True):
thresh = 0.5
label_pred = classifier.predict([X,label],batch_size=2048)
label_true = label
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
print(split_name, ': the overall accuracy is:', sum(sum(prob.numpy()>thresh))/sum(sum(label.numpy()>0)))
print(split_name, ': the overall recall is:', sum(sum(prob.numpy()>thresh)[1::2])/sum(sum(label.numpy()>0)[1::2]))
meas_steps = [ms.split(':')[0] for ms in label_cols][::2]
result = pd.DataFrame(index=meas_steps, columns = ['model', 'split', 'tp','tn','fn','fp','tpr','fpr','min_dis', 'auc', 'data_path'])
for i in range(len(meas_steps)):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[label_true[:,2*i+1]!=0].numpy()
neg = neg[label_true[:,2*i]!=0].numpy()
tp = sum(pos>thresh)
fn = sum(pos<thresh)
tn = sum(neg>thresh)
fp = sum(neg<thresh)
tpr = tp/(tp+fn+1e-9)
fpr = fp/(fp+tn+1e-9)
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
if len(pos) and len(neg):
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
min_dis = np.sqrt(fper**2 + (1-tper)**2).min()
else:
min_dis = None
auc = None
result.iloc[i] = [model_name,split_name,tp,tn,fn,fp,tpr,fpr,min_dis,auc,np_path]
if plot:
plot_roc_cur(fper, tper, meas_steps[i])
if save and split_name=='test':
result.to_csv(results_folder + 'result_np.csv', mode='a')
np.save(results_folder + 'predictions_%s.np'%(split_name),label_pred)
np.save(results_folder + 'labels_%s.np'%(split_name),label)
print('saved to', results_folder)
display(result)
fig, ax = plt.subplots(3,4, figsize=(16, 12))
fig.suptitle('All ROC Curves: %s'%(tool),fontsize=24)
for model in ['SST', 'LSTM', 'QAE']:
results_folder = 'results/%smod_name/%s/'%(tool_set,model)
label_true = np.load(results_folder+'labels_test.np.npy')
label_pred = np.load(results_folder+'predictions_test.np.npy')
for i in range(label_true.shape[1]//2):
label_pred[:,2*i:2*i+2] = tf.nn.softmax(label_pred[:,2*i:2*i+2],axis=1)
prob = label_pred*label_true
for i in range(prob.shape[1]//2):
neg = prob[:,2*i]
pos = prob[:,2*i+1]
pos = pos[label_true[:,2*i+1]!=0]
neg = neg[label_true[:,2*i]!=0]
y_prob = np.append(pos, neg)
y_true = np.append([1]*len(pos), [0]*len(neg))
fper, tper, thresholds = roc_curve(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
auc = roc_auc_score(np.append([1]*len(pos), [0]*len(neg)),np.append(pos, 1-neg))
if model == 'QAE':
model = 'VWMHQAE'
if model == 'LSTM':
model = 'Bi-LSTM'
ax[i//4, i%4].plot([0, 1], [0, 1], color='darkblue', linestyle='--')
ax[i//4, i%4].plot(fper, tper, label=model+':' + "%.2f"%(auc))
ax[i//4, i%4].legend()
plt.savefig('rocs-%s.png'%(tool), dpi=600)
###Output
_____no_output_____ |
CKA.ipynb | ###Markdown
Test CKA
###Code
import numpy as np
import pickle
import gzip
import cca_core
from CKA import linear_CKA, kernel_CKA
X = np.random.randn(100, 64)
Y = np.random.randn(100, 64)
print('Linear CKA, between X and Y: {}'.format(linear_CKA(X, Y)))
print('Linear CKA, between X and X: {}'.format(linear_CKA(X, X)))
print('RBF Kernel CKA, between X and Y: {}'.format(kernel_CKA(X, Y)))
print('RBF Kernel CKA, between X and X: {}'.format(kernel_CKA(X, X)))
###Output
Linear CKA, between X and Y: 0.4080416615691328
Linear CKA, between X and X: 1.0000000000000002
RBF Kernel CKA, between X and Y: 0.5327389546914577
RBF Kernel CKA, between X and X: 1.0
###Markdown
MNIST Example of CKA The MNIST network layers are: 784(input)--500--500--10(output)
###Code
# Load up second hidden layer of MNIST networks and compare
with open("model_activations/MNIST/model_0_lay01.p", "rb") as f:
acts1 = pickle.load(f)
with open("model_activations/MNIST/model_1_lay01.p", "rb") as f:
acts2 = pickle.load(f)
print("activation shapes", acts1.shape, acts2.shape)
#results = cca_core.get_cca_similarity(acts1, acts2, epsilon=1e-10, verbose=False)
# A limitation of CKA: it is time-consuming for large numbers of data points
print('Linear CKA: {}'.format(linear_CKA(acts1.T, acts2.T)))
print('RBF Kernel: {}'.format(kernel_CKA(acts1.T, acts2.T)))
###Output
Linear CKA: 0.8539226154965
RBF Kernel: 0.8674208076036767
###Markdown
The results of CCA for the same feature
###Code
# similarity index by CCA
results = cca_core.get_cca_similarity(acts1, acts2, epsilon=1e-10, verbose=False)
print("Mean CCA similarity", np.mean(results["cca_coef1"]))
###Output
Mean CCA similarity 0.45669867603921466
###Markdown
CKA for Conv Nets with SVHNSVHN consists of images that are 32 x 32 (height 32, width 32). Our architecture looks like:**conv1(3x3,32 channels)-->maxpool(2x2)-->conv2(3x3,64 channels)-->maxpool(2x2)-->batchnorm-->fc(200)-->fc(10)**
###Code
# Load up conv 2 activations from SVHN
with gzip.open("model_activations/SVHN/model_0_lay03.p", "rb") as f:
acts1 = pickle.load(f)
with gzip.open("model_activations/SVHN/model_1_lay03.p", "rb") as f:
acts2 = pickle.load(f)
print(acts1.shape, acts2.shape)
###Output
(1000, 16, 16, 64) (1000, 16, 16, 64)
###Markdown
Average Pool for the features
###Code
avg_acts1 = np.mean(acts1, axis=(1,2))
avg_acts2 = np.mean(acts2, axis=(1,2))
print(avg_acts1.shape, avg_acts2.shape)
# CKA
print('Linear CKA: {}'.format(linear_CKA(avg_acts1, avg_acts2)))
print('RBF Kernel CKA: {}'.format(kernel_CKA(avg_acts1, avg_acts2)))
# CCA
a_results = cca_core.get_cca_similarity(avg_acts1.T, avg_acts2.T, epsilon=1e-10, verbose=False)
print("Mean CCA similarity", np.mean(a_results["cca_coef1"]))
###Output
Linear CKA: 0.9241440273864195
RBF Kernel CKA: 0.9197327226169598
Mean CCA similarity 0.6382306681306912
###Markdown
Interpolate for the features
###Code
with gzip.open("./model_activations/SVHN/model_1_lay04.p", "rb") as f:
pool2 = pickle.load(f)
print("shape of first conv", acts1.shape, "shape of second conv", pool2.shape)
from scipy import interpolate
num_d, h, w, _ = acts1.shape
num_c = pool2.shape[-1]
pool2_interp = np.zeros((num_d, h, w, num_c))
for d in range(num_d):
    for c in range(num_c):
        # form interpolation function
        idxs1 = np.linspace(0, pool2.shape[1],
                            pool2.shape[1],
                            endpoint=False)
        idxs2 = np.linspace(0, pool2.shape[2],
                            pool2.shape[2],
                            endpoint=False)
        arr = pool2[d,:,:,c]
        f_interp = interpolate.interp2d(idxs1, idxs2, arr)
        # create larger array by evaluating the interpolant on the finer grid
        large_idxs1 = np.linspace(0, pool2.shape[1],
                                  acts1.shape[1],
                                  endpoint=False)
        large_idxs2 = np.linspace(0, pool2.shape[2],
                                  acts1.shape[2],
                                  endpoint=False)
        pool2_interp[d, :, :, c] = f_interp(large_idxs1, large_idxs2)
print("new shape", pool2_interp.shape)
num_datapoints, h, w, channels = acts1.shape
f_acts1 = acts1.reshape((num_datapoints*h*w, channels))
num_datapoints, h, w, channels = pool2_interp.shape
f_pool2 = pool2_interp.reshape((num_datapoints*h*w, channels))
# CCA
f_results = cca_core.get_cca_similarity(f_acts1.T[:,::5], f_pool2.T[:,::5], epsilon=1e-10, verbose=False)
print("Mean CCA similarity", np.mean(f_results["cca_coef1"]))
# CKA
#print('Linear CKA: {}'.format(linear_CKA(f_acts1, f_pool2))) # the shape is too large for CKA
#print('RBF Kernel CKA: {}'.format(kernel_CKA(f_acts1, f_pool2))) # the shape is too large for CKA
f_acts1.shape
###Output
_____no_output_____ |
_notebooks/2021-11-6-EDA_ConversionRate.ipynb | ###Markdown
Reference: [Hands-On-Data-Science-for-Marketing, part 2](https://github.com/PacktPublishing/Hands-On-Data-Science-for-Marketing)
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Conversion Rate
###Code
df = pd.read_csv('bank-additional-full.csv', sep=';')
df.shape
df.head()
df['conversion'] = df['y'].apply(lambda x: 1 if x == 'yes' else 0)
df.head()
###Output
_____no_output_____
###Markdown
1. Aggregate Conversion Rate
###Code
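# aggregate conversion rate = (# clients who converted) / (total # clients)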
print('total conversions: %i out of %i' % (df.conversion.sum(), df.shape[0]))
print('conversion rate: %0.2f%%' % (df.conversion.sum() / df.shape[0] * 100.0))
###Output
conversion rate: 11.27%
###Markdown
2. Conversion Rates by Number of Contacts
###Code
pd.DataFrame(
df.groupby(
by='campaign'
)['conversion'].sum()
)
pd.DataFrame(
df.groupby(
by='campaign'
)['conversion'].count()
)
conversions_by_contacts = df.groupby(
by='campaign'
)['conversion'].sum() / df.groupby(
by='campaign'
)['conversion'].count() * 100.0
pd.DataFrame(conversions_by_contacts)
ax = conversions_by_contacts[:10].plot(
grid=True,
figsize=(10, 7),
xticks=conversions_by_contacts.index[:10],
title='Conversion Rates by Number of Contacts'
)
ax.set_ylim([0, 15])
ax.set_xlabel('number of contacts')
ax.set_ylabel('conversion rate (%)')
plt.show()
###Output
_____no_output_____
###Markdown
3. Conversion Rates by Age - Line Chart
###Code
pd.DataFrame(
df.groupby(
by='age'
)['conversion'].sum()
)
pd.DataFrame(
df.groupby(
by='age'
)['conversion'].count()
)
conversions_by_age = df.groupby(
by='age'
)['conversion'].sum() / df.groupby(
by='age'
)['conversion'].count() * 100.0
pd.DataFrame(conversions_by_age)
ax = conversions_by_age.plot(
grid=True,
figsize=(10, 7),
title='Conversion Rates by Age'
)
ax.set_xlabel('age')
ax.set_ylabel('conversion rate (%)')
plt.show()
###Output
_____no_output_____
###Markdown
- Age Groups
###Code
df['age_group'] = df['age'].apply(
lambda x: '[18, 30)' if x < 30 else '[30, 40)' if x < 40 \
else '[40, 50)' if x < 50 else '[50, 60)' if x < 60 \
else '[60, 70)' if x < 70 else '70+'
)
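# A more declarative alternative to the nested lambda above, using pd.cut
# (a sketch: 'age_group_cut' is an illustrative new column; the bin edges
# mirror the conditions above, with right-open intervals)
bins = [0, 30, 40, 50, 60, 70, 200]
labels = ['[18, 30)', '[30, 40)', '[40, 50)', '[50, 60)', '[60, 70)', '70+']
df['age_group_cut'] = pd.cut(df['age'], bins=bins, labels=labels, right=False)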
df.head()
pd.DataFrame(
df.groupby(
by='age_group'
)['conversion'].sum()
)
pd.DataFrame(
df.groupby(
by='age_group'
)['conversion'].count()
)
conversions_by_age_group = df.groupby(
by='age_group'
)['conversion'].sum() / df.groupby(
by='age_group'
)['conversion'].count() * 100.0
pd.DataFrame(conversions_by_age_group)
ax = conversions_by_age_group.loc[
['[18, 30)', '[30, 40)', '[40, 50)', '[50, 60)', '[60, 70)', '70+']
].plot(
kind='bar',
color='skyblue',
grid=True,
figsize=(10, 7),
title='Conversion Rates by Age Groups'
)
ax.set_xlabel('age')
ax.set_ylabel('conversion rate (%)')
plt.show()
###Output
_____no_output_____
###Markdown
4. Conversions vs. Non-Conversions
4.1. Marital Status
###Code
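# pivot_table with aggfunc=len simply counts the records in each
# (marital status, conversion) cell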
conversions_by_marital_status_df = pd.pivot_table(df, values='y', index='marital', columns='conversion', aggfunc=len)
conversions_by_marital_status_df
conversions_by_marital_status_df.columns = ['non_conversions', 'conversions']
conversions_by_marital_status_df
###Output
_____no_output_____
###Markdown
4.2. Education
###Code
conversions_by_education_df = pd.pivot_table(df, values='y', index='education', columns='conversion', aggfunc=len)
conversions_by_education_df
conversions_by_education_df.columns = ['non_conversions', 'conversions']
conversions_by_education_df
conversions_by_education_df.plot(
kind='pie',
figsize=(15, 7),
startangle=90,
subplots=True,
autopct=lambda x: '%0.1f%%' % x,
legend=False
)
plt.show()
###Output
_____no_output_____
###Markdown
4.3. Last Contact Duration
###Code
df.groupby('conversion')['duration'].describe()
duration_df = pd.concat([
df.loc[df['conversion'] == 1, 'duration'].reset_index(drop=True),
df.loc[df['conversion'] == 0, 'duration'].reset_index(drop=True)
], axis=1)
duration_df.columns = ['conversions', 'non_conversions']
duration_df = duration_df / (60 * 60)  # 'duration' is recorded in seconds; convert to hours
duration_df
ax = duration_df.plot(
kind='box',
grid=True,
figsize=(10, 10),
)
ax.set_ylabel('last contact duration (hours)')
ax.set_title('Last Contact Duration')
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order)
###Markdown
5. Conversions by Age Groups & Marital Status
###Code
age_marital_df = df.groupby(['age_group', 'marital'])['conversion'].sum().unstack('marital').fillna(0)
age_marital_df
age_marital_df = age_marital_df.divide(
df.groupby(
by='age_group'
)['conversion'].count(),
axis=0
)
age_marital_df
ax = age_marital_df.loc[
['[18, 30)', '[30, 40)', '[40, 50)', '[50, 60)', '[60, 70)', '70+']
].plot(
kind='bar',
grid=True,
figsize=(10,7)
)
ax.set_title('Conversion rates by Age & Marital Status')
ax.set_xlabel('age group')
ax.set_ylabel('conversion rate (%)')
plt.show()
ax = age_marital_df.loc[
['[18, 30)', '[30, 40)', '[40, 50)', '[50, 60)', '[60, 70)', '70+']
].plot(
kind='bar',
stacked=True,
grid=True,
figsize=(10,7)
)
ax.set_title('Conversion rates by Age & Marital Status')
ax.set_xlabel('age group')
ax.set_ylabel('conversion rate (%)')
plt.show()
conversions_by_marital_status_df.plot(
kind='pie',
figsize=(15, 7),
startangle=90,
subplots=True,
autopct=lambda x: '%0.1f%%' % x
)
plt.show()
###Output
_____no_output_____ |
wip/entropy.ipynb | ###Markdown
Evolutionary Shape Prediction
An experiment in evolutionary software using *reinforcement learning* to discover interesting data objects within a given set of graph data.
###Code
import kglab
namespaces = {
"nom": "http://example.org/#",
"wtm": "http://purl.org/heals/food/",
"ind": "http://purl.org/heals/ingredient/",
"skos": "http://www.w3.org/2004/02/skos/core#",
}
kg = kglab.KnowledgeGraph(
name = "A recipe KG example based on Food.com",
base_uri = "https://www.food.com/recipe/",
language = "en",
namespaces = namespaces,
)
kg.load_rdf("dat/recipes.ttl")
import sys
import inspect
__name__ = "kglab"
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
clsmembers
###Output
_____no_output_____
###Markdown
Graph measures and topological analysis
Let's measure this graph, to develop some estimators that we'll use later...
###Code
import pandas as pd
pd.set_option("max_rows", None)
measure = kglab.Measure()
measure.measure_graph(kg)
print("edges", measure.edge_count)
print("nodes", measure.node_count)
measure.s_gen.get_tally()
measure.p_gen.get_tally()
measure.o_gen.get_tally()
measure.l_gen.get_tally()
df, link_map = measure.n_gen.get_tally_map()
df
df, link_map = measure.e_gen.get_tally_map()
print(link_map)
###Output
defaultdict(<class 'set'>, {rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'): {rdflib.term.URIRef('http://example.org/#Pancake'), rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#ConceptScheme'), rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#Concept'), rdflib.term.URIRef('http://example.org/#Component'), rdflib.term.URIRef('http://purl.org/heals/food/Recipe'), rdflib.term.URIRef('http://example.org/#Noodle')}, rdflib.term.URIRef('http://purl.org/heals/food/hasIngredient'): {rdflib.term.URIRef('http://purl.org/heals/ingredient/Water'), rdflib.term.URIRef('http://purl.org/heals/ingredient/AppleCiderVinegar'), rdflib.term.URIRef('http://purl.org/heals/ingredient/AllPurposeFlour'), rdflib.term.URIRef('http://purl.org/heals/ingredient/CowMilk'), rdflib.term.URIRef('http://purl.org/heals/ingredient/VanillaExtract'), rdflib.term.URIRef('http://purl.org/heals/ingredient/WholeWheatFlour'), rdflib.term.URIRef('http://purl.org/heals/ingredient/Garlic'), rdflib.term.URIRef('http://purl.org/heals/ingredient/WhiteSugar'), rdflib.term.URIRef('http://purl.org/heals/ingredient/ChickenEgg'), rdflib.term.URIRef('http://purl.org/heals/ingredient/BlackPepper'), rdflib.term.URIRef('http://purl.org/heals/ingredient/Honey'), rdflib.term.URIRef('http://purl.org/heals/ingredient/Bacon'), rdflib.term.URIRef('http://purl.org/heals/ingredient/Butter'), rdflib.term.URIRef('http://purl.org/heals/ingredient/Salt'), rdflib.term.URIRef('http://purl.org/heals/ingredient/OliveOil'), rdflib.term.URIRef('http://purl.org/heals/ingredient/BrownSugar')}, rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#domain'): {rdflib.term.URIRef('http://example.org/#Component')}, rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#narrower'): {rdflib.term.URIRef('http://www.wikidata.org/entity/Q20065'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q1025010'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q815898'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q899392'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q178')}, rdflib.term.URIRef('http://purl.org/dc/terms/identifier'): {rdflib.term.URIRef('http://www.wikidata.org/entity/Q746549'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q178024'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q192874'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q29493'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q44541'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q3089784'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q627371')}, rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#range'): {rdflib.term.URIRef('http://example.org/#Component'), rdflib.term.URIRef('http://example.org/#Process')}, rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#closeMatch'): {rdflib.term.URIRef('http://www.wikidata.org/entity/Q12200'), rdflib.term.URIRef('https://www.foodonline.com/solution/mixing-and-blending'), rdflib.term.URIRef('http://www.wikidata.org/entity/Q1854639')}, rdflib.term.URIRef('http://example.org/#madeFrom'): {rdflib.term.URIRef('http://example.org/#Batter'), rdflib.term.URIRef('http://example.org/#Dough')}, rdflib.term.URIRef('http://example.org/#usesProcess'): {rdflib.term.URIRef('http://example.org/#Mixing'), rdflib.term.URIRef('http://example.org/#Kneading')}, rdflib.term.URIRef('http://purl.org/dc/terms/publisher'): {rdflib.term.URIRef('https://derwen.ai/')}})
###Markdown
ShapeFactory and evolved shapes
###Code
factory = kglab.ShapeFactory(kg, measure)
subgraph = factory.subgraph
es0 = factory.new_shape()
print(es0.serialize(subgraph))
[ print(r) for r in es0.get_rdf() ];
###Output
[6, (-1, [(299, 298)]), (298, [])]
_:Ndff2769ab22c49cdb1c0fe121b1aeb39 rdf:type nom:Component .
###Markdown
Now we can use this `ShapeFactory` object to evolve a *shape* within the graph, then generate a SPARQL query to test its cardinality:
###Code
sparql, bindings = es0.get_sparql()
print(sparql)
print(bindings)
for row in kg.query(sparql):
    print(row)
###Output
(rdflib.term.URIRef('https://www.food.com/recipe/175046'),)
(rdflib.term.URIRef('https://www.food.com/recipe/382101'),)
(rdflib.term.URIRef('https://www.food.com/recipe/161962'),)
(rdflib.term.URIRef('https://www.food.com/recipe/16163'),)
(rdflib.term.URIRef('https://www.food.com/recipe/273527'),)
(rdflib.term.URIRef('https://www.food.com/recipe/111008'),)
(rdflib.term.URIRef('https://www.food.com/recipe/157367'),)
(rdflib.term.URIRef('https://www.food.com/recipe/35345'),)
(rdflib.term.URIRef('https://www.food.com/recipe/85817'),)
(rdflib.term.URIRef('https://www.food.com/recipe/423015'),)
(rdflib.term.URIRef('http://example.org/#Pancake'),)
(rdflib.term.URIRef('https://www.food.com/recipe/441475'),)
(rdflib.term.URIRef('https://www.food.com/recipe/118047'),)
(rdflib.term.URIRef('https://www.food.com/recipe/272433'),)
(rdflib.term.URIRef('https://www.food.com/recipe/437729'),)
(rdflib.term.URIRef('https://www.food.com/recipe/471598'),)
(rdflib.term.URIRef('https://www.food.com/recipe/438738'),)
(rdflib.term.URIRef('https://www.food.com/recipe/40772'),)
(rdflib.term.URIRef('https://www.food.com/recipe/154808'),)
(rdflib.term.URIRef('https://www.food.com/recipe/277824'),)
(rdflib.term.URIRef('https://www.food.com/recipe/274637'),)
(rdflib.term.URIRef('https://www.food.com/recipe/324267'),)
(rdflib.term.URIRef('https://www.food.com/recipe/144841'),)
(rdflib.term.URIRef('https://www.food.com/recipe/163724'),)
(rdflib.term.URIRef('https://www.food.com/recipe/159039'),)
(rdflib.term.URIRef('http://example.org/#Process'),)
(rdflib.term.URIRef('https://www.food.com/recipe/221279'),)
(rdflib.term.URIRef('https://www.food.com/recipe/368491'),)
(rdflib.term.URIRef('https://www.food.com/recipe/73828'),)
(rdflib.term.URIRef('https://www.food.com/recipe/64564'),)
(rdflib.term.URIRef('https://www.food.com/recipe/220361'),)
(rdflib.term.URIRef('https://www.food.com/recipe/91311'),)
(rdflib.term.URIRef('https://www.food.com/recipe/96087'),)
(rdflib.term.URIRef('https://www.food.com/recipe/459'),)
(rdflib.term.URIRef('https://www.food.com/recipe/152895'),)
(rdflib.term.URIRef('https://www.food.com/recipe/58476'),)
(rdflib.term.URIRef('https://www.food.com/recipe/93990'),)
(rdflib.term.URIRef('https://www.food.com/recipe/80546'),)
(rdflib.term.URIRef('https://www.food.com/recipe/437607'),)
(rdflib.term.URIRef('https://www.food.com/recipe/186504'),)
(rdflib.term.URIRef('https://www.food.com/recipe/16051'),)
(rdflib.term.URIRef('https://www.food.com/recipe/123656'),)
(rdflib.term.URIRef('https://www.food.com/recipe/229516'),)
(rdflib.term.URIRef('https://www.food.com/recipe/146591'),)
(rdflib.term.URIRef('http://example.org/#Dough'),)
(rdflib.term.URIRef('https://www.food.com/recipe/5386'),)
(rdflib.term.URIRef('https://www.food.com/recipe/105830'),)
(rdflib.term.URIRef('https://www.food.com/recipe/430777'),)
(rdflib.term.URIRef('https://www.food.com/recipe/359334'),)
(rdflib.term.URIRef('https://www.food.com/recipe/50402'),)
(rdflib.term.URIRef('https://www.food.com/recipe/67535'),)
(rdflib.term.URIRef('https://www.food.com/recipe/12402'),)
(rdflib.term.URIRef('https://www.food.com/recipe/31540'),)
(rdflib.term.URIRef('https://www.food.com/recipe/43126'),)
(rdflib.term.URIRef('https://www.food.com/recipe/136571'),)
(rdflib.term.URIRef('https://www.food.com/recipe/311010'),)
(rdflib.term.URIRef('https://www.food.com/recipe/535853'),)
(rdflib.term.URIRef('https://www.food.com/recipe/358908'),)
(rdflib.term.URIRef('https://www.food.com/recipe/124176'),)
(rdflib.term.URIRef('https://www.food.com/recipe/262038'),)
(rdflib.term.URIRef('https://www.food.com/recipe/273987'),)
(rdflib.term.URIRef('http://example.org/#Kneading'),)
(rdflib.term.URIRef('https://www.food.com/recipe/77354'),)
(rdflib.term.URIRef('https://www.food.com/recipe/373964'),)
(rdflib.term.URIRef('https://www.food.com/recipe/180624'),)
(rdflib.term.URIRef('https://www.food.com/recipe/235292'),)
(rdflib.term.URIRef('https://www.food.com/recipe/371414'),)
(rdflib.term.URIRef('https://www.food.com/recipe/101876'),)
(rdflib.term.URIRef('https://www.food.com/recipe/497918'),)
(rdflib.term.URIRef('https://www.food.com/recipe/126223'),)
(rdflib.term.URIRef('https://www.food.com/recipe/356829'),)
(rdflib.term.URIRef('https://www.food.com/recipe/104441'),)
(rdflib.term.URIRef('https://www.food.com/recipe/262538'),)
(rdflib.term.URIRef('https://www.food.com/recipe/271545'),)
(rdflib.term.URIRef('https://www.food.com/recipe/49904'),)
(rdflib.term.URIRef('https://www.food.com/recipe/327593'),)
(rdflib.term.URIRef('https://www.food.com/recipe/61108'),)
(rdflib.term.URIRef('https://www.food.com/recipe/324639'),)
(rdflib.term.URIRef('https://www.food.com/recipe/127989'),)
(rdflib.term.URIRef('https://www.food.com/recipe/124131'),)
(rdflib.term.URIRef('https://www.food.com/recipe/328388'),)
(rdflib.term.URIRef('https://www.food.com/recipe/268209'),)
(rdflib.term.URIRef('https://www.food.com/recipe/86710'),)
(rdflib.term.URIRef('https://www.food.com/recipe/440398'),)
(rdflib.term.URIRef('https://www.food.com/recipe/139989'),)
(rdflib.term.URIRef('https://www.food.com/recipe/15984'),)
(rdflib.term.URIRef('https://www.food.com/recipe/201097'),)
(rdflib.term.URIRef('https://www.food.com/recipe/94616'),)
(rdflib.term.URIRef('https://www.food.com/recipe/72402'),)
(rdflib.term.URIRef('https://www.food.com/recipe/120884'),)
(rdflib.term.URIRef('https://www.food.com/recipe/499338'),)
(rdflib.term.URIRef('https://www.food.com/recipe/353290'),)
(rdflib.term.URIRef('https://www.food.com/recipe/137158'),)
(rdflib.term.URIRef('http://example.org/#Mixing'),)
(rdflib.term.URIRef('https://www.food.com/recipe/216474'),)
(rdflib.term.URIRef('https://www.food.com/recipe/14862'),)
(rdflib.term.URIRef('https://www.food.com/recipe/170296'),)
(rdflib.term.URIRef('https://www.food.com/recipe/264735'),)
(rdflib.term.URIRef('https://www.food.com/recipe/503199'),)
(rdflib.term.URIRef('https://www.food.com/recipe/350593'),)
(rdflib.term.URIRef('https://www.food.com/recipe/64793'),)
(rdflib.term.URIRef('http://example.org/#Component'),)
(rdflib.term.URIRef('https://www.food.com/recipe/109179'),)
(rdflib.term.URIRef('https://www.food.com/recipe/151617'),)
(rdflib.term.URIRef('https://www.food.com/recipe/471396'),)
(rdflib.term.URIRef('https://www.food.com/recipe/262949'),)
(rdflib.term.URIRef('https://www.food.com/recipe/135405'),)
(rdflib.term.URIRef('https://www.food.com/recipe/9037'),)
(rdflib.term.URIRef('https://www.food.com/recipe/7536'),)
(rdflib.term.URIRef('https://www.food.com/recipe/71214'),)
(rdflib.term.URIRef('https://www.food.com/recipe/351327'),)
(rdflib.term.URIRef('https://www.food.com/recipe/235671'),)
(rdflib.term.URIRef('https://www.food.com/recipe/60149'),)
(rdflib.term.URIRef('https://www.food.com/recipe/308224'),)
(rdflib.term.URIRef('https://www.food.com/recipe/103073'),)
(rdflib.term.URIRef('https://www.food.com/recipe/440609'),)
(rdflib.term.URIRef('https://www.food.com/recipe/320182'),)
(rdflib.term.URIRef('https://www.food.com/recipe/12055'),)
(rdflib.term.URIRef('https://www.food.com/recipe/76907'),)
(rdflib.term.URIRef('https://www.food.com/recipe/138985'),)
(rdflib.term.URIRef('https://www.food.com/recipe/117025'),)
(rdflib.term.URIRef('https://www.food.com/recipe/95116'),)
(rdflib.term.URIRef('https://www.food.com/recipe/150765'),)
(rdflib.term.URIRef('https://www.food.com/recipe/384137'),)
(rdflib.term.URIRef('https://www.food.com/recipe/48178'),)
(rdflib.term.URIRef('https://www.food.com/recipe/90391'),)
(rdflib.term.URIRef('https://www.food.com/recipe/23715'),)
(rdflib.term.URIRef('https://www.food.com/recipe/100230'),)
(rdflib.term.URIRef('https://www.food.com/recipe/72759'),)
(rdflib.term.URIRef('https://www.food.com/recipe/144415'),)
(rdflib.term.URIRef('https://www.food.com/recipe/359793'),)
(rdflib.term.URIRef('https://www.food.com/recipe/34614'),)
(rdflib.term.URIRef('https://www.food.com/recipe/74250'),)
(rdflib.term.URIRef('https://www.food.com/recipe/426449'),)
(rdflib.term.URIRef('https://www.food.com/recipe/284620'),)
(rdflib.term.URIRef('https://www.food.com/recipe/252783'),)
(rdflib.term.URIRef('https://www.food.com/recipe/37825'),)
(rdflib.term.URIRef('https://www.food.com/recipe/238849'),)
(rdflib.term.URIRef('https://www.food.com/recipe/317697'),)
(rdflib.term.URIRef('https://www.food.com/recipe/483334'),)
(rdflib.term.URIRef('https://www.food.com/recipe/234763'),)
(rdflib.term.URIRef('https://www.food.com/recipe/362055'),)
(rdflib.term.URIRef('https://www.food.com/recipe/137357'),)
(rdflib.term.URIRef('https://www.food.com/recipe/470634'),)
(rdflib.term.URIRef('https://www.food.com/recipe/371915'),)
(rdflib.term.URIRef('https://www.food.com/recipe/45057'),)
(rdflib.term.URIRef('https://www.food.com/recipe/315525'),)
(rdflib.term.URIRef('https://www.food.com/recipe/31041'),)
(rdflib.term.URIRef('https://www.food.com/recipe/1975'),)
(rdflib.term.URIRef('https://www.food.com/recipe/35828'),)
(rdflib.term.URIRef('https://www.food.com/recipe/284530'),)
(rdflib.term.URIRef('http://example.org/#Batter'),)
(rdflib.term.URIRef('https://www.food.com/recipe/175652'),)
(rdflib.term.URIRef('https://www.food.com/recipe/136111'),)
(rdflib.term.URIRef('https://www.food.com/recipe/4643'),)
(rdflib.term.URIRef('https://www.food.com/recipe/1079'),)
(rdflib.term.URIRef('https://www.food.com/recipe/103964'),)
(rdflib.term.URIRef('https://www.food.com/recipe/75949'),)
(rdflib.term.URIRef('https://www.food.com/recipe/93428'),)
(rdflib.term.URIRef('https://www.food.com/recipe/220141'),)
(rdflib.term.URIRef('https://www.food.com/recipe/489'),)
(rdflib.term.URIRef('https://www.food.com/recipe/458'),)
(rdflib.term.URIRef('https://www.food.com/recipe/139338'),)
(rdflib.term.URIRef('https://www.food.com/recipe/113757'),)
(rdflib.term.URIRef('https://www.food.com/recipe/424328'),)
(rdflib.term.URIRef('https://www.food.com/recipe/23496'),)
(rdflib.term.URIRef('https://www.food.com/recipe/107499'),)
(rdflib.term.URIRef('https://www.food.com/recipe/97832'),)
(rdflib.term.URIRef('https://www.food.com/recipe/109196'),)
(rdflib.term.URIRef('https://www.food.com/recipe/173544'),)
(rdflib.term.URIRef('https://www.food.com/recipe/232248'),)
(rdflib.term.URIRef('https://www.food.com/recipe/331765'),)
(rdflib.term.URIRef('https://www.food.com/recipe/137046'),)
(rdflib.term.URIRef('https://www.food.com/recipe/72129'),)
(rdflib.term.URIRef('https://www.food.com/recipe/272746'),)
(rdflib.term.URIRef('https://www.food.com/recipe/194304'),)
(rdflib.term.URIRef('https://www.food.com/recipe/357947'),)
(rdflib.term.URIRef('https://www.food.com/recipe/440451'),)
(rdflib.term.URIRef('https://www.food.com/recipe/15219'),)
(rdflib.term.URIRef('https://www.food.com/recipe/183115'),)
(rdflib.term.URIRef('https://www.food.com/recipe/299759'),)
(rdflib.term.URIRef('https://www.food.com/recipe/58590'),)
(rdflib.term.URIRef('https://www.food.com/recipe/37971'),)
(rdflib.term.URIRef('https://www.food.com/recipe/482308'),)
(rdflib.term.URIRef('https://www.food.com/recipe/261361'),)
(rdflib.term.URIRef('https://www.food.com/recipe/45260'),)
(rdflib.term.URIRef('https://www.food.com/recipe/186892'),)
(rdflib.term.URIRef('https://www.food.com/recipe/382707'),)
(rdflib.term.URIRef('https://www.food.com/recipe/165947'),)
(rdflib.term.URIRef('https://www.food.com/recipe/284518'),)
(rdflib.term.URIRef('https://www.food.com/recipe/60413'),)
(rdflib.term.URIRef('https://www.food.com/recipe/268242'),)
(rdflib.term.URIRef('https://www.food.com/recipe/145953'),)
(rdflib.term.URIRef('https://www.food.com/recipe/125648'),)
(rdflib.term.URIRef('https://www.food.com/recipe/142565'),)
(rdflib.term.URIRef('https://www.food.com/recipe/256577'),)
(rdflib.term.URIRef('https://www.food.com/recipe/279314'),)
(rdflib.term.URIRef('https://www.food.com/recipe/19097'),)
(rdflib.term.URIRef('https://www.food.com/recipe/508734'),)
(rdflib.term.URIRef('http://example.org/#NOM_Vocab'),)
(rdflib.term.URIRef('https://www.food.com/recipe/255660'),)
(rdflib.term.URIRef('https://www.food.com/recipe/446011'),)
(rdflib.term.URIRef('https://www.food.com/recipe/20191'),)
(rdflib.term.URIRef('https://www.food.com/recipe/400'),)
(rdflib.term.URIRef('https://www.food.com/recipe/160167'),)
(rdflib.term.URIRef('https://www.food.com/recipe/164636'),)
(rdflib.term.URIRef('https://www.food.com/recipe/140570'),)
(rdflib.term.URIRef('https://www.food.com/recipe/86356'),)
(rdflib.term.URIRef('https://www.food.com/recipe/112374'),)
(rdflib.term.URIRef('https://www.food.com/recipe/504246'),)
(rdflib.term.URIRef('https://www.food.com/recipe/272027'),)
(rdflib.term.URIRef('https://www.food.com/recipe/154001'),)
(rdflib.term.URIRef('https://www.food.com/recipe/175450'),)
(rdflib.term.URIRef('https://www.food.com/recipe/280663'),)
(rdflib.term.URIRef('https://www.food.com/recipe/148900'),)
(rdflib.term.URIRef('https://www.food.com/recipe/62799'),)
(rdflib.term.URIRef('https://www.food.com/recipe/419998'),)
(rdflib.term.URIRef('https://www.food.com/recipe/160402'),)
(rdflib.term.URIRef('https://www.food.com/recipe/124106'),)
(rdflib.term.URIRef('https://www.food.com/recipe/197303'),)
(rdflib.term.URIRef('https://www.food.com/recipe/171106'),)
(rdflib.term.URIRef('https://www.food.com/recipe/202806'),)
(rdflib.term.URIRef('https://www.food.com/recipe/200871'),)
(rdflib.term.URIRef('https://www.food.com/recipe/162011'),)
(rdflib.term.URIRef('https://www.food.com/recipe/41305'),)
(rdflib.term.URIRef('https://www.food.com/recipe/255748'),)
(rdflib.term.URIRef('https://www.food.com/recipe/399'),)
(rdflib.term.URIRef('https://www.food.com/recipe/186307'),)
(rdflib.term.URIRef('https://www.food.com/recipe/208673'),)
(rdflib.term.URIRef('http://example.org/#Noodle'),)
(rdflib.term.URIRef('https://www.food.com/recipe/157638'),)
(rdflib.term.URIRef('https://www.food.com/recipe/21301'),)
(rdflib.term.URIRef('https://www.food.com/recipe/447296'),)
(rdflib.term.URIRef('https://www.food.com/recipe/406738'),)
(rdflib.term.URIRef('https://www.food.com/recipe/78459'),)
(rdflib.term.URIRef('https://www.food.com/recipe/38959'),)
(rdflib.term.URIRef('https://www.food.com/recipe/189437'),)
(rdflib.term.URIRef('https://www.food.com/recipe/320154'),)
(rdflib.term.URIRef('https://www.food.com/recipe/183808'),)
(rdflib.term.URIRef('https://www.food.com/recipe/40276'),)
(rdflib.term.URIRef('https://www.food.com/recipe/349278'),)
(rdflib.term.URIRef('https://www.food.com/recipe/188921'),)
(rdflib.term.URIRef('https://www.food.com/recipe/467335'),)
(rdflib.term.URIRef('https://www.food.com/recipe/117827'),)
(rdflib.term.URIRef('https://www.food.com/recipe/82226'),)
(rdflib.term.URIRef('https://www.food.com/recipe/19104'),)
(rdflib.term.URIRef('https://www.food.com/recipe/511497'),)
(rdflib.term.URIRef('https://www.food.com/recipe/141995'),)
(rdflib.term.URIRef('https://www.food.com/recipe/335865'),)
(rdflib.term.URIRef('http://example.org/#madeFrom'),)
(rdflib.term.URIRef('http://example.org/#usesProcess'),)
###Markdown
We can also use this library to construct a specific shape programmatically, e.g., a recipe:
###Code
es1 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(type_uri, terminal=True)
es1.add_link(es1.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/VanillaExtract"
edge_node = kglab.EvoShapeNode(edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/AllPurposeFlour"
edge_node = kglab.EvoShapeNode(edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Salt"
edge_node = kglab.EvoShapeNode(edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/ChickenEgg"
edge_node = kglab.EvoShapeNode(edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
[ print(r) for r in es1.get_rdf() ]
es1.serialize(subgraph)
sparql, bindings = es1.get_sparql()
print(sparql)
print(bindings)
###Output
SELECT DISTINCT ?v2 WHERE { ?v2 ?pred2_0 ?obj2_0 . ?v2 ?pred2_1 ?obj2_1 . ?v2 ?pred2_1 ?node0 . ?v2 ?pred2_1 ?node1 . ?v2 ?pred2_1 ?obj2_4 }
{'?node0': rdflib.term.URIRef('http://purl.org/heals/ingredient/AllPurposeFlour'), '?node1': rdflib.term.URIRef('http://purl.org/heals/ingredient/Salt'), '?pred2_0': rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), '?obj2_0': rdflib.term.URIRef('http://purl.org/heals/food/Recipe'), '?pred2_1': rdflib.term.URIRef('http://purl.org/heals/food/hasIngredient'), '?obj2_1': rdflib.term.URIRef('http://purl.org/heals/ingredient/VanillaExtract'), '?obj2_4': rdflib.term.URIRef('http://purl.org/heals/ingredient/ChickenEgg')}
###Markdown
Query to find matching instances for this shape `es1` within the graph:
###Code
for row in kg.query(sparql, bindings=bindings):
    print(row)
###Output
(rdflib.term.URIRef('https://www.food.com/recipe/371414'),)
(rdflib.term.URIRef('https://www.food.com/recipe/62799'),)
(rdflib.term.URIRef('https://www.food.com/recipe/20191'),)
(rdflib.term.URIRef('https://www.food.com/recipe/135405'),)
(rdflib.term.URIRef('https://www.food.com/recipe/144415'),)
(rdflib.term.URIRef('https://www.food.com/recipe/151617'),)
(rdflib.term.URIRef('https://www.food.com/recipe/73828'),)
(rdflib.term.URIRef('https://www.food.com/recipe/272433'),)
(rdflib.term.URIRef('https://www.food.com/recipe/430777'),)
(rdflib.term.URIRef('https://www.food.com/recipe/362055'),)
(rdflib.term.URIRef('https://www.food.com/recipe/76907'),)
(rdflib.term.URIRef('https://www.food.com/recipe/268242'),)
(rdflib.term.URIRef('https://www.food.com/recipe/123656'),)
(rdflib.term.URIRef('https://www.food.com/recipe/137158'),)
###Markdown
Leaderboard which can be distributed across a cluster
We can calculate metrics to describe how these shapes `es0` and `es1` might rank on a *leaderboard*:
###Code
es0.get_cardinality()
es1.get_cardinality()
###Output
_____no_output_____
###Markdown
Then calculate a vector distance between `es1` and `es0` which we'd generated earlier:
###Code
es0.calc_distance(es1)
###Output
_____no_output_____
###Markdown
Now we can generate a compact, ordinal representation for the `es1` shape, which can be serialized as a string, transferred across a network to an actor, then deserialized as the same shape -- *as long as we use a similarly structured subgraph*
###Code
import json
ser = es1.serialize(subgraph)
j_ser = json.dumps(ser)
print(j_ser)
ser = json.loads(j_ser)
ser
print(subgraph.id_list)
###Output
['http://example.org/#Batter', 'http://example.org/#Component', 'http://example.org/#Dough', 'http://example.org/#Kneading', 'http://example.org/#Mixing', 'http://example.org/#NOM_Vocab', 'http://example.org/#Noodle', 'http://example.org/#Pancake', 'http://example.org/#Process', 'http://example.org/#madeFrom', 'http://example.org/#usesProcess', 'http://purl.org/dc/terms/identifier', 'http://purl.org/dc/terms/publisher', 'http://purl.org/heals/food/Recipe', 'http://purl.org/heals/food/hasCookTime', 'http://purl.org/heals/food/hasIngredient', 'http://purl.org/heals/ingredient/AllPurposeFlour', 'http://purl.org/heals/ingredient/AppleCiderVinegar', 'http://purl.org/heals/ingredient/Bacon', 'http://purl.org/heals/ingredient/BlackPepper', 'http://purl.org/heals/ingredient/BrownSugar', 'http://purl.org/heals/ingredient/Butter', 'http://purl.org/heals/ingredient/ChickenEgg', 'http://purl.org/heals/ingredient/CowMilk', 'http://purl.org/heals/ingredient/Garlic', 'http://purl.org/heals/ingredient/Honey', 'http://purl.org/heals/ingredient/OliveOil', 'http://purl.org/heals/ingredient/Salt', 'http://purl.org/heals/ingredient/VanillaExtract', 'http://purl.org/heals/ingredient/Water', 'http://purl.org/heals/ingredient/WhiteSugar', 'http://purl.org/heals/ingredient/WholeWheatFlour', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type', 'http://www.w3.org/2000/01/rdf-schema#domain', 'http://www.w3.org/2000/01/rdf-schema#range', 'http://www.w3.org/2004/02/skos/core#Concept', 'http://www.w3.org/2004/02/skos/core#ConceptScheme', 'http://www.w3.org/2004/02/skos/core#altLabel', 'http://www.w3.org/2004/02/skos/core#closeMatch', 'http://www.w3.org/2004/02/skos/core#definition', 'http://www.w3.org/2004/02/skos/core#narrower', 'http://www.w3.org/2004/02/skos/core#prefLabel', 'http://www.wikidata.org/entity/Q1025010', 'http://www.wikidata.org/entity/Q12200', 'http://www.wikidata.org/entity/Q178', 'http://www.wikidata.org/entity/Q178024', 'http://www.wikidata.org/entity/Q1854639', 'http://www.wikidata.org/entity/Q192874', 'http://www.wikidata.org/entity/Q20065', 'http://www.wikidata.org/entity/Q29493', 'http://www.wikidata.org/entity/Q3089784', 'http://www.wikidata.org/entity/Q44541', 'http://www.wikidata.org/entity/Q627371', 'http://www.wikidata.org/entity/Q746549', 'http://www.wikidata.org/entity/Q815898', 'http://www.wikidata.org/entity/Q899392', 'https://derwen.ai/', 'https://www.food.com/recipe/100230', 'https://www.food.com/recipe/101876', 'https://www.food.com/recipe/103073', 'https://www.food.com/recipe/103964', 'https://www.food.com/recipe/104441', 'https://www.food.com/recipe/105830', 'https://www.food.com/recipe/107499', 'https://www.food.com/recipe/1079', 'https://www.food.com/recipe/109179', 'https://www.food.com/recipe/109196', 'https://www.food.com/recipe/111008', 'https://www.food.com/recipe/112374', 'https://www.food.com/recipe/113757', 'https://www.food.com/recipe/117025', 'https://www.food.com/recipe/117827', 'https://www.food.com/recipe/118047', 'https://www.food.com/recipe/12055', 'https://www.food.com/recipe/120884', 'https://www.food.com/recipe/123656', 'https://www.food.com/recipe/12402', 'https://www.food.com/recipe/124106', 'https://www.food.com/recipe/124131', 'https://www.food.com/recipe/124176', 'https://www.food.com/recipe/125648', 'https://www.food.com/recipe/126223', 'https://www.food.com/recipe/127989', 'https://www.food.com/recipe/135405', 'https://www.food.com/recipe/136111', 'https://www.food.com/recipe/136571', 'https://www.food.com/recipe/137046', 
'https://www.food.com/recipe/137158', 'https://www.food.com/recipe/137357', 'https://www.food.com/recipe/138985', 'https://www.food.com/recipe/139338', 'https://www.food.com/recipe/139989', 'https://www.food.com/recipe/140570', 'https://www.food.com/recipe/141995', 'https://www.food.com/recipe/142565', 'https://www.food.com/recipe/144415', 'https://www.food.com/recipe/144841', 'https://www.food.com/recipe/145953', 'https://www.food.com/recipe/146591', 'https://www.food.com/recipe/14862', 'https://www.food.com/recipe/148900', 'https://www.food.com/recipe/150765', 'https://www.food.com/recipe/151617', 'https://www.food.com/recipe/15219', 'https://www.food.com/recipe/152895', 'https://www.food.com/recipe/154001', 'https://www.food.com/recipe/154808', 'https://www.food.com/recipe/157367', 'https://www.food.com/recipe/157638', 'https://www.food.com/recipe/159039', 'https://www.food.com/recipe/15984', 'https://www.food.com/recipe/160167', 'https://www.food.com/recipe/160402', 'https://www.food.com/recipe/16051', 'https://www.food.com/recipe/16163', 'https://www.food.com/recipe/161962', 'https://www.food.com/recipe/162011', 'https://www.food.com/recipe/163724', 'https://www.food.com/recipe/164636', 'https://www.food.com/recipe/165947', 'https://www.food.com/recipe/170296', 'https://www.food.com/recipe/171106', 'https://www.food.com/recipe/173544', 'https://www.food.com/recipe/175046', 'https://www.food.com/recipe/175450', 'https://www.food.com/recipe/175652', 'https://www.food.com/recipe/180624', 'https://www.food.com/recipe/183115', 'https://www.food.com/recipe/183808', 'https://www.food.com/recipe/186307', 'https://www.food.com/recipe/186504', 'https://www.food.com/recipe/186892', 'https://www.food.com/recipe/188921', 'https://www.food.com/recipe/189437', 'https://www.food.com/recipe/19097', 'https://www.food.com/recipe/19104', 'https://www.food.com/recipe/194304', 'https://www.food.com/recipe/197303', 'https://www.food.com/recipe/1975', 'https://www.food.com/recipe/200871', 'https://www.food.com/recipe/201097', 'https://www.food.com/recipe/20191', 'https://www.food.com/recipe/202806', 'https://www.food.com/recipe/208673', 'https://www.food.com/recipe/21301', 'https://www.food.com/recipe/216474', 'https://www.food.com/recipe/220141', 'https://www.food.com/recipe/220361', 'https://www.food.com/recipe/221279', 'https://www.food.com/recipe/229516', 'https://www.food.com/recipe/232248', 'https://www.food.com/recipe/234763', 'https://www.food.com/recipe/23496', 'https://www.food.com/recipe/235292', 'https://www.food.com/recipe/235671', 'https://www.food.com/recipe/23715', 'https://www.food.com/recipe/238849', 'https://www.food.com/recipe/252783', 'https://www.food.com/recipe/255660', 'https://www.food.com/recipe/255748', 'https://www.food.com/recipe/256577', 'https://www.food.com/recipe/261361', 'https://www.food.com/recipe/262038', 'https://www.food.com/recipe/262538', 'https://www.food.com/recipe/262949', 'https://www.food.com/recipe/264735', 'https://www.food.com/recipe/268209', 'https://www.food.com/recipe/268242', 'https://www.food.com/recipe/271545', 'https://www.food.com/recipe/272027', 'https://www.food.com/recipe/272433', 'https://www.food.com/recipe/272746', 'https://www.food.com/recipe/273527', 'https://www.food.com/recipe/273987', 'https://www.food.com/recipe/274637', 'https://www.food.com/recipe/277824', 'https://www.food.com/recipe/279314', 'https://www.food.com/recipe/280663', 'https://www.food.com/recipe/284518', 'https://www.food.com/recipe/284530', 
'https://www.food.com/recipe/284620', 'https://www.food.com/recipe/299759', 'https://www.food.com/recipe/308224', 'https://www.food.com/recipe/31041', 'https://www.food.com/recipe/311010', 'https://www.food.com/recipe/31540', 'https://www.food.com/recipe/315525', 'https://www.food.com/recipe/317697', 'https://www.food.com/recipe/320154', 'https://www.food.com/recipe/320182', 'https://www.food.com/recipe/324267', 'https://www.food.com/recipe/324639', 'https://www.food.com/recipe/327593', 'https://www.food.com/recipe/328388', 'https://www.food.com/recipe/331765', 'https://www.food.com/recipe/335865', 'https://www.food.com/recipe/34614', 'https://www.food.com/recipe/349278', 'https://www.food.com/recipe/350593', 'https://www.food.com/recipe/351327', 'https://www.food.com/recipe/353290', 'https://www.food.com/recipe/35345', 'https://www.food.com/recipe/356829', 'https://www.food.com/recipe/357947', 'https://www.food.com/recipe/35828', 'https://www.food.com/recipe/358908', 'https://www.food.com/recipe/359334', 'https://www.food.com/recipe/359793', 'https://www.food.com/recipe/362055', 'https://www.food.com/recipe/368491', 'https://www.food.com/recipe/371414', 'https://www.food.com/recipe/371915', 'https://www.food.com/recipe/373964', 'https://www.food.com/recipe/37825', 'https://www.food.com/recipe/37971', 'https://www.food.com/recipe/382101', 'https://www.food.com/recipe/382707', 'https://www.food.com/recipe/384137', 'https://www.food.com/recipe/38959', 'https://www.food.com/recipe/399', 'https://www.food.com/recipe/400', 'https://www.food.com/recipe/40276', 'https://www.food.com/recipe/406738', 'https://www.food.com/recipe/40772', 'https://www.food.com/recipe/41305', 'https://www.food.com/recipe/419998', 'https://www.food.com/recipe/423015', 'https://www.food.com/recipe/424328', 'https://www.food.com/recipe/426449', 'https://www.food.com/recipe/430777', 'https://www.food.com/recipe/43126', 'https://www.food.com/recipe/437607', 'https://www.food.com/recipe/437729', 'https://www.food.com/recipe/438738', 'https://www.food.com/recipe/440398', 'https://www.food.com/recipe/440451', 'https://www.food.com/recipe/440609', 'https://www.food.com/recipe/441475', 'https://www.food.com/recipe/446011', 'https://www.food.com/recipe/447296', 'https://www.food.com/recipe/45057', 'https://www.food.com/recipe/45260', 'https://www.food.com/recipe/458', 'https://www.food.com/recipe/459', 'https://www.food.com/recipe/4643', 'https://www.food.com/recipe/467335', 'https://www.food.com/recipe/470634', 'https://www.food.com/recipe/471396', 'https://www.food.com/recipe/471598', 'https://www.food.com/recipe/48178', 'https://www.food.com/recipe/482308', 'https://www.food.com/recipe/483334', 'https://www.food.com/recipe/489', 'https://www.food.com/recipe/497918', 'https://www.food.com/recipe/49904', 'https://www.food.com/recipe/499338', 'https://www.food.com/recipe/503199', 'https://www.food.com/recipe/50402', 'https://www.food.com/recipe/504246', 'https://www.food.com/recipe/508734', 'https://www.food.com/recipe/511497', 'https://www.food.com/recipe/535853', 'https://www.food.com/recipe/5386', 'https://www.food.com/recipe/58476', 'https://www.food.com/recipe/58590', 'https://www.food.com/recipe/60149', 'https://www.food.com/recipe/60413', 'https://www.food.com/recipe/61108', 'https://www.food.com/recipe/62799', 'https://www.food.com/recipe/64564', 'https://www.food.com/recipe/64793', 'https://www.food.com/recipe/67535', 'https://www.food.com/recipe/71214', 'https://www.food.com/recipe/72129', 
'https://www.food.com/recipe/72402', 'https://www.food.com/recipe/72759', 'https://www.food.com/recipe/73828', 'https://www.food.com/recipe/74250', 'https://www.food.com/recipe/7536', 'https://www.food.com/recipe/75949', 'https://www.food.com/recipe/76907', 'https://www.food.com/recipe/77354', 'https://www.food.com/recipe/78459', 'https://www.food.com/recipe/80546', 'https://www.food.com/recipe/82226', 'https://www.food.com/recipe/85817', 'https://www.food.com/recipe/86356', 'https://www.food.com/recipe/86710', 'https://www.food.com/recipe/9037', 'https://www.food.com/recipe/90391', 'https://www.food.com/recipe/91311', 'https://www.food.com/recipe/93428', 'https://www.food.com/recipe/93990', 'https://www.food.com/recipe/94616', 'https://www.food.com/recipe/95116', 'https://www.food.com/recipe/96087', 'https://www.food.com/recipe/97832', 'https://www.foodonline.com/solution/mixing-and-blending', rdflib.term.URIRef('http://example.org/#Component'), rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
###Markdown
Test the deserialization
###Code
es2 = kglab.EvoShape(kg, measure)
uri_map = es2.deserialize(ser, subgraph)
print(es2.root.uri)
for k, v in uri_map.items():
    print(k, v)
for e in es2.root.edges:
    print("obj", e.obj)
    print("edge", e.pred, e.obj.uri)
for n in es2.nodes:
    print(n)
    print(n.uri)
[ print(r) for r in es2.get_rdf() ]
es2.serialize(subgraph)
es2.get_sparql()
###Output
_____no_output_____
###Markdown
Prototype a leaderboard
###Code
leaderboard = kglab.Leaderboard()
leaderboard.df
dist = leaderboard.add_shape(es0.serialize(subgraph))
print(dist)
leaderboard.df
dist = leaderboard.add_shape(es1.serialize(subgraph))
print(dist)
leaderboard.df
es3 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(type_uri, terminal=True)
es3.add_link(es3.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Butter"
edge_node = kglab.EvoShapeNode(edge_node_uri)
es3.add_link(es3.root, edge_uri, edge_node)
shape = es3.serialize(subgraph)
shape
dist = leaderboard.add_shape(es3.serialize(subgraph))
print(dist)
leaderboard.df
###Output
2
###Markdown
Evolutionary Shape PredictionAn experiment in evolutionary software using *reinforcement learning* to discover interesting data objects within a given set of graph data.
###Code
import kglab
namespaces = {
"nom": "http://example.org/#",
"wtm": "http://purl.org/heals/food/",
"ind": "http://purl.org/heals/ingredient/",
"skos": "http://www.w3.org/2004/02/skos/core#",
}
kg = kglab.KnowledgeGraph(
name = "A recipe KG example based on Food.com",
base_uri = "https://www.food.com/recipe/",
language = "en",
namespaces = namespaces,
)
kg.load_rdf("../dat/recipes.ttl")
import sys
import inspect
__name__ = "kglab"
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
clsmembers
###Output
_____no_output_____
###Markdown
Graph measures and topological analysis Let's measure this graph, to develop some estimators that we'll use later...
###Code
import pandas as pd
pd.set_option("max_rows", None)
measure = kglab.Measure()
measure.measure_graph(kg)
print("edges", measure.edge_count)
print("nodes", measure.node_count)
measure.s_gen.get_tally()
measure.p_gen.get_tally()
measure.o_gen.get_tally()
measure.l_gen.get_tally()
df, link_map = measure.n_gen.get_tally_map()
df
df, link_map = measure.e_gen.get_tally_map()
print(link_map)
###Output
_____no_output_____
###Markdown
ShapeFactory and evolved shapes
###Code
factory = kglab.ShapeFactory(kg, measure)
subgraph = factory.subgraph
es0 = factory.new_shape()
print(es0.serialize(subgraph))
[ print(r) for r in es0.get_rdf() ];
###Output
_____no_output_____
###Markdown
Now we can use this `ShapeFactory` object to evolve a *shape* within the graph, then generate a SPARQL query to test its cardinality:
###Code
sparql, bindings = es0.get_sparql()
print(sparql)
print(bindings)
for row in kg.query(sparql):
print(row)
###Output
_____no_output_____
###Markdown
We can also use this library to construct a specific shape programmatically, e.g., a recipe:
###Code
es1 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es1.add_link(es1.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/VanillaExtract"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/AllPurposeFlour"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Salt"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/ChickenEgg"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
[ print(r) for r in es1.get_rdf() ]
es1.serialize(subgraph)
sparql, bindings = es1.get_sparql()
print(sparql)
print(bindings)
###Output
_____no_output_____
###Markdown
Query to find matching instances for this shape `es1` within the graph:
###Code
for row in kg.query(sparql, bindings=bindings):
print(row)
###Output
_____no_output_____
###Markdown
Leaderboard which can be distributed across a cluster We can calculate metrics to describe how these shapes `es0` and `es1` might rank on a *leaderboard*:
###Code
es0.get_cardinality()
es1.get_cardinality()
###Output
_____no_output_____
###Markdown
Then calculate a vector distance between `es1` and `es0` which we'd generated earlier:
###Code
es0.calc_distance(es1)
###Output
_____no_output_____
###Markdown
Now we can generate a compact, ordinal representation for the `es1` shape, which can be serialized as a string, transferred across a network to an actor, then deserialized as the same shape -- *as long as we use a similarly structured subgraph*
###Code
import json
ser = es1.serialize(subgraph)
j_ser = json.dumps(ser)
print(j_ser)
ser = json.loads(j_ser)
ser
###Output
_____no_output_____
###Markdown
Test the deseserialization
###Code
es2 = kglab.EvoShape(kg, measure)
uri_map = es2.deserialize(ser, subgraph)
print(es2.root.uri)
for k, v in uri_map.items():
print(k, v)
for e in es2.root.edges:
print("obj", e.obj)
print("edge", e.pred, e.obj.uri)
for n in es2.nodes:
print(n)
print(n.uri)
[ print(r) for r in es2.get_rdf() ]
es2.serialize(subgraph)
es2.get_sparql()
###Output
_____no_output_____
###Markdown
Prototype a leaderboard -
###Code
leaderboard = kglab.Leaderboard()
leaderboard.df
dist = leaderboard.add_shape(es0.serialize(subgraph))
print(dist)
leaderboard.df
dist = leaderboard.add_shape(es1.serialize(subgraph))
print(dist)
leaderboard.df
es3 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es3.add_link(es3.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Butter"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es3.add_link(es3.root, edge_uri, edge_node)
shape = es3.serialize(subgraph)
shape
dist = leaderboard.add_shape(es3.serialize(subgraph))
print(dist)
leaderboard.df
###Output
_____no_output_____
###Markdown
Evolutionary Shape PredictionAn experiment in evolutionary software using *reinforcement learning* to discover interesting data objects within a given set of graph data.
###Code
import kglab
namespaces = {
"nom": "http://example.org/#",
"wtm": "http://purl.org/heals/food/",
"ind": "http://purl.org/heals/ingredient/",
"skos": "http://www.w3.org/2004/02/skos/core#",
}
kg = kglab.KnowledgeGraph(
name = "A recipe KG example based on Food.com",
base_uri = "https://www.food.com/recipe/",
language = "en",
namespaces = namespaces,
)
kg.load_rdf("../dat/recipes.ttl")
import sys
import inspect
__name__ = "kglab"
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
clsmembers
###Output
_____no_output_____
###Markdown
Graph measures and topological analysis Let's measure this graph, to develop some estimators that we'll use later...
###Code
import pandas as pd
pd.set_option("max_rows", None)
measure = kglab.Measure()
measure.measure_graph(kg)
print("edges", measure.edge_count)
print("nodes", measure.node_count)
measure.s_gen.get_tally()
measure.p_gen.get_tally()
measure.o_gen.get_tally()
measure.l_gen.get_tally()
df, link_map = measure.n_gen.get_tally_map()
df
df, link_map = measure.e_gen.get_tally_map()
print(link_map)
###Output
_____no_output_____
###Markdown
ShapeFactory and evolved shapes
###Code
factory = kglab.ShapeFactory(kg, measure)
subgraph = factory.subgraph
es0 = factory.new_shape()
print(es0.serialize(subgraph))
[ print(r) for r in es0.get_rdf() ];
###Output
_____no_output_____
###Markdown
Now we can use this `ShapeFactory` object to evolve a *shape* within the graph, then generate a SPARQL query to test its cardinality:
###Code
sparql, bindings = es0.get_sparql()
print(sparql)
print(bindings)
for row in kg.query(sparql):
print(row)
###Output
_____no_output_____
###Markdown
We can also use this library to construct a specific shape programatically, e.g., a recipe:
###Code
es1 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es1.add_link(es1.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/VanillaExtract"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/AllPurposeFlour"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Salt"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/ChickenEgg"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
[ print(r) for r in es1.get_rdf() ]
es1.serialize(subgraph)
sparql, bindings = es1.get_sparql()
print(sparql)
print(bindings)
###Output
_____no_output_____
###Markdown
Query to find matching instances for this shape `es1` within the graph:
###Code
for row in kg.query(sparql, bindings=bindings):
print(row)
###Output
_____no_output_____
###Markdown
Leaderboard which can be distributed across a cluster We can calculate metrics to describe how these shapes `es0` and `es1` might rank on a *leaderboard*:
###Code
es0.get_cardinality()
es1.get_cardinality()
###Output
_____no_output_____
###Markdown
Then calculate a vector distance between `es1` and `es0` which we'd generated earlier:
###Code
es0.calc_distance(es1)
###Output
_____no_output_____
###Markdown
Now we can generate a compact, ordinal representation for the `es1` shape, which can be serialized as a string, transferred across a network to an actor, then deserialized as the same shape -- *as long as we use a similarly structured subgraph*
###Code
import json
ser = es1.serialize(subgraph)
j_ser = json.dumps(ser)
print(j_ser)
ser = json.loads(j_ser)
ser
###Output
_____no_output_____
###Markdown
Test the deseserialization
###Code
es2 = kglab.EvoShape(kg, measure)
uri_map = es2.deserialize(ser, subgraph)
print(es2.root.uri)
for k, v in uri_map.items():
print(k, v)
for e in es2.root.edges:
print("obj", e.obj)
print("edge", e.pred, e.obj.uri)
for n in es2.nodes:
print(n)
print(n.uri)
[ print(r) for r in es2.get_rdf() ]
es2.serialize(subgraph)
es2.get_sparql()
###Output
_____no_output_____
###Markdown
Prototype a leaderboard -
###Code
leaderboard = kglab.Leaderboard()
leaderboard.df
dist = leaderboard.add_shape(es0.serialize(subgraph))
print(dist)
leaderboard.df
dist = leaderboard.add_shape(es1.serialize(subgraph))
print(dist)
leaderboard.df
es3 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es3.add_link(es3.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Butter"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es3.add_link(es3.root, edge_uri, edge_node)
shape = es3.serialize(subgraph)
shape
dist = leaderboard.add_shape(es3.serialize(subgraph))
print(dist)
leaderboard.df
###Output
_____no_output_____
###Markdown
Evolutionary Shape PredictionAn experiment in evolutionary software using *reinforcement learning* to discover interesting data objects within a given set of graph data.
###Code
import kglab
namespaces = {
"nom": "http://example.org/#",
"wtm": "http://purl.org/heals/food/",
"ind": "http://purl.org/heals/ingredient/",
"skos": "http://www.w3.org/2004/02/skos/core#",
}
kg = kglab.KnowledgeGraph(
name = "A recipe KG example based on Food.com",
base_uri = "https://www.food.com/recipe/",
language = "en",
namespaces = namespaces,
)
kg.load_rdf(dirname(os.getcwd()) + "/dat/recipes.ttl")
import sys
import inspect
__name__ = "kglab"
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
clsmembers
###Output
_____no_output_____
###Markdown
Graph measures and topological analysis Let's measure this graph, to develop some estimators that we'll use later...
###Code
import pandas as pd
pd.set_option("max_rows", None)
measure = kglab.Measure()
measure.measure_graph(kg)
print("edges", measure.edge_count)
print("nodes", measure.node_count)
measure.s_gen.get_tally()
measure.p_gen.get_tally()
measure.o_gen.get_tally()
measure.l_gen.get_tally()
df, link_map = measure.n_gen.get_tally_map()
df
df, link_map = measure.e_gen.get_tally_map()
print(link_map)
###Output
_____no_output_____
###Markdown
ShapeFactory and evolved shapes
###Code
factory = kglab.ShapeFactory(kg, measure)
subgraph = factory.subgraph
es0 = factory.new_shape()
print(es0.serialize(subgraph))
[ print(r) for r in es0.get_rdf() ];
###Output
_____no_output_____
###Markdown
Now we can use this `ShapeFactory` object to evolve a *shape* within the graph, then generate a SPARQL query to test its cardinality:
###Code
sparql, bindings = es0.get_sparql()
print(sparql)
print(bindings)
for row in kg.query(sparql):
print(row)
###Output
_____no_output_____
###Markdown
We can also use this library to construct a specific shape programmatically, e.g., a recipe:
###Code
es1 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es1.add_link(es1.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/VanillaExtract"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/AllPurposeFlour"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Salt"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/ChickenEgg"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es1.add_link(es1.root, edge_uri, edge_node)
[ print(r) for r in es1.get_rdf() ]
es1.serialize(subgraph)
sparql, bindings = es1.get_sparql()
print(sparql)
print(bindings)
###Output
_____no_output_____
###Markdown
Query to find matching instances for this shape `es1` within the graph:
###Code
for row in kg.query(sparql, bindings=bindings):
print(row)
###Output
_____no_output_____
###Markdown
Leaderboard which can be distributed across a cluster

We can calculate metrics to describe how these shapes `es0` and `es1` might rank on a *leaderboard*:
###Code
es0.get_cardinality()
es1.get_cardinality()
###Output
_____no_output_____
###Markdown
Then calculate a vector distance between `es1` and `es0` which we'd generated earlier:
###Code
es0.calc_distance(es1)
###Output
_____no_output_____
###Markdown
Now we can generate a compact, ordinal representation for the `es1` shape, which can be serialized as a string, transferred across a network to an actor, then deserialized as the same shape -- *as long as we use a similarly structured subgraph*
###Code
import json
ser = es1.serialize(subgraph)
j_ser = json.dumps(ser)
print(j_ser)
ser = json.loads(j_ser)
ser
###Output
_____no_output_____
###Markdown
Test the deserialization
###Code
es2 = kglab.EvoShape(kg, measure)
uri_map = es2.deserialize(ser, subgraph)
print(es2.root.uri)
for k, v in uri_map.items():
print(k, v)
for e in es2.root.edges:
print("obj", e.obj)
print("edge", e.pred, e.obj.uri)
for n in es2.nodes:
print(n)
print(n.uri)
[ print(r) for r in es2.get_rdf() ]
es2.serialize(subgraph)
es2.get_sparql()
###Output
_____no_output_____
###Markdown
Prototype a leaderboard:
###Code
leaderboard = kglab.Leaderboard()
leaderboard.df
dist = leaderboard.add_shape(es0.serialize(subgraph))
print(dist)
leaderboard.df
dist = leaderboard.add_shape(es1.serialize(subgraph))
print(dist)
leaderboard.df
es3 = kglab.EvoShape(kg, measure)
type_uri = "http://purl.org/heals/food/Recipe"
type_node = kglab.EvoShapeNode(uri=type_uri, terminal=True)
es3.add_link(es3.root, kg.get_ns("rdf").type, type_node)
edge_uri = "http://purl.org/heals/food/hasIngredient"
edge_node_uri = "http://purl.org/heals/ingredient/Butter"
edge_node = kglab.EvoShapeNode(uri=edge_node_uri)
es3.add_link(es3.root, edge_uri, edge_node)
shape = es3.serialize(subgraph)
shape
dist = leaderboard.add_shape(es3.serialize(subgraph))
print(dist)
leaderboard.df
###Output
_____no_output_____ |
test/testdata/tensorflow/siamese_network/siamese_network.ipynb | ###Markdown
Siamese Network

A Siamese neural network (sometimes called a twin neural network) is an artificial neural network that uses the same weights while working in tandem on two different input vectors to compute comparable output vectors. Often one of the output vectors is precomputed, thus forming a baseline against which the other output vector is compared. This is similar to comparing fingerprints, but can be described more technically as a distance function for locality-sensitive hashing.

It is possible to build a structure that is functionally similar to a Siamese network but implements a slightly different function. This is typically used for comparing similar instances in different type sets.

Similarity measures where a twin network might be used include recognizing handwritten checks, automatic detection of faces in camera images, and matching queries with indexed documents. Perhaps the most well-known application of twin networks is face recognition, where known images of people are precomputed and compared to an image from a turnstile or similar device. It is not obvious at first, but these are two slightly different problems: one is recognizing a person among a large number of other persons (the facial recognition problem).

Source: [Wikipedia](https://en.wikipedia.org/wiki/Siamese_neural_network)
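To make the weight-sharing idea concrete before building the real model, here is a minimal illustrative sketch (not the notebook's architecture): a single layer object is applied to two different inputs, so both outputs come from the same weights and live in the same embedding space:

```python
import tensorflow as tf

encoder = tf.keras.layers.Dense(8, activation="relu")  # ONE set of weights

a = tf.random.normal((1, 4))
b = tf.random.normal((1, 4))

# The same layer processes both inputs, so the two output vectors
# are directly comparable, e.g. via a Euclidean distance.
print(tf.norm(encoder(a) - encoder(b), axis=1))
```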
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers, utils, callbacks
###Output
_____no_output_____
###Markdown
Let's start by creating our dataset. Our input data will consist of pairs of images, and the output will be either 1 or 0, indicating whether the pair is similar or not
###Code
def make_pairs(images, labels, seed=19):
np.random.seed(seed)
pairImages = []
pairLabels = []
numClasses = len(np.unique(labels))
idx = [np.where(labels == i)[0] for i in range(numClasses)]
for idxA in range(len(images)):
currentImage = images[idxA]
label = labels[idxA]
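        # Positive pair: draw a second image at random from the SAME class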
idxB = np.random.choice(idx[label])
posImage = images[idxB]
pairImages.append([currentImage, posImage])
pairLabels.append([1])
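        # Negative pair: draw an image at random from a DIFFERENT class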
negIdx = np.where(labels != label)[0]
negImage = images[np.random.choice(negIdx)]
pairImages.append([currentImage, negImage])
pairLabels.append([0])
return (np.array(pairImages), np.array(pairLabels))
###Output
_____no_output_____
###Markdown
We will be working with the `MNIST` dataset in this notebook, which comes bundled with the tensorflow library
###Code
(trainX, trainY), (testX, testY) = mnist.load_data()
trainX = 1 - (trainX / 255.0)
testX = 1 - (testX / 255.0)
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
(pairTrain, labelTrain) = make_pairs(trainX, trainY)
(pairTest, labelTest) = make_pairs(testX, testY)
print(f'\nTrain Data Shape: {pairTrain.shape}')
print(f'Test Data Shape: {pairTest.shape}\n\n')
###Output
_____no_output_____
###Markdown
Let's visualize the MNIST images
###Code
fig, ax = plt.subplots(2, 6, figsize=(20, 6))
random.seed(19)
idx = random.choices(range(len(trainX)), k=12)
for i in range(12):
ax[i//6][i%6].imshow(np.squeeze(trainX[idx[i]]), cmap='gray')
ax[i//6][i%6].set_title(f'Label: {trainY[idx[i]]}', fontsize=18)
ax[i//6][i%6].set_axis_off()
fig.suptitle('MNIST Images', fontsize=24);
###Output
_____no_output_____
###Markdown
Here is a sample of our prepared dataset
###Code
fig, ax = plt.subplots(2, 6, figsize=(20, 6))
random.seed(19)
idx = random.choices(range(len(pairTrain)), k=6)
for i in range(0, 12, 2):
ax[i//6][i%6].imshow(np.squeeze(pairTrain[idx[i//2]][0]), cmap='gray')
ax[i//6][i%6+1].imshow(np.squeeze(pairTrain[idx[i//2]][1]), cmap='gray')
ax[i//6][i%6].set_title(f'Label: {labelTrain[idx[i//2]]}', fontsize=18)
ax[i//6][i%6].set_axis_off()
ax[i//6][i%6+1].set_axis_off()
fig.suptitle('Input Pair Images', fontsize=24);
###Output
_____no_output_____
###Markdown
Here we define some configurations for our model
###Code
class config():
IMG_SHAPE = (28, 28, 1)
EMBEDDING_DIM = 48
BATCH_SIZE = 64
EPOCHS = 500
###Output
_____no_output_____
###Markdown
Here we define a function to calculate the Euclidean distance between two vectors. Our model will use it to compare the feature vectors of an image pair (the feature vectors are produced by the model's feature extractor)
###Code
def euclidean_distance(vectors):
(featsA, featsB) = vectors
sumSquared = K.sum(K.square(featsA - featsB), axis=1, keepdims=True)
return K.sqrt(K.maximum(sumSquared, K.epsilon()))
###Output
_____no_output_____
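A quick check of the helper on known inputs (illustrative only, using the `euclidean_distance` defined above):

```python
v = tf.ones((1, 3))
print(euclidean_distance((v, v)))      # ~0 (floored by K.epsilon())
print(euclidean_distance((v, 2 * v)))  # sqrt(3) ≈ 1.732
```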
###Markdown
With Siamese networks, the two most commonly used loss functions are:

* contrastive loss
* triplet loss

We will be using contrastive loss in this notebook, i.e.:

```
Contrastive loss = mean((1 - true_value) * square(prediction) +
                        true_value * square(max(margin - prediction, 0)))
```
###Code
def loss(margin=1):
def contrastive_loss(y_true, y_pred):
y_true = tf.cast(y_true, y_pred.dtype)
square_pred = tf.math.square(y_pred)
margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))
return tf.math.reduce_mean(
(1 - y_true) * square_pred + (y_true) * margin_square
)
return contrastive_loss
###Output
_____no_output_____
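As a quick sanity check of the formula (with `margin=1`, calling the `loss` factory defined above): for `y_true = 0` the loss reduces to `prediction**2`, and for `y_true = 1` it reduces to `max(margin - prediction, 0)**2`:

```python
l = loss(margin=1)
print(l(tf.constant([[0.0]]), tf.constant([[0.3]])))  # (1-0) * 0.3**2       = 0.09
print(l(tf.constant([[1.0]]), tf.constant([[0.3]])))  # max(1 - 0.3, 0)**2   = 0.49
```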
###Markdown
Finally we define our model architecture:

* The model contains two input layers
* A feature extractor through which both images are passed to generate feature vectors; the feature extractor typically consists of Convolutional and Pooling Layers
* The feature vectors are passed through a custom layer to get the Euclidean distance between the vectors
* The final layer consists of a single sigmoid unit
###Code
class SiameseNetwork(Model):
def __init__(self, inputShape, embeddingDim):
super(SiameseNetwork, self).__init__()
imgA = layers.Input(shape=inputShape)
imgB = layers.Input(shape=inputShape)
featureExtractor = self.build_feature_extractor(inputShape, embeddingDim)
featsA = featureExtractor(imgA)
featsB = featureExtractor(imgB)
distance = layers.Lambda(euclidean_distance, name='euclidean_distance')([featsA, featsB])
outputs = layers.Dense(1, activation="sigmoid")(distance)
self.model = Model(inputs=[imgA, imgB], outputs=outputs)
def build_feature_extractor(self, inputShape, embeddingDim=48):
model = Sequential([
layers.Input(inputShape),
layers.Conv2D(64, (2, 2), padding="same", activation="relu"),
layers.MaxPooling2D(pool_size=2),
layers.Dropout(0.3),
layers.Conv2D(64, (2, 2), padding="same", activation="relu"),
layers.MaxPooling2D(pool_size=2),
layers.Dropout(0.3),
layers.Conv2D(128, (1, 1), padding="same", activation="relu"),
layers.Flatten(),
layers.Dense(embeddingDim, activation='tanh')
])
return model
def call(self, x):
return self.model(x)
model = SiameseNetwork(inputShape=config.IMG_SHAPE, embeddingDim=config.EMBEDDING_DIM)
model.compile(loss=loss(margin=1), optimizer="adam", metrics=["accuracy"])
es = callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1, restore_best_weights=True, min_delta=1e-4)
rlp = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6, mode='min', verbose=1)
history = model.fit(
[pairTrain[:, 0], pairTrain[:, 1]], labelTrain[:],
validation_data=([pairTest[:, 0], pairTest[:, 1]], labelTest[:]),
batch_size=config.BATCH_SIZE,
epochs=config.EPOCHS,
callbacks=[es, rlp]
)
fig, ax = plt.subplots(2, 6, figsize=(20, 6))
random.seed(19)
idx = random.choices(range(len(pairTest)), k=6)
preds = model.predict([pairTest[:, 0], pairTest[:, 1]])
for i in range(0, 12, 2):
ax[i//6][i%6].imshow(np.squeeze(pairTest[idx[i//2]][0]), cmap='gray')
ax[i//6][i%6+1].imshow(np.squeeze(pairTest[idx[i//2]][1]), cmap='gray')
ax[i//6][i%6].set_title(f'Label: {labelTest[idx[i//2]]}', fontsize=18)
ax[i//6][i%6+1].set_title(f'Predicted: {np.round(preds[idx[i//2]], 2)}', fontsize=18)
ax[i//6][i%6].set_axis_off()
ax[i//6][i%6+1].set_axis_off()
fig.suptitle('Test Pair Images', fontsize=24);
sns.set_style('darkgrid')
fig, ax = plt.subplots(2, 1, figsize=(20, 8))
df = pd.DataFrame(history.history)
df[['accuracy', 'val_accuracy']].plot(ax=ax[0])
df[['loss', 'val_loss']].plot(ax=ax[1])
ax[0].set_title('Model Accuracy', fontsize=12)
ax[1].set_title('Model Loss', fontsize=12)
fig.suptitle('Siamese Network: Learning Curve', fontsize=18);
###Output
_____no_output_____ |
l02c01_celsius_to_fahrenheit.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
The Basics: Training Your First Model

Welcome to this Colab where you will train your first Machine Learning model!

We'll try to keep things simple here, and only introduce basic concepts. Later Colabs will cover more advanced problems.

The problem we will solve is to convert from Celsius to Fahrenheit, where the approximate formula is:

$$ f = c \times 1.8 + 32 $$

Of course, it would be simple enough to create a conventional Python function that directly performs this calculation, but that wouldn't be machine learning.

Instead, we will give TensorFlow some sample Celsius values (0, 8, 15, 22, 38) and their corresponding Fahrenheit values (32, 46, 59, 72, 100). Then, we will train a model that figures out the above formula through the training process.

Import dependencies

First, import TensorFlow. Here, we're calling it `tf` for ease of use. We also tell it to only display errors.

Next, import [NumPy](http://www.numpy.org/) as `np`. NumPy helps us to represent our data as highly performant lists.
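As an aside, the conventional (non-ML) Python function mentioned above would be just:

```python
def celsius_to_fahrenheit(c):
    return c * 1.8 + 32  # the formula we want the model to discover

print(celsius_to_fahrenheit(100.0))  # 212.0
```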
###Code
import tensorflow as tf
import numpy as np
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
###Output
_____no_output_____
###Markdown
Set up training data

As we saw before, supervised Machine Learning is all about figuring out an algorithm given a set of inputs and outputs. Since the task in this Codelab is to create a model that can give the temperature in Fahrenheit when given the degrees in Celsius, we create two lists `celsius_q` and `fahrenheit_a` that we can use to train our model.
###Code
celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i,c in enumerate(celsius_q):
print("{} degrees Celsius = {} degrees Fahrenheit".format(c, fahrenheit_a[i]))
###Output
-40.0 degrees Celsius = -40.0 degrees Fahrenheit
-10.0 degrees Celsius = 14.0 degrees Fahrenheit
0.0 degrees Celsius = 32.0 degrees Fahrenheit
8.0 degrees Celsius = 46.0 degrees Fahrenheit
15.0 degrees Celsius = 59.0 degrees Fahrenheit
22.0 degrees Celsius = 72.0 degrees Fahrenheit
38.0 degrees Celsius = 100.0 degrees Fahrenheit
###Markdown
Some Machine Learning terminology

- **Feature** — The input(s) to our model. In this case, a single value — the degrees in Celsius.
- **Labels** — The output our model predicts. In this case, a single value — the degrees in Fahrenheit.
- **Example** — A pair of inputs/outputs used during training. In our case a pair of values from `celsius_q` and `fahrenheit_a` at a specific index, such as `(22,72)`.

Create the model

Next, create the model. We will use the simplest possible model we can, a Dense network. Since the problem is straightforward, this network will require only a single layer, with a single neuron.

Build a layer

We'll call the layer `l0` and create it by instantiating `tf.keras.layers.Dense` with the following configuration:

* `input_shape=[1]` — This specifies that the input to this layer is a single value. That is, the shape is a one-dimensional array with one member. Since this is the first (and only) layer, that input shape is the input shape of the entire model. The single value is a floating point number, representing degrees Celsius.
* `units=1` — This specifies the number of neurons in the layer. The number of neurons defines how many internal variables the layer has to try to learn how to solve the problem (more later). Since this is the final layer, it is also the size of the model's output — a single float value representing degrees Fahrenheit. (In a multi-layered network, the size and shape of the layer would need to match the `input_shape` of the next layer.)
###Code
l0 = tf.keras.layers.Dense(units=1, input_shape=[1])
###Output
_____no_output_____
###Markdown
Assemble layers into the model

Once layers are defined, they need to be assembled into a model. The Sequential model definition takes a list of layers as an argument, specifying the calculation order from the input to the output.

This model has just a single layer, `l0`.
###Code
model = tf.keras.Sequential([l0])
###Output
_____no_output_____
###Markdown
**Note**

You will often see the layers defined inside the model definition, rather than beforehand:

```python
model = tf.keras.Sequential([
  tf.keras.layers.Dense(units=1, input_shape=[1])
])
```

Compile the model, with loss and optimizer functions

Before training, the model has to be compiled. When compiled for training, the model is given:

- **Loss function** — A way of measuring how far off predictions are from the desired outcome. (The measured difference is called the "loss".)
- **Optimizer function** — A way of adjusting internal values in order to reduce the loss.
###Code
model.compile(loss='mean_squared_error',
optimizer=tf.keras.optimizers.Adam(0.1))
###Output
_____no_output_____
###Markdown
These are used during training (`model.fit()`, below) to first calculate the loss at each point, and then improve it. In fact, the act of calculating the current loss of a model and then improving it is precisely what training is.

During training, the optimizer function is used to calculate adjustments to the model's internal variables. The goal is to adjust the internal variables until the model (which is really a math function) mirrors the actual equation for converting Celsius to Fahrenheit.

TensorFlow uses numerical analysis to perform this tuning, and all this complexity is hidden from you, so we will not go into the details here. What is useful to know about these parameters is:

The loss function ([mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error)) and the optimizer ([Adam](https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/)) used here are standard for simple models like this one, but many others are available. It is not important to know how these specific functions work at this point.

One part of the optimizer you may need to think about when building your own models is the learning rate (`0.1` in the code above). This is the step size taken when adjusting values in the model. If the value is too small, it will take too many iterations to train the model. Too large, and accuracy goes down. Finding a good value often involves some trial and error, but the range is usually within 0.001 (the default) and 0.1.

Train the model

Train the model by calling the `fit` method.

During training, the model takes in Celsius values, performs a calculation using the current internal variables (called "weights"), and outputs values which are meant to be the Fahrenheit equivalent. Since the weights are initially set randomly, the output will not be close to the correct value. The difference between the actual output and the desired output is calculated using the loss function, and the optimizer function directs how the weights should be adjusted.

This cycle of calculate, compare, adjust is controlled by the `fit` method. The first argument is the inputs, the second argument is the desired outputs. The `epochs` argument specifies how many times this cycle should be run, and the `verbose` argument controls how much output the method produces.
###Code
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
###Output
Finished training the model
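To feel out the effect of the learning rate yourself, here is a small illustrative sweep (not part of the original lesson; it reuses the `celsius_q` and `fahrenheit_a` arrays defined above):

```python
for lr in (0.001, 0.01, 0.1):
    m = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
    m.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(lr))
    h = m.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
    print(lr, h.history['loss'][-1])  # final loss for each step size
```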
###Markdown
In later videos, we will go into more detail on what actually happens here and how a Dense layer actually works internally.

Display training statistics

The `fit` method returns a history object. We can use this object to plot how the loss of our model goes down after each training epoch. A high loss means that the Fahrenheit degrees the model predicts is far from the corresponding value in `fahrenheit_a`.

We'll use [Matplotlib](https://matplotlib.org/) to visualize this (you could use another tool). As you can see, our model improves very quickly at first, and then has a steady, slow improvement until it is very near "perfect" towards the end.
###Code
import matplotlib.pyplot as plt
plt.xlabel('Epoch Number')
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
###Output
_____no_output_____
###Markdown
Use the model to predict values

Now you have a model that has been trained to learn the relationship between `celsius_q` and `fahrenheit_a`. You can use the predict method to have it calculate the Fahrenheit degrees for a previously unknown Celsius value.

So, for example, if the Celsius value is 100, what do you think the Fahrenheit result will be? Take a guess before you run this code.
###Code
print(model.predict([100.0]))
###Output
[[211.32352]]
###Markdown
The correct answer is $100 \times 1.8 + 32 = 212$, so our model is doing really well.

To review

* We created a model with a Dense layer
* We trained it with 3500 examples (7 pairs, over 500 epochs)

Our model tuned the variables (weights) in the Dense layer until it was able to return the correct Fahrenheit value for any Celsius value. (Remember, 100 Celsius was not part of our training data.)

Looking at the layer weights

Finally, let's print the internal variables of the Dense layer.
###Code
print("These are the layer variables: {}".format(l0.get_weights()))
###Output
These are the layer variables: [array([[1.8225722]], dtype=float32), array([29.0663], dtype=float32)]
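Indeed, applying these weights by hand reproduces the earlier prediction: $1.8225722 \times 100 + 29.0663 = 211.32352$, exactly the value `model.predict([100.0])` returned above.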
###Markdown
The first variable is close to ~1.8 and the second to ~32. These values (1.8 and 32) are the actual variables in the real conversion formula.

This is really close to the values in the conversion formula. We'll explain this in an upcoming video where we show how a Dense layer works, but for a single neuron with a single input and a single output, the internal math looks the same as [the equation for a line](https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form), $y = mx + b$, which has the same form as the conversion equation, $f = 1.8c + 32$.

Since the form is the same, the variables should converge on the standard values of 1.8 and 32, which is exactly what happened.

With additional neurons, additional inputs, and additional outputs, the formula becomes much more complex, but the idea is the same.

A little experiment

Just for fun, what if we created more Dense layers with different units, which therefore also have more variables?
###Code
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("Finished training the model")
print(model.predict([100.0]))
print("Model predicts that 100 degrees Celsius is: {} degrees Fahrenheit".format(model.predict([100.0])))
print("These are the l0 variables: {}".format(l0.get_weights()))
print("These are the l1 variables: {}".format(l1.get_weights()))
print("These are the l2 variables: {}".format(l2.get_weights()))
###Output
Finished training the model
[[211.74745]]
Model predicts that 100 degrees Celsius is: [[211.74745]] degrees Fahrenheit
These are the l0 variables: [array([[-0.16284885, 0.12751213, -0.32148883, 0.5214121 ]],
dtype=float32), array([-3.3183224, 3.0172393, -3.3112485, 3.1654341], dtype=float32)]
These are the l1 variables: [array([[ 0.08300527, 0.30524582, 0.81497777, 1.0592592 ],
[ 0.7283595 , 0.2548315 , -0.44183302, -0.7064162 ],
[ 0.22422549, 0.92266417, 1.1235605 , 0.9576998 ],
[ 0.3765884 , 0.14963949, -0.9221817 , -0.3846107 ]],
dtype=float32), array([ 3.1003428, -1.663689 , -3.366849 , -3.3779995], dtype=float32)]
These are the l2 variables: [array([[ 0.67793643],
[-0.10660361],
[-0.9932232 ],
[-0.79200923]], dtype=float32), array([3.1923056], dtype=float32)]
|
courses/machine_learning/deepdive2/recommendation_systems/labs/basic_ranking.ipynb | ###Markdown
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding with the lab.**
###Code
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
This notebook uses TF2.x. Please check your tensorflow version using the cell below.
###Code
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
###Output
TensorFlow version: 2.5.0
###Markdown
Lab Task 1: Preparing the dataset

We're going to use the same data as the [retrieval](https://www.tensorflow.org/recommenders/examples/basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
###Code
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
###Output
_____no_output_____
###Markdown
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
###Code
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
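# One possible implementation (a sketch mirroring the linked solution notebook):
# train = shuffled.take(80_000)
# test = shuffled.skip(80_000).take(20_000)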
###Output
_____no_output_____
###Markdown
Let's also figure out unique user ids and movie titles present in the data. This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
###Code
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
###Output
_____no_output_____
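To make the mapping concrete, here is a small illustrative sketch (not part of the lab tasks) of how a vocabulary turns a raw id into a contiguous integer that can index an embedding table; it uses the `StringLookup` preprocessing layer and the `unique_user_ids` array computed above:

```python
lookup = tf.keras.layers.experimental.preprocessing.StringLookup(
    vocabulary=unique_user_ids, mask_token=None)
# A raw user id becomes an integer in [1, len(vocabulary)]; that integer
# is then usable as a row index into an Embedding table.
print(lookup(tf.constant(["42"])))
```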
###Markdown
Lab Task 2: Implementing a model Architecture

Ranking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.

A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
###Code
class RankingModel(tf.keras.Model):
def __init__(self):
super().__init__()
embedding_dimension = 32
# Compute embeddings for users.
# TODO 2a -- your code goes here
# Compute embeddings for movies.
# TODO 2b -- your code goes here
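        # A possible implementation of both embedding towers (a sketch
        # mirroring the linked solution notebook): a StringLookup vocabulary
        # feeding an Embedding table, one tower for users and one for movies.
        # self.user_embeddings = tf.keras.Sequential([
        #     tf.keras.layers.experimental.preprocessing.StringLookup(
        #         vocabulary=unique_user_ids, mask_token=None),
        #     tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension)
        # ])
        # self.movie_embeddings = tf.keras.Sequential([
        #     tf.keras.layers.experimental.preprocessing.StringLookup(
        #         vocabulary=unique_movie_titles, mask_token=None),
        #     tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension)
        # ])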
# Compute predictions.
self.ratings = tf.keras.Sequential([
# Learn multiple dense layers.
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(64, activation="relu"),
# Make rating predictions in the final layer.
tf.keras.layers.Dense(1)
])
def call(self, inputs):
user_id, movie_title = inputs
user_embedding = self.user_embeddings(user_id)
movie_embedding = self.movie_embeddings(movie_title)
return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
###Output
_____no_output_____
###Markdown
This model takes user ids and movie titles, and outputs a predicted rating:
###Code
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
###Output
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: ['42']
Consider rewriting this model with the Functional API.
###Markdown
Loss and metrics

The next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.

In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation. We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
###Code
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
###Output
_____no_output_____
###Markdown
The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.

The full model

We can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is to set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value.

The base model will then take care of creating the appropriate training loop to fit our model.
###Code
class MovielensModel(tfrs.models.Model):
def __init__(self):
super().__init__()
self.ranking_model: tf.keras.Model = RankingModel()
self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
rating_predictions = self.ranking_model(
(features["user_id"], features["movie_title"]))
# The task computes the loss and the metrics.
return self.task(labels=features["user_rating"], predictions=rating_predictions)
###Output
_____no_output_____
###Markdown
Lab Task 3: Fitting and evaluating

After defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.

Let's first instantiate the model.
###Code
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
###Output
_____no_output_____
###Markdown
Then shuffle, batch, and cache the training and evaluation data.
###Code
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
###Output
_____no_output_____
###Markdown
Then train the model:
###Code
model.fit(cached_train, epochs=3)
###Output
Epoch 1/3
###Markdown
As the model trains, the loss is falling and the RMSE metric is improving. Finally, we can evaluate our model on the test set:
###Code
# TODO 3a -- your code goes here
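# One possible implementation (a sketch mirroring the linked solution notebook):
# model.evaluate(cached_test, return_dict=True)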
###Output
1/5 [=====>........................] - ETA: 1s - root_mean_squared_error: 1.1169 - loss: 1.2475 - regularization_loss: 0.0000e+00 - total_loss: 1.2475
###Markdown
Recommending movies: ranking

**Learning Objectives**

1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.

Introduction

The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.

The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.

Each learning objective will correspond to a _TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/recommendation_systems/soulutions/basic_ranking.ipynb)

Imports

Let's first get our imports out of the way.
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
###Output
_____no_output_____
###Markdown
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding with the lab.**
###Code
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
This notebook uses TF2.x. Please check your tensorflow version using the cell below.
###Code
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
###Output
TensorFlow version: 2.3.0
###Markdown
Preparing the dataset

We're going to use the same data as the [retrieval](basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
###Code
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
###Output
_____no_output_____
###Markdown
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
###Code
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
train = shuffled.take(80_000)
test = shuffled.skip(80_000).take(20_000)
###Output
_____no_output_____
###Markdown
Let's also figure out unique user ids and movie titles present in the data. This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
###Code
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
###Output
_____no_output_____
###Markdown
Implementing a model Architecture

Ranking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.

A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
###Code
class RankingModel(tf.keras.Model):
def __init__(self):
super().__init__()
embedding_dimension = 32
# Compute embeddings for users.
self.user_embeddings = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=unique_user_ids, mask_token=None),
tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension)
])
# Compute embeddings for movies.
self.movie_embeddings = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=unique_movie_titles, mask_token=None),
tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension)
])
# Compute predictions.
self.ratings = tf.keras.Sequential([
# Learn multiple dense layers.
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(64, activation="relu"),
# Make rating predictions in the final layer.
tf.keras.layers.Dense(1)
])
def call(self, inputs):
user_id, movie_title = inputs
user_embedding = self.user_embeddings(user_id)
movie_embedding = self.movie_embeddings(movie_title)
return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
###Output
_____no_output_____
###Markdown
This model takes user ids and movie titles, and outputs a predicted rating:
###Code
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
###Output
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: ['42']
Consider rewriting this model with the Functional API.
###Markdown
Loss and metrics

The next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.

In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation. We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
###Code
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
###Output
_____no_output_____
###Markdown
The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.

The full model

We can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is to set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value.

The base model will then take care of creating the appropriate training loop to fit our model.
###Code
class MovielensModel(tfrs.models.Model):
def __init__(self):
super().__init__()
self.ranking_model: tf.keras.Model = RankingModel()
self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
rating_predictions = self.ranking_model(
(features["user_id"], features["movie_title"]))
# The task computes the loss and the metrics.
return self.task(labels=features["user_rating"], predictions=rating_predictions)
###Output
_____no_output_____
###Markdown
Fitting and evaluating

After defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.

Let's first instantiate the model.
###Code
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
###Output
_____no_output_____
###Markdown
Then shuffle, batch, and cache the training and evaluation data.
###Code
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
###Output
_____no_output_____
###Markdown
Then train the model:
###Code
model.fit(cached_train, epochs=3)
###Output
Epoch 1/3
###Markdown
As the model trains, the loss is falling and the RMSE metric is improving. Finally, we can evaluate our model on the test set:
###Code
model.evaluate(cached_test, return_dict=True)
###Output
1/5 [=====>........................] - ETA: 1s - root_mean_squared_error: 1.1169 - loss: 1.2475 - regularization_loss: 0.0000e+00 - total_loss: 1.2475
###Markdown
Recommending movies: ranking

**Learning Objectives**

1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.

Introduction

The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.

The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.

Each learning objective will correspond to a _TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/recommendation_systems/soulutions/basic_ranking.ipynb)

Imports

Let's first get our imports out of the way.
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
###Output
_____no_output_____
###Markdown
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding with the lab.**
###Code
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
This notebook uses TF2.x. Please check your tensorflow version using the cell below.
###Code
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
###Output
TensorFlow version: 2.3.0
###Markdown
Lab Task 1: Preparing the dataset

We're going to use the same data as the [retrieval](basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
###Code
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
###Output
_____no_output_____
###Markdown
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
###Code
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
###Output
_____no_output_____
###Markdown
Let's also figure out unique user ids and movie titles present in the data. This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
###Code
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
###Output
_____no_output_____
###Markdown
Lab Task 2: Implementing a model Architecture

Ranking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.

A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
###Code
class RankingModel(tf.keras.Model):
def __init__(self):
super().__init__()
embedding_dimension = 32
# Compute embeddings for users.
# TODO 2a -- your code goes here
# Compute embeddings for movies.
# TODO 2b -- your code goes here
# Compute predictions.
self.ratings = tf.keras.Sequential([
# Learn multiple dense layers.
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(64, activation="relu"),
# Make rating predictions in the final layer.
tf.keras.layers.Dense(1)
])
def call(self, inputs):
user_id, movie_title = inputs
user_embedding = self.user_embeddings(user_id)
movie_embedding = self.movie_embeddings(movie_title)
return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
###Output
_____no_output_____
###Markdown
This model takes user ids and movie titles, and outputs a predicted rating:
###Code
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
###Output
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: ['42']
Consider rewriting this model with the Functional API.
###Markdown
Loss and metrics

The next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.

In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation. We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
###Code
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
###Output
_____no_output_____
###Markdown
The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.

The full model

We can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is to set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value.

The base model will then take care of creating the appropriate training loop to fit our model.
###Code
class MovielensModel(tfrs.models.Model):
def __init__(self):
super().__init__()
self.ranking_model: tf.keras.Model = RankingModel()
self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
rating_predictions = self.ranking_model(
(features["user_id"], features["movie_title"]))
# The task computes the loss and the metrics.
return self.task(labels=features["user_rating"], predictions=rating_predictions)
###Output
_____no_output_____
###Markdown
Lab Task 3: Fitting and evaluating

After defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.

Let's first instantiate the model.
###Code
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
###Output
_____no_output_____
###Markdown
Then shuffle, batch, and cache the training and evaluation data.
###Code
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
###Output
_____no_output_____
###Markdown
Then train the model:
###Code
model.fit(cached_train, epochs=3)
###Output
Epoch 1/3
###Markdown
As the model trains, the loss is falling and the RMSE metric is improving. Finally, we can evaluate our model on the test set:
###Code
# TODO 3a -- your code goes here
###Output
1/5 [=====>........................] - ETA: 1s - root_mean_squared_error: 1.1169 - loss: 1.2475 - regularization_loss: 0.0000e+00 - total_loss: 1.2475
###Markdown
Recommending movies: ranking

**Learning Objectives**

1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.

Introduction

The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.

The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.

Each learning objective will correspond to a _TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/recommendation_systems/soulutions/basic_ranking.ipynb)

Imports

Let's first get our imports out of the way.
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
###Output
_____no_output_____
###Markdown
Recommending movies: ranking

Learning Objectives

1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.

Introduction

The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.

The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.

Each learning objective will correspond to a _TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/basic_ranking.ipynb)

Imports

Let's first get our imports out of the way.
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
###Output
_____no_output_____
###Markdown
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding with the lab.**
###Code
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
This notebook uses TF2.x. Please check your tensorflow version using the cell below.
###Code
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
###Output
TensorFlow version: 2.6.0
###Markdown
Lab Task 1: Preparing the dataset

We're going to use the same data as the [retrieval](https://www.tensorflow.org/recommenders/examples/basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
###Code
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
###Output
Downloading and preparing dataset 4.70 MiB (download: 4.70 MiB, generated: 32.41 MiB, total: 37.10 MiB) to /home/jupyter/tensorflow_datasets/movielens/100k-ratings/0.1.0...
###Markdown
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
###Code
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
###Output
_____no_output_____
###Markdown
Let's also figure out unique user ids and movie titles present in the data. This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
###Code
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
###Output
_____no_output_____
###Markdown
Lab Task 2: Implementing a model Architecture

Ranking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.

A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
###Code
class RankingModel(tf.keras.Model):
def __init__(self):
super().__init__()
embedding_dimension = 32
# Compute embeddings for users.
# TODO 2a -- your code goes here
# Compute embeddings for movies.
# TODO 2b -- your code goes here
# Compute predictions.
self.ratings = tf.keras.Sequential([
# Learn multiple dense layers.
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(64, activation="relu"),
# Make rating predictions in the final layer.
tf.keras.layers.Dense(1)
])
def call(self, inputs):
user_id, movie_title = inputs
user_embedding = self.user_embeddings(user_id)
movie_embedding = self.movie_embeddings(movie_title)
return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
###Output
_____no_output_____
###Markdown
This model takes user ids and movie titles, and outputs a predicted rating:
###Code
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
###Output
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: ['42']
Consider rewriting this model with the Functional API.
###Markdown
Loss and metrics

The next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.

In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation. We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
###Code
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
###Output
_____no_output_____
###Markdown
The task itself is a Keras layer that takes true and predicted as arguments, and returns the computed loss. We'll use that to implement the model's training loop.

The full model

We can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is to set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value.

The base model will then take care of creating the appropriate training loop to fit our model.
###Code
class MovielensModel(tfrs.models.Model):
def __init__(self):
super().__init__()
self.ranking_model: tf.keras.Model = RankingModel()
self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
rating_predictions = self.ranking_model(
(features["user_id"], features["movie_title"]))
# The task computes the loss and the metrics.
return self.task(labels=features["user_rating"], predictions=rating_predictions)
###Output
_____no_output_____
###Markdown
Lab Task 3: Fitting and evaluating

After defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.

Let's first instantiate the model.
###Code
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
###Output
_____no_output_____
###Markdown
Then shuffle, batch, and cache the training and evaluation data.
###Code
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
###Output
_____no_output_____
###Markdown
Then train the model:
###Code
model.fit(cached_train, epochs=3)
###Output
Epoch 1/3
###Markdown
As the model trains, the loss is falling and the RMSE metric is improving. Finally, we can evaluate our model on the test set:
###Code
# TODO 3a -- your code goes here
###Output
1/5 [=====>........................] - ETA: 1s - root_mean_squared_error: 1.1169 - loss: 1.2475 - regularization_loss: 0.0000e+00 - total_loss: 1.2475
###Markdown
Recommending movies: ranking

Learning Objectives

1. Get our data and split it into a training and test set.
2. Implement a ranking model.
3. Fit and evaluate it.

Introduction

The retrieval stage is responsible for selecting an initial set of hundreds of candidates from all possible candidates. The main objective of this model is to efficiently weed out all candidates that the user is not interested in. Because the retrieval model may be dealing with millions of candidates, it has to be computationally efficient.

The ranking stage takes the outputs of the retrieval model and fine-tunes them to select the best possible handful of recommendations. Its task is to narrow down the set of items the user may be interested in to a shortlist of likely candidates.

Each learning objective will correspond to a _TODO_ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/basic_ranking.ipynb)

Imports

Let's first get our imports out of the way.
###Code
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
###Output
_____no_output_____
###Markdown
**Note: Please ignore the incompatibility errors and re-run the above cell before proceeding with the lab.**
###Code
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import os
import pprint
import tempfile
from typing import Dict, Text
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs
###Output
_____no_output_____
###Markdown
This notebook uses TF 2.x. Please check your TensorFlow version using the cell below.
###Code
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
###Output
TensorFlow version: 2.6.0
###Markdown
Lab Task 1: Preparing the datasetWe're going to use the same data as the [retrieval](https://www.tensorflow.org/recommenders/examples/basic_retrieval) tutorial. This time, we're also going to keep the ratings: these are the objectives we are trying to predict.
###Code
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(lambda x: {
"movie_title": x["movie_title"],
"user_id": x["user_id"],
"user_rating": x["user_rating"]
})
###Output
[1mDownloading and preparing dataset 4.70 MiB (download: 4.70 MiB, generated: 32.41 MiB, total: 37.10 MiB) to /home/jupyter/tensorflow_datasets/movielens/100k-ratings/0.1.0...[0m
###Markdown
As before, we'll split the data by putting 80% of the ratings in the train set, and 20% in the test set.
###Code
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)
# TODO 1a -- your code goes here
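# One possible completion (a sketch; the canonical answer is in the
# solution notebook): an 80/20 split of the 100k shuffled ratings.
train = shuffled.take(80_000)
test = shuffled.skip(80_000).take(20_000)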
###Output
_____no_output_____
###Markdown
Let's also figure out unique user ids and movie titles present in the data. This is important because we need to be able to map the raw values of our categorical features to embedding vectors in our models. To do that, we need a vocabulary that maps a raw feature value to an integer in a contiguous range: this allows us to look up the corresponding embeddings in our embedding tables.
###Code
movie_titles = ratings.batch(1_000_000).map(lambda x: x["movie_title"])
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])
unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))
###Output
_____no_output_____
###Markdown
Lab Task 2: Implementing a model ArchitectureRanking models do not face the same efficiency constraints as retrieval models do, and so we have a little bit more freedom in our choice of architectures.A model composed of multiple stacked dense layers is a relatively common architecture for ranking tasks. We can implement it as follows:
###Code
class RankingModel(tf.keras.Model):
def __init__(self):
super().__init__()
embedding_dimension = 32
# Compute embeddings for users.
# TODO 2a -- your code goes here
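        # One possible completion (a sketch following the public TFRS
        # retrieval tutorial; see the solution notebook for the canonical
        # answer). The extra embedding row handles unknown user ids.
        self.user_embeddings = tf.keras.Sequential([
            tf.keras.layers.StringLookup(
                vocabulary=unique_user_ids, mask_token=None),
            tf.keras.layers.Embedding(
                len(unique_user_ids) + 1, embedding_dimension)
        ])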
# Compute embeddings for movies.
# TODO 2b -- your code goes here
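        # Same sketch for movie titles, again reserving one extra embedding
        # row for out-of-vocabulary titles.
        self.movie_embeddings = tf.keras.Sequential([
            tf.keras.layers.StringLookup(
                vocabulary=unique_movie_titles, mask_token=None),
            tf.keras.layers.Embedding(
                len(unique_movie_titles) + 1, embedding_dimension)
        ])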
# Compute predictions.
self.ratings = tf.keras.Sequential([
# Learn multiple dense layers.
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(64, activation="relu"),
# Make rating predictions in the final layer.
tf.keras.layers.Dense(1)
])
def call(self, inputs):
user_id, movie_title = inputs
user_embedding = self.user_embeddings(user_id)
movie_embedding = self.movie_embeddings(movie_title)
return self.ratings(tf.concat([user_embedding, movie_embedding], axis=1))
###Output
_____no_output_____
###Markdown
This model takes user ids and movie titles, and outputs a predicted rating:
###Code
RankingModel()((["42"], ["One Flew Over the Cuckoo's Nest (1975)"]))
###Output
WARNING:tensorflow:Layers in a Sequential model should only have a single input tensor, but we receive a <class 'list'> input: ['42']
Consider rewriting this model with the Functional API.
###Markdown
Loss and metricsThe next component is the loss used to train our model. TFRS has several loss layers and tasks to make this easy.In this instance, we'll make use of the `Ranking` task object: a convenience wrapper that bundles together the loss function and metric computation. We'll use it together with the `MeanSquaredError` Keras loss in order to predict the ratings.
###Code
task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
###Output
_____no_output_____
###Markdown
The task itself is a Keras layer that takes the true and predicted values as arguments, and returns the computed loss. We'll use that to implement the model's training loop. The full modelWe can now put it all together into a model. TFRS exposes a base model class (`tfrs.models.Model`) which streamlines building models: all we need to do is set up the components in the `__init__` method, and implement the `compute_loss` method, taking in the raw features and returning a loss value. The base model will then take care of creating the appropriate training loop to fit our model.
###Code
class MovielensModel(tfrs.models.Model):
def __init__(self):
super().__init__()
self.ranking_model: tf.keras.Model = RankingModel()
self.task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
rating_predictions = self.ranking_model(
(features["user_id"], features["movie_title"]))
# The task computes the loss and the metrics.
return self.task(labels=features["user_rating"], predictions=rating_predictions)
###Output
_____no_output_____
###Markdown
Lab Task 3: Fitting and evaluatingAfter defining the model, we can use standard Keras fitting and evaluation routines to fit and evaluate the model.Let's first instantiate the model.
###Code
model = MovielensModel()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
###Output
_____no_output_____
###Markdown
Then shuffle, batch, and cache the training and evaluation data.
###Code
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
###Output
_____no_output_____
###Markdown
Then train the model:
###Code
model.fit(cached_train, epochs=3)
###Output
Epoch 1/3
###Markdown
As the model trains, the loss is falling and the RMSE metric is improving. Finally, we can evaluate our model on the test set:
###Code
# TODO 3a -- your code goes here
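# One possible completion (a sketch; see the solution notebook):
model.evaluate(cached_test, return_dict=True)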
###Output
1/5 [=====>........................] - ETA: 1s - root_mean_squared_error: 1.1169 - loss: 1.2475 - regularization_loss: 0.0000e+00 - total_loss: 1.2475 |
21_seguridad_y_autenticacion_basicas.ipynb | ###Markdown
[](https://pythonista.io) Basic security and authentication. Out of the box, *Django* ships with a simple system for:* User management.* Group management.* Basic authentication.* Session management.The corresponding documentation can be consulted at the following link:https://docs.djangoproject.com/en/3.0/topics/auth/**Note:** The scope of this course only covers basic user management and simple authentication. Associated applications and middleware. The applications installed for managing users and permissions are:* ```django.contrib.auth``` for managing users and groups.* ```django.contrib.contenttypes``` for assigning permissions to the models created by the developer.
###Code
from tutorial.tutorial import settings
settings.INSTALLED_APPS
###Output
_____no_output_____
###Markdown
The middleware *Django* uses by default is:* ```django.contrib.sessions.middleware.SessionMiddleware```, which manages sessions.* ```django.contrib.auth.middleware.AuthenticationMiddleware```, which performs authentication and access authorization.
###Code
settings.MIDDLEWARE
###Output
_____no_output_____
###Markdown
The ```django.contrib.auth.models.User``` model. Users and groups in *Django* are created through predefined models.* The ```django.contrib.auth.models.User``` class is a subclass of ```django.db.models.Model``` and corresponds to the basic model of a *Django* user. From this point on, this class will be referred to as ```User```. The ```User.objects``` class attribute.Being a subclass of ```django.db.models.Model```, the ```User``` model has the ```objects``` class attribute, which can perform query, creation, and deletion operations on instances through methods such as:* ```all()```.* ```get()```.* ```filter()```. The ```User.objects.create_user()``` method.This method creates a user account and saves it to the database.``` User.objects.create_user('<username>', '<email>', '<password>')``` Attributes of ```User``` instances.Instances of the ```User``` model have the following attributes:* ```id```, which is a numeric identifier that *Django* assigns to the model when it is instantiated.* ```username```, which is the user name. This attribute is required to create the model.* ```password```, which is the user's password. This attribute is required to create the model.* ```first_name```, which is the user's first name.* ```last_name```, which is the user's last name.* ```email```, which is the user's e-mail address.* ```is_superuser```, which is a ```bool``` value indicating whether the user is a superuser.* ```is_staff```, which is a ```bool``` value indicating whether the user is part of the organization's staff.* ```is_active```, which is a ```bool``` value indicating whether the user is active.* ```last_login```, which is a ```datetime.datetime``` value with the date and time of the user's last login.* ```date_joined```, which is a ```datetime.datetime``` value with the date and time the user was created. Illustrative example of creating a user from the shell.``` ipythonIn [1]: from django.contrib.auth.models import UserIn [2]: User.objects.all()Out[2]: <QuerySet []>In [3]: User.objects.create_user('josech', '[email protected]', '0p3n5t4ck')Out[3]: <User: josech>In [4]: User.objects.all()Out[4]: <QuerySet [<User: josech>]>In [5]: usuario = User.objects.all()[0]In [6]: usuarioOut[6]: <User: josech>In [7]: usuario.is_superuserOut[7]: FalseIn [8]: usuario.is_superuser=True``` Creating a superuser with the ```manage.py``` script.To create a superuser, use the following command from a terminal located in the project directory.```python manage.py createsuperuser --email="<email>" --user="<username>"``` **Example:**```python manage.py createsuperuser --email="[email protected]" --user="admin"``` Tables associated with users and groups in the database.User management is tied to several tables of the database associated with *Django*, which were created when running the ```manage.py migrate``` command.These tables have the ```auth_``` prefix:* ```auth_user```.* ```auth_group```.* ```auth_permission```.* ```auth_group_permissions```.* ```auth_user_groups```.* ```auth_user_user_permissions```. The ```django.contrib.auth.urls``` module.This module defines the following predefined *URL* patterns to manage a user's access:* ```'login/'``` logs a user into a session. 
* ```'logout/'``` logs a user out of a session.* ```'password_change/'```, which allows a user to change their password.* ```'password_reset/'```, which allows a password to be recovered.* ```'reset/<uidb64>/<token>/'```, which allows a user's password to be reset.* ```'password_change/done/'```.* ```'password_reset/done/'```.* ```'reset/done/'```.For these patterns to be reachable, a pattern must be included in the ```urlpatterns``` object of the project's ```urls.py``` script as follows:``` path('<route>/', include('django.contrib.auth.urls'))```* Where ```<route>``` corresponds to the path from which each *URL* defined by ```django.contrib.auth.urls``` will be accessed. With the exception of ```'login/'```, each view function related to each pattern has a predefined template. However, it is possible to create custom templates.By default, the *URLs* of this module will look for the corresponding templates in the ```registration/``` directory, which in turn is located in the directory defined for the project's templates. Configuring the view function for the ```login/``` *URL*.In the case of the *URL* tied to the ```'login/'``` rule, it is necessary to create a template at the path ```registration/login.html``` that submits the username and password.An example of this template can be consulted at https://docs.djangoproject.com/en/2.2/topics/auth/default/all-authentication-views. Redirecting after a successful login.When the submitted credentials are correct, *Django* will redirect the browser to the ```'/accounts/profile/'``` path. To specify a redirect to a different *URL*, it must be defined under the name ```LOGIN_REDIRECT_URL``` in the project's ```settings.py``` script.```LOGIN_REDIRECT_URL = '<URL>'``` Illustrative example. Defining the *URL* pattern.The ```src/21/urls.py``` file contains the following ```urlpatterns``` definition, linking the ```django.contrib.auth.urls``` module to the ```accounts/``` route.```pythonurlpatterns = [path('admin/', admin.site.urls), path('main/', include('main.urls')), path('api/', include('api.urls')), path('accounts/', include('django.contrib.auth.urls')), ]``` * The ```tutorial/tutorial/urls.py``` script will be replaced with ```src/21/urls.py```. * For the Linux and MacOS X platforms.
###Code
!cp src/21/urls.py tutorial/tutorial/urls.py
###Output
_____no_output_____
###Markdown
* For the Windows platform.
###Code
!copy src\21\urls.py tutorial\tutorial\urls.py
###Output
_____no_output_____
###Markdown
* The following cell will display the replaced file.
###Code
%pycat tutorial/tutorial/urls.py
###Output
_____no_output_____
###Markdown
Creating the ```registration/``` subdirectory.* The following cell will create the ```registration/``` directory inside the ```tutorial/templates``` directory.
###Code
%mkdir tutorial/templates/registration
%mkdir tutorial\templates\registration
###Output
_____no_output_____
###Markdown
The ```registration/login.html``` template.The ```src/21/login.html``` template contains the following code:``` html{% extends "base.html" %}{% block cuerpo %}{% if form.errors %} Ingresó una contraseña incorrecta. Vuelva a intentar.{% endif %}{% if next %} {% if user.is_authenticated %} Su cuenta no tiene acceso a esta página. Vuelva a intentar con un usuario autorizado. {% else %} Por favor ingrese con usuario con los permisos adecuados. {% endif %}{% endif %}{% csrf_token %} {{ form.username.label_tag }} {{ form.username }} {{ form.password.label_tag }} {{ form.password }}{# Assumes you setup the password_reset view in your URLconf #}¿Perdió su contraseña?{% endblock %}``` * Next, the ```src/21/login.html``` template will be copied into the ```tutorial/templates/registration/``` directory. * For the Linux and MacOS X platforms.
###Code
%cp src/21/login.html tutorial/templates/registration/
###Output
_____no_output_____
###Markdown
* For the Windows platform.
###Code
%copy src\21\login.html tutorial\templates\registration\
###Output
_____no_output_____
###Markdown
* The following cell will display the contents of ```tutorial/templates/registration/login.html```
###Code
%pycat tutorial/templates/registration/login.html
###Output
_____no_output_____
###Markdown
Configuring the successful-login redirect.The ```src/21/settings.py``` script defines ```/api/``` as the path the browser will be redirected to when login succeeds.``` pythonLOGIN_REDIRECT_URL = '/api/'``` * Next, the ```tutorial/tutorial/settings.py``` script will be replaced with ```src/21/settings.py```. * For the Linux and MacOS X platforms.
###Code
%cp src/21/settings.py tutorial/tutorial/settings.py
###Output
_____no_output_____
###Markdown
* For the Windows platform.
###Code
%copy src\21\settings.py tutorial\tutorial\settings.py
###Output
_____no_output_____
###Markdown
The following cell will show the result of the replacement in ```tutorial/tutorial/settings.py```.
###Code
%pycat tutorial/tutorial/settings.py
###Output
_____no_output_____ |
Tutorial-08/TUT8-3-rnn-concise.ipynb | ###Markdown
Concise Implementation of RNNsNow we will see how to implement the same language model more efficiently using functions provided by high-level PyTorch APIs. We begin as before by reading the time machine dataset.
###Code
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
###Output
Downloading ../data/timemachine.txt from http://d2l-data.s3-accelerate.amazonaws.com/timemachine.txt...
###Markdown
[**Defining the Model**]PyTorch APIs provide implementations of recurrent neural networks. We construct the recurrent neural network layer `rnn_layer` with a single hidden layer and 256 hidden units.
###Code
num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)
###Output
_____no_output_____
###Markdown
We (**use a tensor to initialize the hidden state**), whose shape is (number of hidden layers, batch size, number of hidden units).
###Code
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
###Output
_____no_output_____
###Markdown
[**With a hidden state and an input, we can compute the output with the updated hidden state.**] Here, it should be emphasized that the "output" (`Y`) of `rnn_layer` does *not* involve computation of output layers: it refers to the hidden state at *each* time step, and these hidden states can be used as the input to the subsequent output layer.
###Code
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape
###Output
_____no_output_____
###Markdown
Similarly, [**we define an `RNNModel` class for a complete RNN model.**] Note that `rnn_layer` only contains the hidden recurrent layers; we need to create a separate output layer.
###Code
class RNNModel(nn.Module):
"""The RNN model."""
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.num_hiddens = self.rnn.hidden_size
# If the RNN is bidirectional (to be introduced later),
# `num_directions` should be 2, else it should be 1.
if not self.rnn.bidirectional:
self.num_directions = 1
self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
else:
self.num_directions = 2
self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
def forward(self, inputs, state):
X = F.one_hot(inputs.T.long(), self.vocab_size)
X = X.to(torch.float32)
Y, state = self.rnn(X, state)
# The fully connected layer will first change the shape of `Y` to
# (`num_steps` * `batch_size`, `num_hiddens`). Its output shape is
# (`num_steps` * `batch_size`, `vocab_size`).
output = self.linear(Y.reshape((-1, Y.shape[-1])))
return output, state
def begin_state(self, device, batch_size=1):
if not isinstance(self.rnn, nn.LSTM):
# `nn.GRU` takes a tensor as hidden state
return torch.zeros((self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens),
device=device)
else:
# `nn.LSTM` takes a tuple of hidden states
return (torch.zeros((
self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens), device=device),
torch.zeros((
self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens), device=device))
###Output
_____no_output_____
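###Markdown
As a quick shape check (an added sketch; `net_check` and `tokens` are illustrative names, not part of the original text), the wrapper maps a (batch size, number of time steps) tensor of token indices to per-token vocabulary logits:
###Code
net_check = RNNModel(rnn_layer, vocab_size=len(vocab))
# Token indices of shape (`batch_size`, `num_steps`), as `forward` expects.
tokens = torch.randint(0, len(vocab), (batch_size, num_steps))
logits, new_state = net_check(
    tokens, net_check.begin_state(device='cpu', batch_size=batch_size))
logits.shape  # (`num_steps` * `batch_size`, len(vocab))
###Output
_____no_output_____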
###Markdown
Training and PredictingBefore training the model, let us [**make a prediction with a model that has random weights.**]
###Code
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
d2l.predict_ch8('time traveller', 10, net, vocab, device)
###Output
_____no_output_____
###Markdown
As is quite obvious, this model does not work at all. Next, we call `train_ch8` with the same hyperparameters defined before and [**train our model with PyTorch APIs**].
###Code
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
###Output
perplexity 1.4, 107950.9 tokens/sec on cuda:0
time travellerit mounter onead to asc alt the bery ix arovur foo
traveller amtter i beer we briggt so mentas ig the three di
|
coarsegrainer/examples/tutorial.ipynb | ###Markdown
RSMI optimisation for the dimer model Download sample dataset
###Code
url = 'https://polybox.ethz.ch/index.php/s/bUp9a5qZWuLGXMb/download'
filename = 'configs_intdimer2d_square_L64_T15.000.npy'
data_dir = os.path.join(os.pardir, 'data')
if os.path.isfile(os.path.join(data_dir, filename)):
print('Existing dataset found.')
else:
print('No existing dataset found.')
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
print('Created the data directory.')
print('Downloading data...')
r = requests.get(url, allow_redirects=True)
open(os.path.join(data_dir, filename), 'wb').write(r.content)
print('Data downloaded into /coarsegrainer/data/.')
###Output
Existing dataset found.
###Markdown
Enter system parameters
###Code
data_params = {
'model': 'intdimer2d',
'lattice_type': 'square',
'L': 64,
'T': 15.000,
'N_samples': 28800,
'dimension': 2,
}
generator=ds.dataset(**data_params)
print(data_params)
###Output
Existing data found.
Loading the data...
Loading complete.
{'model': 'intdimer2d', 'lattice_type': 'square', 'L': 64, 'T': 15.0, 'N_samples': 28800, 'dimension': 2}
###Markdown
Enter optimisation parameters
###Code
CG_params = {'init_temperature': 0.75,
'min_temperature': 0.1,
             'relaxation_rate': 0.0001, # was 0.0005 above the BKT transition, 0.002 below it
'Nq': None,
'conv_activation': None,
'num_hiddens': 2,
'h_embed': True,
'use_probs': False,
'use_logits': True
}
ll = CG_params['ll'] = (8,8)
critic_params = {
'layers': 2,
'embed_dim': 8,
'hidden_dim': 32,
'activation': 'relu',
}
opt_params = {
"batch_size": 500, # was 800 for larger buffers
"iterations": 250, # was 400 for above BKT, 25 for below BKT
"shuffle": 100000,
"learning_rate": 9e-3 # was 4e-3
}
index = (10, 10) # index of the visible patch to be coarse-grained
buffer_size = 4
env_size = 4
V, E = generator.rsmi_data(index, ll, buffer_size=buffer_size, cap=ll[0]+2*(buffer_size+env_size))
###Output
Preparing the RSMI dataset...
RSMI dataset prepared.
###Markdown
Perform the optimisation and plot results
###Code
estimates, _, filters, _ = cg_opt.train_RSMI_optimiser(CG_params, critic_params, opt_params, data_params,
E=E, V=V)
print('RSMI estimate is ', cg_opt.RSMI_estimate(estimates, ema_span=100))
plotter.plot_fancy_rsmimax(estimates, filters, opt_params, CG_params,
generator, N_samples=data_params['N_samples'],
mi_bound='InfoNCE', filter_lim=1.3, EMA_span=80, save=False,
series_skip=data_params['N_samples']//(opt_params['batch_size']*4)*opt_params['iterations'],
interpolation='hanning', cmap='RdBu')
print(CG_params)
print(critic_params)
print(opt_params)
###Output
RSMI estimate is 0.2990450126009397
|
04_user_guide/26_reshaping_pivot.ipynb | ###Markdown
Reshaping and pivot tables
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Reshaping by pivoting DataFrame objects For the curious, here is how the example `DataFrame` used in this section was created:
###Code
import pandas._testing as tm
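# Melt a wide frame into stacked (date, variable, value) records.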
def unpivot(frame):
N, K = frame.shape
data = {
"value": frame.to_numpy().ravel("F"),
"variable": np.asarray(frame.columns).repeat(N),
"date": np.tile(np.asarray(frame.index), K),
}
columns = ["date", "variable", "value"]
return pd.DataFrame(data, columns=columns)
df = unpivot(tm.makeTimeDataFrame(3))
###Output
_____no_output_____
###Markdown
Data is often stored in so-called “stacked” or “record” format:
###Code
df
###Output
_____no_output_____
###Markdown
To select out everything for variable `A` we could do:
###Code
df[df["variable"] == "A"]
###Output
_____no_output_____
###Markdown
But suppose we wish to do time series operations with the variables. A better representation would be where the `columns` are the unique variables and an `index` of dates identifies individual observations. To reshape the data into this form, we use the `DataFrame.pivot()` method (also implemented as a top-level function `pivot()`):
###Code
df.pivot(index="date", columns="variable", values="value")
###Output
_____no_output_____
###Markdown
If the `values` argument is omitted, and the input `DataFrame` has more than one column of values which are not used as column or index inputs to `pivot`, then the resulting “pivoted” `DataFrame` will have hierarchical columns whose topmost level indicates the respective value column:
###Code
df["value2"] = df["value"] * 2
pivoted = df.pivot(index="date", columns="variable")
pivoted
###Output
_____no_output_____
###Markdown
You can then select subsets from the pivoted `DataFrame`:
###Code
pivoted["value2"]
###Output
_____no_output_____
###Markdown
Note that this returns a view on the underlying data in the case where the data are homogeneously-typed.>**Note**>>`pivot()` will error with a `ValueError: Index contains duplicate entries, cannot reshape` if the index/column pair is not unique. In this case, consider using `pivot_table()` which is a generalization of pivot that can handle duplicate values for one index/column pair. Reshaping by stacking and unstackingClosely related to the `pivot()` method are the `stack()` and `unstack()` methods available on `Series` and `DataFrame`. These methods are designed to work together with `MultiIndex` objects (see the section on hierarchical indexing). Here is essentially what these methods do:- `stack`: “pivot” a level of the (possibly hierarchical) column labels, returning a `DataFrame` with an index with a new inner-most level of row labels. - `unstack`: (inverse operation of `stack`) “pivot” a level of the (possibly hierarchical) row index to the column axis, producing a reshaped `DataFrame` with a new inner-most level of column labels. The clearest way to explain is by example. Let’s take a prior example data set from the hierarchical indexing section:
###Code
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=["A", "B"])
df2 = df[:4]
df2
###Output
_____no_output_____
###Markdown
The `stack` function “compresses” a level in the `DataFrame`’s columns to produce either:- A `Series`, in the case of a simple column Index. - A `DataFrame`, in the case of a `MultiIndex` in the columns. If the columns have a `MultiIndex`, you can choose which level to stack. The stacked level becomes the new lowest level in a `MultiIndex` on the columns:
###Code
stacked = df2.stack()
stacked
###Output
_____no_output_____
###Markdown
With a “stacked” `DataFrame` or `Series` (having a `MultiIndex` as the `index`), the inverse operation of `stack` is `unstack`, which by default unstacks the **last level**:
###Code
stacked.unstack()
stacked.unstack(1)
stacked.unstack(0)
###Output
_____no_output_____
###Markdown
If the indexes have names, you can use the level names instead of specifying the level numbers:
###Code
stacked.unstack("second")
###Output
_____no_output_____
###Markdown
Notice that the `stack` and `unstack` methods implicitly sort the index levels involved. Hence a call to `stack` and then `unstack`, or vice versa, will result in a **sorted** copy of the original `DataFrame` or `Series`:
###Code
index = pd.MultiIndex.from_product([[2, 1], ["a", "b"]])
df = pd.DataFrame(np.random.randn(4), index=index, columns=["A"])
df
all(df.unstack().stack() == df.sort_index())
###Output
_____no_output_____
###Markdown
The above code will raise a `TypeError` if the call to `sort_index` is removed. Multiple levelsYou may also stack or unstack more than one level at a time by passing a list of levels, in which case the end result is as if each level in the list were processed individually.
###Code
columns = pd.MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = pd.DataFrame(np.random.randn(4, 4), columns=columns)
df
df.stack(level=["animal", "hair_length"])
###Output
_____no_output_____
###Markdown
The list of levels can contain either level names or level numbers (but not a mixture of the two).
###Code
# Stacking single levels by their numbers:
df.stack(level=[1])
df.stack(level=[2])
# df.stack(level=['animal', 'hair_length']) from above is equivalent to:
df.stack(level=[1, 2])
###Output
_____no_output_____
###Markdown
Missing dataThese functions are intelligent about handling missing data and do not expect each subgroup within the hierarchical index to have the same set of labels. They also can handle the index being unsorted (but you can make it sorted by calling `sort_index`, of course). Here is a more complex example:
###Code
columns = pd.MultiIndex.from_tuples(
[
("A", "cat"),
("B", "dog"),
("B", "cat"),
("A", "dog"),
],
names=["exp", "animal"],
)
index = pd.MultiIndex.from_product(
[("bar", "baz", "foo", "qux"), ("one", "two")], names=["first", "second"]
)
df = pd.DataFrame(np.random.randn(8, 4), index=index, columns=columns)
df2 = df.iloc[[0, 1, 2, 4, 5, 7]]
df2
###Output
_____no_output_____
###Markdown
As mentioned above, `stack` can be called with a `level` argument to select which level in the columns to stack:
###Code
df2.stack("exp")
df2.stack("animal")
###Output
_____no_output_____
###Markdown
Unstacking can result in missing values if subgroups do not have the same set of labels. By default, missing values will be replaced with the default fill value for that data type, `NaN` for float, `NaT` for datetimelike, etc. For integer types, by default data will be converted to float and missing values will be set to `NaN`.
###Code
df3 = df.iloc[[0, 1, 4, 7], [1, 2]]
df3
df3.unstack()
###Output
_____no_output_____
###Markdown
Alternatively, unstack takes an optional `fill_value` argument, for specifying the value of missing data.
###Code
df3.unstack(fill_value=-1e9)
###Output
_____no_output_____
###Markdown
With a MultiIndexUnstacking when the columns are a `MultiIndex` is also careful about doing the right thing:
###Code
df[:3].unstack(0)
df2.unstack(1)
###Output
_____no_output_____
###Markdown
Reshaping by meltThe top-level `melt()` function and the corresponding `DataFrame.melt()` are useful to massage a `DataFrame` into a format where one or more columns are *identifier variables*, while all other columns, considered *measured variables*, are “unpivoted” to the row axis, leaving just two non-identifier columns, “variable” and “value”. The names of those columns can be customized by supplying the `var_name` and `value_name` parameters. For instance,
###Code
cheese = pd.DataFrame(
{
"first": ["John", "Mary"],
"last": ["Doe", "Bo"],
"height": [5.5, 6.0],
"weight": [130, 150],
}
)
cheese
cheese.melt(id_vars=["first", "last"])
cheese.melt(id_vars=["first", "last"], var_name="quantity")
###Output
_____no_output_____
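###Markdown
As a quick added illustration (the column name "reading" is arbitrary), the `value_name` parameter renames the resulting values column in the same way:
###Code
cheese.melt(id_vars=["first", "last"], var_name="quantity", value_name="reading")
###Output
_____no_output_____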
###Markdown
When transforming a DataFrame using `melt()`, the index will be ignored. The original index values can be kept around by setting the `ignore_index` parameter to `False` (default is `True`). This will, however, duplicate them. New in version 1.1.0.
###Code
index = pd.MultiIndex.from_tuples([("person", "A"), ("person", "B")])
cheese = pd.DataFrame(
{
"first": ["John", "Mary"],
"last": ["Doe", "Bo"],
"height": [5.5, 6.0],
"weight": [130, 150],
},
index=index,
)
cheese
cheese.melt(id_vars=["first", "last"])
cheese.melt(id_vars=["first", "last"], ignore_index=False)
###Output
_____no_output_____
###Markdown
Another way to transform is to use the `wide_to_long()` panel data convenience function. It is less flexible than `melt()`, but more user-friendly.
###Code
dft = pd.DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), np.random.randn(3))),
}
)
dft["id"] = dft.index
dft
pd.wide_to_long(dft, ["A", "B"], i="id", j="year")
###Output
_____no_output_____
###Markdown
Combining with stats and GroupByIt should be no shock that combining `pivot` / `stack` / `unstack` with GroupBy and the basic Series and DataFrame statistical functions can produce some very expressive and fast data manipulations.
###Code
df
df.stack().mean(1).unstack()
# same result, another way
df.groupby(level=1, axis=1).mean()
df.stack().groupby(level=1).mean()
df.mean().unstack(0)
###Output
_____no_output_____
###Markdown
Pivot tablesWhile `pivot()` provides general purpose pivoting with various data types (strings, numerics, etc.), pandas also provides `pivot_table()` for pivoting with aggregation of numeric data. The function `pivot_table()` can be used to create spreadsheet-style pivot tables. See the [cookbook](44_cookbook.ipynbcookbook-pivot) for some advanced strategies. It takes a number of arguments:- `data`: a DataFrame object. - `values`: a column or a list of columns to aggregate. - `index`: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table index. If an array is passed, it is used in the same manner as column values. - `columns`: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table column. If an array is passed, it is used in the same manner as column values. - `aggfunc`: function to use for aggregation, defaulting to `numpy.mean`. Consider a data set like this:
###Code
import datetime
df = pd.DataFrame(
{
"A": ["one", "one", "two", "three"] * 6,
"B": ["A", "B", "C"] * 8,
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
"D": np.random.randn(24),
"E": np.random.randn(24),
"F": [datetime.datetime(2013, i, 1) for i in range(1, 13)]
+ [datetime.datetime(2013, i, 15) for i in range(1, 13)],
}
)
df
###Output
_____no_output_____
###Markdown
We can produce pivot tables from this data very easily:
###Code
pd.pivot_table(df, values="D", index=["A", "B"], columns=["C"])
pd.pivot_table(df, values="D", index=["B"], columns=["A", "C"], aggfunc=np.sum)
pd.pivot_table(
df, values=["D", "E"],
index=["B"],
columns=["A", "C"],
aggfunc=np.sum,
)
###Output
_____no_output_____
###Markdown
The result object is a `DataFrame` having potentially hierarchical indexes on the rows and columns. If the `values` column name is not given, the pivot table will include all of the data that can be aggregated in an additional level of hierarchy in the columns:
###Code
pd.pivot_table(df, index=["A", "B"], columns=["C"])
###Output
_____no_output_____
###Markdown
Also, you can use `Grouper` for `index` and `columns` keywords. For detail of `Grouper`, see [Grouping with a Grouper specification](36_groupby.ipynbgroupby-specify).
###Code
pd.pivot_table(df, values="D", index=pd.Grouper(freq="M", key="F"), columns="C")
###Output
_____no_output_____
###Markdown
You can render a nice output of the table omitting the missing values by calling `to_string` if you wish:
###Code
table = pd.pivot_table(df, index=["A", "B"], columns=["C"])
print(table.to_string(na_rep=""))
###Output
_____no_output_____
###Markdown
Note that `pivot_table` is also available as an instance method on DataFrame, i.e. `DataFrame.pivot_table()`. Adding marginsIf you pass `margins=True` to `pivot_table`, special `All` columns and rows will be added with partial group aggregates across the categories on the rows and columns:
###Code
df.pivot_table(index=["A", "B"], columns="C", margins=True, aggfunc=np.std)
###Output
_____no_output_____
###Markdown
Cross tabulationsUse `crosstab()` to compute a cross-tabulation of two (or more) factors. By default `crosstab` computes a frequency table of the factors unless an array of values and an aggregation function are passed. It takes a number of arguments:- `index`: array-like, values to group by in the rows. - `columns`: array-like, values to group by in the columns. - `values`: array-like, optional, array of values to aggregate according to the factors. - `aggfunc`: function, optional. If no values array is passed, computes a frequency table. - `rownames`: sequence, default `None`, must match number of row arrays passed. - `colnames`: sequence, default `None`, if passed, must match number of column arrays passed. - `margins`: boolean, default `False`. Add row/column margins (subtotals). - `normalize`: boolean, {‘all’, ‘index’, ‘columns’}, or {0,1}, default `False`. Normalize by dividing all values by the sum of values. Any `Series` passed will have their name attributes used unless row or column names for the cross-tabulation are specified. For example:
###Code
foo, bar, dull, shiny, one, two = "foo", "bar", "dull", "shiny", "one", "two"
a = np.array([foo, foo, bar, bar, foo, foo], dtype=object)
b = np.array([one, one, two, one, two, one], dtype=object)
c = np.array([dull, dull, shiny, dull, dull, shiny], dtype=object)
pd.crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"])
###Output
_____no_output_____
###Markdown
If `crosstab` receives only two Series, it will provide a frequency table.
###Code
df = pd.DataFrame(
{"A": [1, 2, 2, 2, 2], "B": [3, 3, 4, 4, 4], "C": [1, 1, np.nan, 1, 1]}
)
df
pd.crosstab(df["A"], df["B"])
###Output
_____no_output_____
###Markdown
`crosstab` can also be applied to `Categorical` data.
###Code
foo = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
bar = pd.Categorical(["d", "e"], categories=["d", "e", "f"])
pd.crosstab(foo, bar)
###Output
_____no_output_____
###Markdown
If you want to include **all** of the data categories even if the actual data does not contain any instances of a particular category, you should set `dropna=False`. For example:
###Code
pd.crosstab(foo, bar, dropna=False)
###Output
_____no_output_____
###Markdown
NormalizationFrequency tables can also be normalized to show percentages rather than counts using the `normalize` argument:
###Code
pd.crosstab(df["A"], df["B"], normalize=True)
###Output
_____no_output_____
###Markdown
`normalize` can also normalize values within each row or within each column:
###Code
pd.crosstab(df["A"], df["B"], normalize="columns")
###Output
_____no_output_____
###Markdown
`crosstab` can also be passed a third `Series` and an aggregation function (`aggfunc`) that will be applied to the values of the third `Series` within each group defined by the first two `Series`:
###Code
pd.crosstab(df["A"], df["B"], values=df["C"], aggfunc=np.sum)
###Output
_____no_output_____
###Markdown
Adding marginsFinally, one can also add margins or normalize this output.
###Code
pd.crosstab(
df["A"], df["B"], values=df["C"], aggfunc=np.sum, normalize=True, margins=True
)
###Output
_____no_output_____
###Markdown
TilingThe `cut()` function computes groupings for the values of the input array and is often used to transform continuous variables to discrete or categorical variables:
###Code
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
pd.cut(ages, bins=3)
###Output
_____no_output_____
###Markdown
If the `bins` keyword is an integer, then equal-width bins are formed. Alternatively, we can specify custom bin-edges:
###Code
c = pd.cut(ages, bins=[0, 18, 35, 70])
c
###Output
_____no_output_____
###Markdown
If the `bins` keyword is an `IntervalIndex`, then these will be used to bin the passed data:
###Code
pd.cut([25, 20, 50], bins=c.categories)
###Output
_____no_output_____
###Markdown
Computing indicator / dummy variablesTo convert a categorical variable into a “dummy” or “indicator” `DataFrame`, for example a column in a `DataFrame` (a `Series`) which has `k` distinct values, you can derive a `DataFrame` containing `k` columns of 1s and 0s using `get_dummies()`:
###Code
df = pd.DataFrame({"key": list("bbacab"), "data1": range(6)})
df
pd.get_dummies(df["key"])
###Output
_____no_output_____
###Markdown
Sometimes it’s useful to prefix the column names, for example when merging the result with the original `DataFrame`:
###Code
dummies = pd.get_dummies(df["key"], prefix="key")
dummies
df[["data1"]].join(dummies)
###Output
_____no_output_____
###Markdown
This function is often used along with discretization functions like `cut`:
###Code
values = np.random.randn(10)
values
bins = [0, 0.2, 0.4, 0.6, 0.8, 1]
pd.get_dummies(pd.cut(values, bins))
###Output
_____no_output_____
###Markdown
See also `Series.str.get_dummies`. `get_dummies()` also accepts a `DataFrame`. By default all categorical variables (categorical in the statistical sense, those with `object` or `categorical` dtype) are encoded as dummy variables.
###Code
df = pd.DataFrame({"A": ["a", "b", "a"], "B": ["c", "c", "b"], "C": [1, 2, 3]})
pd.get_dummies(df)
###Output
_____no_output_____
###Markdown
All non-object columns are included untouched in the output. You can control the columns that are encoded with the `columns` keyword.
###Code
pd.get_dummies(df, columns=["A"])
###Output
_____no_output_____
###Markdown
Notice that the `B` column is still included in the output; it just hasn’t been encoded. You can drop `B` before calling `get_dummies` if you don’t want to include it in the output. As with the `Series` version, you can pass values for the `prefix` and `prefix_sep`. By default the column name is used as the prefix, and ‘_’ as the prefix separator. You can specify `prefix` and `prefix_sep` in 3 ways:- string: Use the same value for `prefix` or `prefix_sep` for each column to be encoded. - list: Must be the same length as the number of columns being encoded. - dict: Mapping column name to prefix.
###Code
simple = pd.get_dummies(df, prefix="new_prefix")
simple
from_list = pd.get_dummies(df, prefix=["from_A", "from_B"])
from_list
from_dict = pd.get_dummies(df, prefix={"B": "from_B", "A": "from_A"})
from_dict
###Output
_____no_output_____
###Markdown
Sometimes it will be useful to only keep k-1 levels of a categorical variable to avoid collinearity when feeding the result to statistical models. You can switch to this mode by turning on `drop_first`.
###Code
s = pd.Series(list("abcaa"))
s
pd.get_dummies(s)
pd.get_dummies(s, drop_first=True)
###Output
_____no_output_____
###Markdown
When a column contains only one level, it will be omitted in the result.
###Code
df = pd.DataFrame({"A": list("aaaaa"), "B": list("ababc")})
pd.get_dummies(df)
pd.get_dummies(df, drop_first=True)
###Output
_____no_output_____
###Markdown
By default new columns will have `np.uint8` dtype. To choose another dtype, use the `dtype` argument:
###Code
df = pd.DataFrame({"A": list("abc"), "B": [1.1, 2.2, 3.3]})
pd.get_dummies(df, dtype=bool).dtypes
###Output
_____no_output_____
###Markdown
Factorizing valuesTo encode 1-d values as an enumerated type, use `factorize()`:
###Code
x = pd.Series(["A", "A", np.nan, "B", 3.14, np.inf])
x
labels, uniques = pd.factorize(x)
labels
uniques
###Output
_____no_output_____
###Markdown
Note that `factorize` is similar to `numpy.unique`, but differs in its handling of NaN:>**Note**>>The following `numpy.unique` will fail under Python 3 with a `TypeError` because of an ordering bug. See also [here](https://github.com/numpy/numpy/issues/641). ```pythonIn [1]: x = pd.Series(['A', 'A', np.nan, 'B', 3.14, np.inf])In [2]: pd.factorize(x, sort=True)Out[2]:(array([ 2, 2, -1, 3, 0, 1]), Index([3.14, inf, 'A', 'B'], dtype='object'))In [3]: np.unique(x, return_inverse=True)[::-1]Out[3]: (array([3, 3, 0, 4, 1, 2]), array([nan, 3.14, inf, 'A', 'B'], dtype=object))``` >**Note**>>If you just want to handle one column as a categorical variable (like R’s factor), you can use `df["cat_col"] = pd.Categorical(df["col"])` or `df["cat_col"] = df["col"].astype("category")`. For full docs on `Categorical`, see the Categorical introduction and the [API documentation](../05_reference/14_Pandas_Arrays.ipynbapi-arrays-categorical). ExamplesIn this section, we will review frequently asked questions and examples. The column names and relevant column values are named to correspond with how this DataFrame will be pivoted in the answers below.
###Code
np.random.seed([3, 1415])
n = 20
cols = np.array(["key", "row", "item", "col"])
df = cols + pd.DataFrame(
(np.random.randint(5, size=(n, 4)) // [2, 1, 2, 1]).astype(str)
)
df.columns = cols
df = df.join(pd.DataFrame(np.random.rand(n, 2).round(2)).add_prefix("val"))
df
###Output
_____no_output_____
###Markdown
Pivoting with single aggregationsSuppose we wanted to pivot `df` such that the `col` values are columns, `row` values are the index, and the mean of `val0` are the values. In particular, the resulting DataFrame should look like:

```text
col   col0   col1   col2   col3  col4
row
row0  0.77  0.605    NaN  0.860  0.65
row2  0.13    NaN  0.395  0.500  0.25
row3   NaN  0.310    NaN  0.545   NaN
row4   NaN  0.100  0.395  0.760  0.24
```

This solution uses `pivot_table()`. Also note that `aggfunc='mean'` is the default. It is included here to be explicit.
###Code
df.pivot_table(values="val0", index="row", columns="col", aggfunc="mean")
###Output
_____no_output_____
###Markdown
Note that we can also replace the missing values by using the `fill_value` parameter.
###Code
df.pivot_table(
values="val0",
index="row",
columns="col",
aggfunc="mean",
fill_value=0,
)
###Output
_____no_output_____
###Markdown
Also note that we can pass in other aggregation functions as well. For example, we can also pass in `sum`.
###Code
df.pivot_table(
values="val0",
index="row",
columns="col",
aggfunc="sum",
fill_value=0,
)
###Output
_____no_output_____
###Markdown
Another aggregation we can do is calculate the frequency in which the columns and rows occur together, a.k.a. “cross tabulation”. To do this, we can pass `size` to the `aggfunc` parameter.
###Code
df.pivot_table(index="row", columns="col", fill_value=0, aggfunc="size")
###Output
_____no_output_____
###Markdown
Pivoting with multiple aggregationsWe can also perform multiple aggregations. For example, to perform both a `sum` and `mean`, we can pass in a list to the `aggfunc` argument.
###Code
df.pivot_table(
values="val0",
index="row",
columns="col",
aggfunc=["mean", "sum"],
)
###Output
_____no_output_____
###Markdown
Note that to aggregate over multiple value columns, we can pass in a list to the `values` parameter.
###Code
df.pivot_table(
values=["val0", "val1"],
index="row",
columns="col",
aggfunc=["mean"],
)
###Output
_____no_output_____
###Markdown
Note that to subdivide over multiple columns, we can pass in a list to the `columns` parameter.
###Code
df.pivot_table(
values=["val0"],
index="row",
columns=["item", "col"],
aggfunc=["mean"],
)
###Output
_____no_output_____
###Markdown
Exploding a list-like columnNew in version 0.25.0. Sometimes the values in a column are list-like.
###Code
keys = ["panda1", "panda2", "panda3"]
values = [["eats", "shoots"], ["shoots", "leaves"], ["eats", "leaves"]]
df = pd.DataFrame({"keys": keys, "values": values})
df
###Output
_____no_output_____
###Markdown
We can ‘explode’ the `values` column, transforming each list-like to a separate row, by using `explode()`. This will replicate the index values from the original row:
###Code
df["values"].explode()
###Output
_____no_output_____
###Markdown
You can also explode the column in the `DataFrame`.
###Code
df.explode("values")
###Output
_____no_output_____
###Markdown
`Series.explode()` will replace empty lists with `np.nan` and preserve scalar entries. The dtype of the resulting `Series` is always `object`.
###Code
s = pd.Series([[1, 2, 3], "foo", [], ["a", "b"]])
s
s.explode()
###Output
_____no_output_____
###Markdown
Here is a typical use case. You have comma-separated strings in a column and want to expand this.
###Code
df = pd.DataFrame([{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}])
df
###Output
_____no_output_____
###Markdown
Creating a long-form DataFrame is now straightforward using `explode` and chained operations:
###Code
df.assign(var1=df.var1.str.split(",")).explode("var1")
###Output
_____no_output_____ |
classification/k_nearest_neighbour/knn_classifier.ipynb | ###Markdown
k-Nearest Neighbors ClassifierIn this notebook, you will implement your own k-nearest neighbors (k-NN) algorithm for the classification problem. You are supposed to learn:* How to prepare the dataset for "training" and testing of the model.* How to implement k-nearest neighbors classification algorithm.* How to evaluate the performance of your classifier.**Instructions:*** Read carefuly through this notebook. Be sure you understand what is provided to you, and what is required from you.* Place your code only in sections annotated with ` START CODE HERE ` and ` END CODE HERE `.* Use comments whenever the code is not self-explanatory.* Submit an executable notebook (`*.ipynb`) with your solution to BlackBoard.Enjoy :-) PackagesFollowing packages is all you need. Do not import any additional packages!* [Pandas](https://pandas.pydata.org/) is a library providing easy-to-use data structures and data analysis tools.* [Numpy](http://www.numpy.org/) library provides support for large multi-dimensional arrays and matrices, along with functions to operate on these.
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
ProblemYou are given a dataset `mushrooms.csv` with characteristics/attributes of mushrooms, and your task is to implement and evaluate a k-nearest neighbors classifier able to say whether a mushroom is poisonous or edible based on its attributes. DatasetThe dataset of mushroom characteristics is freely available at [Kaggle Datasets](https://www.kaggle.com/uciml/mushroom-classification) where you can find further information about the dataset. It consists of 8124 mushrooms characterized by 23 attributes (including the class). Following is the overview of attributes and values:* class: edible=e, poisonous=p* cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s* cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s* cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r,pink=p,purple=u,red=e,white=w,yellow=y* bruises: bruises=t,no=f* odor: almond=a,anise=l,creosote=c,fishy=y,foul=f,musty=m,none=n,pungent=p,spicy=s* gill-attachment: attached=a,descending=d,free=f,notched=n* gill-spacing: close=c,crowded=w,distant=d* gill-size: broad=b,narrow=n* gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, green=r,orange=o,pink=p,purple=u,red=e,white=w,yellow=y* stalk-shape: enlarging=e,tapering=t* stalk-root: bulbous=b,club=c,cup=u,equal=e,rhizomorphs=z,rooted=r,missing=?* stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s* stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s* stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y* stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o,pink=p,red=e,white=w,yellow=y* veil-type: partial=p,universal=u* veil-color: brown=n,orange=o,white=w,yellow=y* ring-number: none=n,one=o,two=t* ring-type: cobwebby=c,evanescent=e,flaring=f,large=l,none=n,pendant=p,sheathing=s,zone=z* spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r,orange=o,purple=u,white=w,yellow=y* population: abundant=a,clustered=c,numerous=n,scattered=s,several=v,solitary=y* habitat: grasses=g,leaves=l,meadows=m,paths=p,urban=u,waste=w,woods=dLet's load the dataset into so called Pandas dataframe.
###Code
mushrooms_df = pd.read_csv('mushrooms.csv')
###Output
_____no_output_____
###Markdown
Now we can take a closer look at the data.
###Code
mushrooms_df
###Output
_____no_output_____
###Markdown
You can also print an overview of all attributes with the counts of unique values.
###Code
mushrooms_df.describe().T
###Output
_____no_output_____
###Markdown
The dataset is pretty much balanced. That's good news for the evaluation. Dataset PreprocessingAs our dataset consists of nominal/categorical values only, we will encode the strings into integers, which will allow us to use similarity measures such as Euclidean distance.
###Code
def encode_labels(df):
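    # Fit one LabelEncoder per column, replace each column's strings with
    # integer codes, and keep the fitted encoders for later decoding.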
import sklearn.preprocessing
encoder = {}
for col in df.columns:
le = sklearn.preprocessing.LabelEncoder()
le.fit(df[col])
df[col] = le.transform(df[col])
encoder[col] = le
return df, encoder
mushrooms_encoded_df, encoder = encode_labels(mushrooms_df)
mushrooms_encoded_df
###Output
_____no_output_____
###Markdown
Dataset SplittingBefore we start with the implementation of our k-nearest neighbors algorithm, we need to prepare our dataset for the "training" and testing. First, we divide the dataset into attributes (often called features) and classes (often called targets). Keeping attributes and classes separate is a common practice in many implementations. This should simplify the implementation and make the code understandable.
###Code
X_df = mushrooms_encoded_df.drop('class', axis=1) # attributes
y_df = mushrooms_encoded_df['class'] # classes
X_array = X_df.to_numpy()
y_array = y_df.to_numpy()
# X_array = X_df.as_matrix()
# y_array = y_df.as_matrix()
###Output
_____no_output_____
###Markdown
And this is how it looks like.
###Code
print('X =', X_array)
print('len(X) =', len(X_array))
print('y =', y_array)
print('len(y) =', len(y_array))
###Output
X = [[5 2 4 ... 2 3 5]
[5 2 9 ... 3 2 1]
[0 2 8 ... 3 2 3]
...
[2 2 4 ... 0 1 2]
[3 3 4 ... 7 4 2]
[5 2 4 ... 4 1 2]]
len(X) = 8124
y = [1 0 0 ... 0 1 0]
len(y) = 8124
###Markdown
Next, we need to split the attributes and classes into training sets and test sets.**Exercise:**Implement the holdout splitting method with shuffling.
###Code
def train_test_split(X, y, test_size=0.2):
"""
Shuffles the dataset and splits it into training and test sets.
:param X
attributes
:param y
classes
:param test_size
float between 0.0 and 1.0 representing the proportion of the dataset to include in the test split
:return
train-test splits (X-train, X-test, y-train, y-test)
"""
    ### START CODE HERE ###
    if not 0.0 <= test_size <= 1.0:
        raise ValueError("test_size must be a value between 0.0 and 1.0")
    # Shuffle a single index permutation so that attributes and classes
    # are permuted together and every sample keeps its class label.
    indices = np.random.permutation(len(X))
    n_test = int(np.ceil(test_size * len(X)))
    test_idx, train_idx = indices[:n_test], indices[n_test:]
    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]
    ### END CODE HERE ###
###Output
_____no_output_____
###Markdown
Let's split the dataset into a training set and a validation/test set with a 67:33 split.
###Code
X_train, X_test, y_train, y_test = train_test_split(X_array, y_array, 0.33)
print('X_train =', X_train)
print('y_train =', y_train)
print('X_test =', X_test)
print('y_test =', y_test)
###Output
_____no_output_____
###Markdown
A quick sanity check...
###Code
assert len(X_train) == len(y_train)
assert len(y_train) == 5443
assert len(X_test) == len(y_test)
assert len(y_test) == 2681
###Output
_____no_output_____
###Markdown
AlgorithmThe k-nearest neighbors algorithm doesn't require a training step. The class of an unseen sample is deduced by comparison with samples of known class.**Exercise:**Implement the k-nearest neighbors algorithm.
###Code
# Use this section to place any "helper" code for the `knn()` function.
### START CODE HERE ###
def distance(a, b):
    # Euclidean distance between two attribute vectors.
    return np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2))
### END CODE HERE ###
def knn(X_true, y_true, X_pred, k=5):
"""
k-nearest neighbors classifier.
:param X_true
        attributes of the ground truth (training set)
:param y_true
        classes of the ground truth (training set)
:param X_pred
attributes of samples to be classified
:param k
number of neighbors to use
:return
predicted classes
"""
### START CODE HERE ###
    y_pred = np.empty(len(X_pred), dtype=y_true.dtype)
    for i, x in enumerate(X_pred):
        # Distance from the query sample to every ground-truth sample.
        dists = np.array([distance(x, x_true) for x_true in X_true])
        # Indices of the k closest training samples.
        nearest = np.argsort(dists)[:k]
        # Majority vote among the labels of the k nearest neighbors.
        y_pred[i] = np.bincount(y_true[nearest]).argmax()
### END CODE HERE ###
return y_pred
y_hat = knn(X_train, y_train, X_test, k=5)
###Output
_____no_output_____
###Markdown
First ten predictions of the test set.
###Code
y_hat[:10]
###Output
_____no_output_____
###Markdown
EvaluationNow we would like to assess how well our classifier performs.**Exercise:**Implement a function for calculating the accuracy of your predictions given the ground truth and predictions.
###Code
def evaluate(y_true, y_pred):
"""
Function calculating the accuracy of the model on the given data.
:param y_true
true classes
    :param y_pred
        predicted classes
:return
accuracy
"""
    ### START CODE HERE ###
    # Fraction of predictions that match the true classes
    accuracy = np.mean(np.asarray(y_true) == np.asarray(y_pred))
    ### END CODE HERE ###
return accuracy
accuracy = evaluate(y_test, y_hat)
print('accuracy =', accuracy)
###Output
_____no_output_____
###Markdown
How many items were misclassified?
###Code
print('misclassified =', sum(abs(y_hat - y_test)))
###Output
_____no_output_____
###Markdown
How balanced is our test set?
###Code
np.bincount(y_test)
###Output
_____no_output_____ |
quantopian/lectures/Confidence_Intervals/notebook.ipynb | ###Markdown
Tutorial: Confidence IntervalsBy Delaney Granizo-Mackenzie, Jeremiah Johnson, and Gideon WulfsohnPart of the Quantopian Lecture Series:http://www.quantopian.com/lectureshttp://github.com/quantopian/research_publicNotebook released under the Creative Commons Attribution 4.0 License. Sample Mean vs. Population MeanSample means and population means are different. Generally, we want to know about a population mean, but we can only calculate a sample mean. We then want to use the sample mean to estimate the population mean. We use confidence intervals in an attempt to determine how accurately our sample mean estimates the population mean. Confidence IntervalIf I asked you to estimate the average height of a woman in the USA, you might do this by measuring 10 women and estimating that the mean of that sample was close to the population. Let's try that.
###Code
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
# We'll set a seed here so our runs are consistent
np.random.seed(10)
# Let's define some 'true' population parameters, we'll pretend we don't know these.
POPULATION_MU = 64
POPULATION_SIGMA = 5
# Generate our sample by drawing from the population distribution
sample_size = 10
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
print(heights)
mean_height = np.mean(heights)
print('sample mean: ', mean_height)
###Output
[ 70.65793252 67.57639487 56.27299854 63.95808075 67.10667987
60.3995722 65.32755793 64.54274263 64.02145715 63.12699895]
sample mean: 64.2990415407
###Markdown
Unfortunately simply reporting the sample mean doesn't do much for us, as we don't know how it relates to the population mean. To get a sense for how it might relate, we can look for how much variance there is in our sample. Higher variance indicates instability and uncertainty.
###Code
print('sample standard deviation: ', np.std(heights))
###Output
sample standard deviation: 3.75987801845
###Markdown
This still doesn't do that much for us; to really get a sense of how our sample mean relates to the population mean, we need to compute a standard error. The standard error is a measure of the variance of the sample mean. IMPORTANTComputing a standard error involves assuming that the way you sample is unbiased, and that the data are normal and independent. If these conditions are violated, your standard error will be wrong. There are ways of testing for this and correcting.The formula for the standard error is$$SE = \frac{\sigma}{\sqrt{n}}$$where $\sigma$ is the sample standard deviation and $n$ is the number of samples.
###Code
SE = np.std(heights) / np.sqrt(sample_size)
print('standard error: ', SE)
###Output
standard error: 1.18897782627
###Markdown
There is a function in scipy's stats library for calculating the standard error. Note that this function by default contains a degrees-of-freedom correction that is often not necessary (for large enough samples, it is effectively irrelevant). You can omit the correction by setting the parameter ddof to 0.
###Code
stats.sem(heights, ddof=0)
###Output
_____no_output_____
###Markdown
Assuming our data are normally distributed, we can use the standard error to compute our confidence interval. To do this we first set our desired confidence level, say 95%; we then determine how many standard deviations contain 95% of the mass. It turns out that 95% of the mass lies between -1.96 and 1.96 on a standard normal distribution. When the samples are large enough (generally > 30 is taken as a threshold) the Central Limit Theorem applies and normality can be safely assumed; if sample sizes are smaller, a safer approach is to use a $t$-distribution with appropriately specified degrees of freedom. The actual way to compute the values is by using a cumulative distribution function (CDF). If you are not familiar with CDFs, inverse CDFs, and their companion PDFs, you can read about them [here](https://en.wikipedia.org/wiki/Probability_density_function) and [here](https://en.wikipedia.org/wiki/Cumulative_distribution_function). Look [here](https://en.wikipedia.org/wiki/Student%27s_t-distribution) for information on the $t$-distribution. We can check the 95% number using one of the Python functions. NOTE: Be careful when applying the Central Limit Theorem, however, as many datasets in finance are fundamentally non-normal and it is not safe to apply the theorem casually or without attention to subtlety.We can visualize the 95% mass bounds here.
###Code
# Set up the x axis
x = np.linspace(-5,5,100)
# Here's the normal distribution
y = stats.norm.pdf(x,0,1)
plt.plot(x,y)
# Plot our bounds
plt.vlines(-1.96, 0, 1, colors='r', linestyles='dashed')
plt.vlines(1.96, 0, 1, colors='r', linestyles='dashed')
# Shade the area
fill_x = np.linspace(-1.96, 1.96, 500)
fill_y = stats.norm.pdf(fill_x, 0, 1)
plt.fill_between(fill_x, fill_y)
plt.xlabel(r'$\sigma$')
plt.ylabel('Normal PDF');
###Output
_____no_output_____
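###Markdown
As a quick numerical check (a sketch using scipy's inverse CDF, ppf), we can recover the 1.96 figure and construct the 95% interval by hand for the heights sample:
###Code
# z-value bounding the central 95% mass of a standard normal (~1.96)
z = stats.norm.ppf((1 + 0.95) / 2.0)
print(z)
# Hand-rolled 95% interval around the sample mean: (mu - z*SE, mu + z*SE)
print((mean_height - z * SE, mean_height + z * SE))
###Output
_____no_output_____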
###Markdown
Here's the trickNow, rather than reporting our sample mean without any sense of the probability of it being correct, we can compute an interval and be much more confident that the population mean lies in that interval. To do this we take our sample mean $\mu$ and report $\left(\mu-1.96 SE , \mu+1.96SE\right)$.This works because assuming normality, that interval will contain the population mean 95% of the time. SUBTLETY:In any given case, the true value of the estimate and the bounds of the confidence interval are fixed. It is incorrect to say that "The national mean female height is between 63 and 65 inches with 95% probability," but unfortunately this is a very common misinterpretation. Rather, the 95% refers instead to the fact that over many computations of a 95% confidence interval, the true value will be in the interval in 95% of the cases (assuming correct calibration of the confidence interval, which we will discuss later). But in fact for a single sample and the single confidence interval computed from it, we have no way of assessing the probability that the interval contains the population mean. The visualization below demonstrates this. In the code block below, there are two things to note. First, although the sample size is sufficiently large to assume normality, we're using a $t$-distribution, just to demonstrate how it is used. Second, the $t$-values needed (analogous to the $\pm1.96$ used above) are being calculated from the inverted cumulative density function, the ppf in scipy.stats. The $t$-distribution requires the extra parameter degrees of freedom (d.o.f), which is the size of the sample minus one.
###Code
np.random.seed(8309)
n = 100 # number of samples to take
samples = [np.random.normal(loc=0, scale=1, size=100) for _ in range(n)]
fig, ax = plt.subplots(figsize=(10, 7))
for i in np.arange(1, n, 1):
sample_mean = np.mean(samples[i]) # calculate sample mean
se = stats.sem(samples[i]) # calculate sample standard error
h = se*stats.t.ppf((1+0.95)/2, len(samples[i])-1) # calculate t; 2nd param is d.o.f.
sample_ci = [sample_mean - h, sample_mean + h]
if ((sample_ci[0] <= 0) and (0 <= sample_ci[1])):
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='blue', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'bo');
else:
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='red', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'ro');
plt.axvline(x=0, ymin=0, ymax=1, linestyle='--', label = 'Population Mean');
plt.legend(loc='best');
plt.title('100 95% Confidence Intervals for mean of 0');
###Output
_____no_output_____
###Markdown
Further ReadingThis is only a brief introduction; Wikipedia has excellent articles detailing these subjects in greater depth. Let's go back to our heights example. Since the sample size is small, we'll use the $t$-distribution.
###Code
# standard error SE was already calculated
t_val = stats.t.ppf((1+0.95)/2, 9) # d.o.f. = 10 - 1
print('sample mean height:', mean_height)
print('t-value:', t_val)
print('standard error:', SE)
print('confidence interval:', (mean_height - t_val * SE, mean_height + t_val * SE))
###Output
sample mean height: 64.2990415407
t-value: 2.26215716274
standard error: 1.18897782627
confidence interval: (61.609386834663141, 66.988696246744738)
###Markdown
There is a built-in function in scipy.stats for computing the interval. Remember to specify the degrees of freedom.
###Code
print('99% confidence interval:', stats.t.interval(0.99, df=9, loc=mean_height, scale=SE))
print('95% confidence interval:', stats.t.interval(0.95, df=9, loc=mean_height, scale=SE))
print('80% confidence interval:', stats.t.interval(0.8, df=9, loc=mean_height, scale=SE))
###Output
99% confidence interval: (60.43505913983995, 68.163023941567928)
95% confidence interval: (61.609386834663141, 66.988696246744738)
80% confidence interval: (62.654651037653949, 65.94343204375393)
###Markdown
Note that as your confidence increases, the interval necessarily widens. Assuming normality, there's also a built-in function that will compute our interval for us. This time you don't need to specify the degrees of freedom. Note that at a corresponding level of confidence, the interval calculated using the normal distribution is narrower than the interval calculated using the $t$-distribution.
###Code
print(stats.norm.interval(0.99, loc=mean_height, scale=SE))
print(stats.norm.interval(0.95, loc=mean_height, scale=SE))
print(stats.norm.interval(0.80, loc=mean_height, scale=SE))
###Output
(61.236437614523354, 67.361645466884525)
(61.968687822794635, 66.629395258613243)
(62.775305146047593, 65.822777935360293)
###Markdown
What does this mean?Confidence intervals allow us to set our desired confidence, and then report a range that will likely contain the population mean. The higher our desired confidence, the larger the range we report. In general, one can never report a single point value, because the probability that any given point is the true population mean is incredibly small. Let's see how our intervals tighten as we change sample size.
###Code
np.random.seed(10)
sample_sizes = [10, 100, 1000]
for s in sample_sizes:
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, s)
SE = np.std(heights) / np.sqrt(s)
    print(stats.norm.interval(0.95, loc=mean_height, scale=SE))
###Output
(61.968687822794635, 66.629395258613243)
(63.343692029946574, 65.254391051461297)
(64.00593339807287, 64.592149683335009)
###Markdown
Visualizing Confidence IntervalsHere is some code to visualize a confidence interval on a graph. Feel free to play around with it.
###Code
sample_size = 100
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
SE = np.std(heights) / np.sqrt(sample_size)
(l, u) = stats.norm.interval(0.95, loc=np.mean(heights), scale=SE)
print((l, u))
plt.hist(heights, bins=20)
plt.xlabel('Height')
plt.ylabel('Frequency')
# Just for plotting
y_height = 5
plt.plot([l, u], [y_height, y_height], '-', color='r', linewidth=4, label='Confidence Interval')
plt.plot(np.mean(heights), y_height, 'o', color='r', markersize=10);
###Output
(63.588854219913536, 65.573463595434731)
###Markdown
Miscalibration and Violation of AssumptionsThe computation of a standard deviation, standard error, and confidence interval all rely on certain assumptions. If these assumptions are violated then the 95% confidence interval will not necessarily contain the population parameter 95% of the time. We say that in this case the confidence interval is miscalibrated. Here is an example. Example: Autocorrelated DataIf your data generating process is autocorrelated, then estimates of standard deviation will be wrong. This is because autocorrelated processes tend to produce more extreme values than normally distributed processes. This is due to new values being dependent on previous values: series that are already far from the mean are likely to stay far from the mean. To check this we'll generate some autocorrelated data according to the following process.$$X_t = \theta X_{t-1} + \epsilon$$$$\epsilon \sim \mathcal{N}(0,1)$$
###Code
def generate_autocorrelated_data(theta, mu, sigma, N):
# Initialize the array
X = np.zeros((N, 1))
for t in range(1, N):
# X_t = theta * X_{t-1} + epsilon
X[t] = theta * X[t-1] + np.random.normal(mu, sigma)
return X
X = generate_autocorrelated_data(0.5, 0, 1, 100)
plt.plot(X);
plt.xlabel('t');
plt.ylabel('X[t]');
###Output
_____no_output_____
###Markdown
It turns out that for larger sample sizes, you should see the sample mean converge to zero, since the process is still centered around zero. Let's check whether that's true: we'll vary the number of samples drawn and look for convergence as we increase sample size.
###Code
sample_means = np.zeros(200-1)
for i in range(1, 200):
X = generate_autocorrelated_data(0.5, 0, 1, i * 10)
sample_means[i-1] = np.mean(X)
plt.bar(range(1, 200), sample_means);
plt.xlabel('Sample Size');
plt.ylabel('Sample Mean');
###Output
_____no_output_____
###Markdown
It definitely looks like there's some convergence; we can also check what the mean of the sample means is.
###Code
np.mean(sample_means)
###Output
_____no_output_____
###Markdown
Pretty close to zero. We could also derive symbolically that the mean is zero, but let's assume that we've convinced ourselves with the simple empirical analysis. Now that we know the population mean, we can check the calibration of confidence intervals. First we'll write two helper functions which compute a naive interval for some input data, and check whether the interval contains the true mean, 0.
###Code
def compute_unadjusted_interval(X):
T = len(X)
# Compute mu and sigma MLE
mu = np.mean(X)
sigma = np.std(X)
SE = sigma / np.sqrt(T)
# Compute the bounds
return stats.norm.interval(0.95, loc=mu, scale=SE)
# We'll make a function that returns true when the computed bounds contain 0
def check_unadjusted_coverage(X):
l, u = compute_unadjusted_interval(X)
# Check to make sure l <= 0 <= u
if l <= 0 and u >= 0:
return True
else:
return False
###Output
_____no_output_____
###Markdown
Now we'll run many trials, in each we'll sample some data, compute a confidence interval, and then check if the confidence interval contains the population mean. We'll keep a running tally, and we should expect to see 95% of the trials succeed if the intervals are calibrated correctly.
###Code
T = 100
trials = 500
times_correct = 0
for i in range(trials):
X = generate_autocorrelated_data(0.5, 0, 1, T)
if check_unadjusted_coverage(X):
times_correct += 1
print('Empirical Coverage: ', times_correct/float(trials))
print('Expected Coverage: ', 0.95)
###Output
Empirical Coverage: 0.732
Expected Coverage: 0.95
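###Markdown
The naive interval undercovers because autocorrelation inflates the variance of the sample mean beyond $\sigma^2/T$. As a rough sketch (assuming the true $\theta = 0.5$ is known, which it would not be in practice), we can widen the standard error by the AR(1) inflation factor $\sqrt{(1+\theta)/(1-\theta)}$ and re-check coverage:
###Code
def check_adjusted_coverage(X, theta=0.5):
    T = len(X)
    mu = np.mean(X)
    # Inflate the naive standard error by the AR(1) long-run variance factor
    SE = (np.std(X) / np.sqrt(T)) * np.sqrt((1 + theta) / (1 - theta))
    l, u = stats.norm.interval(0.95, loc=mu, scale=SE)
    return l <= 0 <= u
times_correct = 0
for i in range(trials):
    X = generate_autocorrelated_data(0.5, 0, 1, T)
    if check_adjusted_coverage(X):
        times_correct += 1
print('Adjusted Empirical Coverage: ', times_correct / float(trials))
###Output
_____no_output_____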
|
lessons/ObjectOrientedProgramming/JupyterNotebooks/4.OOP_code_magic_methods/magic_methods.ipynb | ###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
self.mean = sum(self.data)/len(self.data)
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
        # calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.xlabel('Data')
plt.ylabel('Count')
plt.title('Histogram of data')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return math.exp(-math.pow((x-self.mean)/self.stdev,2)/2)/(self.stdev * math.sqrt(2* math.pi))
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = math.sqrt(self.stdev **2 + other.stdev ** 2) # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.013s
OK
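###Markdown
A quick usage sketch (hypothetical values): adding two Gaussian instances exercises both magic methods at once, since printing the sum goes through __repr__:
###Code
g = Gaussian(10, 3) + Gaussian(20, 4)
print(g)  # expect: mean 30, standard deviation 5.0, since sqrt(3**2 + 4**2) = 5
###Output
_____no_output_____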
###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder. Supporting Materials
###Code
# Install packages
!pip install requests
!pip install matplotlib
# Import package
import requests
# Setup scripts
# Download numbers.txt
numbers_url = 'https://video.udacity-data.com/topher/2021/April/60784b46_numbers/numbers.txt'
r = requests.get(numbers_url)
# make sure your filename is the same as how you want to import
with open('numbers.txt', 'w') as f:
f.write(r.text)
# Download answer.py
answer_url = 'https://video.udacity-data.com/topher/2021/April/60784b3c_answer/answer.py'
r = requests.get(answer_url)
# make sure your filename is the same as how you want to import
with open('answer.py', 'w') as f:
f.write(r.text)
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
mean = sum(self.data) / len(self.data)
# Return the mean of the data set
self.mean = mean
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# Keep the value of sample in mind for calculating the standard deviation
#
# If sample = True, this means the data is a sample.
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sumdev = 0
for i in self.data:
sumdev += (i - mean)**2
sigma = math.sqrt(sumdev / n)
# Make sure to update self.stdev and return the standard deviation as well
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
self.data = data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
self.mean = self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
plt.grid(True)
# plt.show()
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
pdf = (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev)**2)
return pdf
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2) # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
_____no_output_____
###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
if len(self.data) > 0:
self.mean = float(sum(self.data) / len(self.data))
return self.mean
else:
print('Please load data first.')
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma/n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.xlabel('Data')
plt.ylabel('Count')
plt.title('Histogram of Data')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return 1.0/(math.sqrt(2*math.pi)*self.stdev) * math.exp(-0.5*((x-self.mean)**2)/(self.stdev**2))
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2) # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.027s
OK
###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return(1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
....F.
======================================================================
FAIL: test_repr (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\irina\AppData\Local\Temp/ipykernel_1980/583898914.py", line 39, in test_repr
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
AssertionError: 'mean 25, standart deviation 3' != 'mean 25, standard deviation 3'
- mean 25, standart deviation 3
? ^
+ mean 25, standard deviation 3
? ^
----------------------------------------------------------------------
Ran 6 tests in 0.013s
FAILED (failures=1)
###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.016s
OK
###Markdown
Magic MethodsBelow you'll find the same code from the previous exercise except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
if len(self.data) == 0:
self.mean = 0
else:
self.mean = sum(self.data) / len(self.data)
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for data_point in self.data:
sigma += (data_point - mean)**2
self.stdev = math.sqrt(sigma / n)
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return 'mean {}, standard deviation {}'.format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.006s
OK
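###Markdown
Aside: the constant 0.19947 that test_pdf checks against is not arbitrary; it is the peak height of the Gaussian density, 1 / (stdev * sqrt(2 * pi)), evaluated at x == mean with stdev = 2. A one-line sanity check:
###Code
import math

# Peak of the Gaussian pdf at x == mean, for stdev = 2.
print(round(1 / (2 * math.sqrt(2 * math.pi)), 5))  # 0.19947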
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
self.mean = sum(self.data) / len(self.data)
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
denominator = len(self.data) - 1
else:
denominator = len(self.data)
self.stdev = math.sqrt(
sum(
[
(x - self.mean) **2
for x in self.data
]
)
/ denominator
)
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.xlabel("Bins")
plt.ylabel("Number of observations")
plt.title("Data Histogram")
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return 1 / (self.stdev * math.sqrt(2*math.pi)) * math.exp(-(x-self.mean)**2 / (2*self.stdev**2))
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return f"mean {self.mean}, standard deviation {self.stdev}"
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.008s
OK
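###Markdown
Aside: the sample flag in calculate_stdev implements Bessel's correction, dividing the sum of squared deviations by n - 1 for a sample and by n for a full population. That is why test_stdevcalculation expects two different values (92.87 vs 88.55) from the same data file. A tiny illustration:
###Code
import math

data = [1.0, 2.0, 3.0, 4.0]
mean = sum(data) / len(data)
ss = sum((x - mean) ** 2 for x in data)
print(math.sqrt(ss / (len(data) - 1)))  # sample stdev, ~1.2910
print(math.sqrt(ss / len(data)))        # population stdev, ~1.1180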
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
pass
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
pass
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calcaulte_stdev() method.
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
pass
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = 5 # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = 2 # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
pass
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
F.EE.F
======================================================================
ERROR: test_meancalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 18, in test_meancalculation
self.assertEqual(self.gaussian.calculate_mean(), sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
ZeroDivisionError: float division by zero
======================================================================
ERROR: test_pdf (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 14, in test_pdf
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947, 'pdf function does not give expected result')
TypeError: type NoneType doesn't define __round__ method
======================================================================
FAIL: test_add (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 31, in test_add
self.assertEqual(gaussian_sum.mean, 55)
AssertionError: 5 != 55
======================================================================
FAIL: test_stdevcalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 22, in test_stdevcalculation
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
AssertionError: 2 != 92.87 : sample standard deviation incorrect
----------------------------------------------------------------------
Ran 6 tests in 0.015s
FAILED (failures=2, errors=2)
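###Markdown
Aside: the failure pattern above follows directly from the stubs. A method body of just pass returns None, which round() rejects (the two errors), and the placeholder values 5 and 2 in `__add__` trip the assertions (the two failures). A minimal reproduction of the error case:
###Code
def pdf_stub(x):
    pass  # a stubbed method implicitly returns None

try:
    round(pdf_stub(25), 5)
except TypeError as e:
    print(e)  # type NoneType doesn't define __round__ method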
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
mu = 0
for i in self.data:
mu = mu + i
self.mean = mu / len(self.data)
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
std = 0
for j in self.data:
std = std + (j - self.mean)**2
self.stdev = (std / n)**(1/2)
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.title('Histogram of the data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean # the means simply add
result.stdev = (self.stdev**2 + other.stdev**2)**(1/2) # variances add, so take the square root
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return 'mean {}, standard deviation {}'.format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.016s
OK
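###Markdown
Aside: test_repr can call str() even though the class only defines `__repr__`, because Python falls back to `__repr__` when `__str__` is missing. A quick demonstration with a throwaway class:
###Code
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __repr__(self):
        return 'Point({}, {})'.format(self.x, self.y)

p = Point(1, 2)
print(str(p) == repr(p))  # True: str() falls back to __repr__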
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.010s
OK
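###Markdown
Aside: the expression gaussian_one + gaussian_two in test_add is pure syntax sugar; Python rewrites a + b as a.__add__(b). The two calls below are therefore equivalent:
###Code
class Adder:
    def __init__(self, v):
        self.v = v
    def __add__(self, other):
        return Adder(self.v + other.v)

a, b = Adder(25), Adder(30)
print((a + b).v, a.__add__(b).v)  # 55 55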
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
average = sum(self.data)/len(self.data)
self.mean = average
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for x in self.data:
sigma += (x - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.xlabel('data')
plt.ylabel('frequency')
plt.title('Histogram plot for the given data')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
res = (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
return res
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.006s
OK
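###Markdown
Aside: a hypothetical, more compact reader for the same one-number-per-line format (this is a sketch, not the exercise's required implementation). Iterating over the file object directly avoids the explicit readline() loop, and float() also accepts decimal values, unlike int():
###Code
def read_numbers(file_name):
    # One number per line; skip blank lines rather than crashing on them.
    with open(file_name) as f:
        return [float(line) for line in f if line.strip()]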
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
pass
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
pass
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
pass
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = 5 # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = 2 # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
pass
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
F.EE.F
======================================================================
ERROR: test_meancalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 18, in test_meancalculation
self.assertEqual(self.gaussian.calculate_mean(), sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
ZeroDivisionError: float division by zero
======================================================================
ERROR: test_pdf (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 14, in test_pdf
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947, 'pdf function does not give expected result')
TypeError: type NoneType doesn't define __round__ method
======================================================================
FAIL: test_add (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 31, in test_add
self.assertEqual(gaussian_sum.mean, 55)
AssertionError: 5 != 55
======================================================================
FAIL: test_stdevcalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 22, in test_stdevcalculation
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
AssertionError: 2 != 92.87 : sample standard deviation incorrect
----------------------------------------------------------------------
Ran 6 tests in 0.015s
FAILED (failures=2, errors=2)
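###Markdown
Aside: the status line F.EE.F prints one character per test in the order they ran ('.' pass, 'F' assertion failure, 'E' unexpected exception). A self-contained sketch that produces one of each (unittest runs tests in alphabetical order, so this prints EF.):
###Code
import unittest

class Demo(unittest.TestCase):
    def test_error(self):
        raise ValueError('boom')      # reported as 'E'
    def test_failure(self):
        self.assertEqual(1, 2)        # reported as 'F'
    def test_pass(self):
        self.assertTrue(True)         # reported as '.'

suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
unittest.TextTestRunner().run(suite)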
###Markdown
Magic Methods
Below you'll find the same code from the previous exercise, except two more methods have been added: an `__add__` method and a `__repr__` method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook. As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
avg = 1.0* sum(self.data)/len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.mean
sigma = 0
for d in self.data:
sigma += (d - mean)**2
sigma = math.sqrt(sigma/n)
self.stdev = sigma
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(int(line))
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.title("Histogram of data")
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2) # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean,self.stdev)
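# Quick usage sketch (an illustration, not part of the original exercise): once the
# class is defined, the magic methods can be exercised directly, no data file needed.
demo_sum = Gaussian(25, 3) + Gaussian(30, 4)   # calls __add__
assert repr(demo_sum) == "mean 55, standard deviation 5.0"   # calls __repr__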
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.028s
OK
###Markdown
Magic Methods

Below you'll find the same code from the previous exercise, except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.

As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
import numpy as np
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
mu = np.mean(self.data)
self.mean = mu
return mu
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
if self.mean == 0:
print("WARNING: Your mean is 0. Make sure this is correct and that you didn't forget to call calculate_mean() first.")
if sample:
sigma = np.std(self.data, ddof=1)
else:
sigma = np.std(self.data, ddof=0)
self.stdev = sigma
return sigma
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(float(line))  # docstring specifies one float per line
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list  # replace rather than append, so repeated reads don't accumulate data
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample=sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
plt.hist(self.data)
plt.xlabel("Data")
plt.ylabel("Histogram")
plt.title("Histogram plot")
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
mu = self.mean
sigma = self.stdev
pdf = 1./np.sqrt(2*np.pi*sigma**2) * np.exp( -(x-mu)**2 / (2*sigma**2) )
return pdf
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = np.sqrt(self.stdev**2 + other.stdev**2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.015s
OK
###Markdown
Magic Methods

Below you'll find the same code from the previous exercise, except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.

As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = sum(self.data)/len(self.data)
self.mean = avg
return self.mean
# Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
avg = self.mean
sigma2 = 0
for value in self.data:
sigma2 += (value-avg)**2
sigma2 = sigma2/n
sigma = math.sqrt(sigma2)
self.stdev = sigma
return self.stdev
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(float(line))  # docstring specifies one float per line
line = file.readline()
file.close()
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
self.data = data_list
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
# Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
avg = self.mean
sigma2 = self.stdev**2
return math.exp(-(x-avg)**2/(2*sigma2))/math.sqrt(2*math.pi*sigma2)
# Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
# Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# calculate the mean and standard deviation of the sum of two Gaussians
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
return "mean {}, standard deviation {}".format(self.mean,self.stdev)
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
......
----------------------------------------------------------------------
Ran 6 tests in 0.005s
OK
###Markdown
Magic Methods

Below you'll find the same code from the previous exercise, except two more methods have been added: an __add__ method and a __repr__ method. Your task is to fill out the code and get all of the unit tests to pass. You'll find the code cell with the unit tests at the bottom of this Jupyter notebook.

As in previous exercises, there is an answer key that you can look at if you get stuck. Click on the "Jupyter" icon at the top of this notebook, and open the folder 4.OOP_code_magic_methods. You'll find the answer.py file inside the folder.
###Code
import math
import matplotlib.pyplot as plt
class Gaussian():
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
self.mean = mu
self.stdev = sigma
self.data = []
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
#TODO: Calculate the mean of the data set. Remember that the data set is stored in self.data
# Change the value of the mean attribute to be the mean of the data set
# Return the mean of the data set
pass
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
# TODO:
# Calculate the standard deviation of the data set
#
# The sample variable determines if the data set contains a sample or a population
# If sample = True, this means the data is a sample.
# Keep the value of sample in mind for calculating the standard deviation
#
# Make sure to update self.stdev and return the standard deviation as well
pass
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
# This code opens a data file and appends the data to a list called data_list
with open(file_name) as file:
data_list = []
line = file.readline()
while line:
data_list.append(float(line))  # docstring specifies one float per line
line = file.readline()
file.close()
# TODO:
# Update the self.data attribute with the data_list
# Update self.mean with the mean of the data_list.
# You can use the calculate_mean() method with self.calculate_mean()
# Update self.stdev with the standard deviation of the data_list. Use the
# calculate_stdev() method.
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
# TODO: Plot a histogram of the data_list using the matplotlib package.
# Be sure to label the x and y axes and also give the chart a title
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
# TODO: Calculate the probability density function of the Gaussian distribution
# at the value x. You'll need to use self.stdev and self.mean to do the calculation
pass
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
#TODO: Nothing to do for this method. Try it out and see how it works.
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
# TODO: Calculate the results of summing two Gaussian distributions
# When summing two Gaussian distributions, the mean value is the sum
# of the means of each Gaussian.
#
# When summing two Gaussian distributions, the standard deviation is the
# square root of the sum of square ie sqrt(stdev_one ^ 2 + stdev_two ^ 2)
# create a new Gaussian object
result = Gaussian()
# TODO: calculate the mean and standard deviation of the sum of two Gaussians
result.mean = 5 # change this line to calculate the mean of the sum of two Gaussian distributions
result.stdev = 2 # change this line to calculate the standard deviation of the sum of two Gaussian distributions
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
# TODO: Return a string in the following format -
# "mean mean_value, standard deviation standard_deviation_value"
# where mean_value is the mean of the Gaussian distribution
# and standard_deviation_value is the standard deviation of
# the Gaussian.
# For example "mean 3.5, standard deviation 1.3"
pass
# Unit tests to check your solution
import unittest
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
def test_meancalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.gaussian.read_data_file('numbers.txt', True)
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
self.gaussian.read_data_file('numbers.txt', False)
self.assertEqual(round(self.gaussian.stdev, 2), 88.55, 'population standard deviation incorrect')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
def test_repr(self):
gaussian_one = Gaussian(25, 3)
self.assertEqual(str(gaussian_one), "mean 25, standard deviation 3")
tests = TestGaussianClass()
tests_loaded = unittest.TestLoader().loadTestsFromModule(tests)
unittest.TextTestRunner().run(tests_loaded)
###Output
F.EE.F
======================================================================
ERROR: test_meancalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 18, in test_meancalculation
self.assertEqual(self.gaussian.calculate_mean(), sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
ZeroDivisionError: float division by zero
======================================================================
ERROR: test_pdf (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 14, in test_pdf
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947, 'pdf function does not give expected result')
TypeError: type NoneType doesn't define __round__ method
======================================================================
FAIL: test_add (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 31, in test_add
self.assertEqual(gaussian_sum.mean, 55)
AssertionError: 5 != 55
======================================================================
FAIL: test_stdevcalculation (__main__.TestGaussianClass)
----------------------------------------------------------------------
Traceback (most recent call last):
File "<ipython-input-3-09732a143793>", line 22, in test_stdevcalculation
self.assertEqual(round(self.gaussian.stdev, 2), 92.87, 'sample standard deviation incorrect')
AssertionError: 2 != 92.87 : sample standard deviation incorrect
----------------------------------------------------------------------
Ran 6 tests in 0.015s
FAILED (failures=2, errors=2)
|
historical-data-newspaper-titles.ipynb | ###Markdown
Gathering historical data about the addition of newspaper titles to Trove

The number of digitised newspapers available through Trove has increased dramatically since 2009. Understanding when newspapers were added is important for historiographical purposes, but there's no data about this available directly from Trove. This notebook uses web archives to extract lists of newspapers in Trove over time, and chart Trove's development.

Trove has always provided a browseable list of digitised newspaper titles. The url and format of this list has changed over time, but it's possible to find captures of this page in the Internet Archive and extract the full list of titles. The pages are also captured in the Australian Web Archive, but the Wayback Machine has a more detailed record.

The pages that I'm looking for are:

* [http://trove.nla.gov.au/ndp/del/titles](https://web.archive.org/web/*/http://trove.nla.gov.au/ndp/del/titles)
* [https://trove.nla.gov.au/newspaper/about](https://web.archive.org/web/*/https://trove.nla.gov.au/newspaper/about)

This notebook creates the following data files:

* [trove_newspaper_titles_2009_2021.csv](https://github.com/GLAM-Workbench/trove-newspapers/blob/master/trove_newspaper_titles_2009_2021.csv) – complete dataset of captures and titles
* [trove_newspaper_titles_first_appearance_2009_2021.csv](https://github.com/GLAM-Workbench/trove-newspapers/blob/master/trove_newspaper_titles_first_appearance_2009_2021.csv) – filtered dataset, showing only the first appearance of each title / place / date range combination

I've also created a [browseable list of titles](https://gist.github.com/wragge/7d80507c3e7957e271c572b8f664031a), showing when they first appeared in Trove.
###Code
import requests
import json
import re
from surt import surt
from bs4 import BeautifulSoup
import arrow
import pandas as pd
import altair as alt
from IPython.display import display, HTML
from pathlib import Path
###Output
_____no_output_____
###Markdown
Code for harvesting web archive captures

We're using the Memento protocol to get a list of captures. See the [Web Archives section](https://glam-workbench.net/web-archives/) of the GLAM Workbench for more details.
###Code
# The code in this cell is copied from notebooks in the Web Archives section of the GLAM Workbench (https://glam-workbench.net/web-archives/)
# In particular see: https://glam-workbench.net/web-archives/#find-all-the-archived-versions-of-a-web-page
# These are the repositories we'll be using
TIMEGATES = {
'awa': 'https://web.archive.org.au/awa/',
'nzwa': 'https://ndhadeliver.natlib.govt.nz/webarchive/wayback/',
'ukwa': 'https://www.webarchive.org.uk/wayback/en/archive/',
'ia': 'https://web.archive.org/web/'
}
def convert_lists_to_dicts(results):
'''
Converts IA style timemap (a JSON array of arrays) to a list of dictionaries.
Renames keys to standardise IA with other Timemaps.
'''
if results:
keys = results[0]
results_as_dicts = [dict(zip(keys, v)) for v in results[1:]]
else:
results_as_dicts = results
for d in results_as_dicts:
d['status'] = d.pop('statuscode')
d['mime'] = d.pop('mimetype')
d['url'] = d.pop('original')
return results_as_dicts
def get_capture_data_from_memento(url, request_type='head'):
'''
For OpenWayback systems this can get some extra capture info to insert into Timemaps.
'''
if request_type == 'head':
response = requests.head(url)
else:
response = requests.get(url)
headers = response.headers
length = headers.get('x-archive-orig-content-length')
status = headers.get('x-archive-orig-status')
status = status.split(' ')[0] if status else None
mime = headers.get('x-archive-orig-content-type')
mime = mime.split(';')[0] if mime else None
return {'length': length, 'status': status, 'mime': mime}
def convert_link_to_json(results, enrich_data=False):
'''
Converts link formatted Timemap to JSON.
'''
data = []
for line in results.splitlines():
parts = line.split('; ')
if len(parts) > 1:
link_type = re.search(r'rel="(original|self|timegate|first memento|last memento|memento)"', parts[1]).group(1)
if link_type == 'memento':
link = parts[0].strip('<>')
timestamp, original = re.search(r'/(\d{14})/(.*)$', link).groups()
capture = {'urlkey': surt(original), 'timestamp': timestamp, 'url': original}
if enrich_data:
capture.update(get_capture_data_from_memento(link))
print(capture)
data.append(capture)
return data
def get_timemap_as_json(timegate, url, enrich_data=False):
'''
Get a Timemap then normalise results (if necessary) to return a list of dicts.
'''
tg_url = f'{TIMEGATES[timegate]}timemap/json/{url}/'
response = requests.get(tg_url)
response_type = response.headers['content-type']
if response_type == 'text/x-ndjson':
data = [json.loads(line) for line in response.text.splitlines()]
elif response_type == 'application/json':
data = convert_lists_to_dicts(response.json())
elif response_type in ['application/link-format', 'text/html;charset=utf-8']:
data = convert_link_to_json(response.text, enrich_data=enrich_data)
return data
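# Usage sketch (an added illustration; requires live network access to web.archive.org):
# the harvesting loop below calls this for each page, e.g.
#   captures = get_timemap_as_json('ia', 'http://trove.nla.gov.au/ndp/del/titles')
#   len(captures)   # number of Internet Archive captures of the titles page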
###Output
_____no_output_____
###Markdown
Harvest the title data from the Internet Archive

This gets the web page captures from the Internet Archive, scrapes the list of titles from the page, then does a bit of normalisation of the title data.
###Code
titles = []
# These are the pages that listed available titles.
# There was a change in 2016
pages = [{'url': 'http://trove.nla.gov.au/ndp/del/titles', 'path': '/ndp/del/title/'},
{'url': 'https://trove.nla.gov.au/newspaper/about', 'path': '/newspaper/title/'}]
for page in pages:
for capture in get_timemap_as_json('ia', page['url']):
if capture['status'] == '200':
url = f'https://web.archive.org/web/{capture["timestamp"]}id_/{capture["url"]}'
#print(url)
capture_date = arrow.get(capture['timestamp'][:8], 'YYYYMMDD').format('YYYY-MM-DD')
#print(capture_date)
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')  # name the parser explicitly to avoid bs4's guessed-parser warning
title_links = soup.find_all('a', href=re.compile(page['path']))
for title in title_links:
# Get the title text
full_title = title.get_text().strip()
# Get the title id
title_id = re.search(r'\/(\d+)\/?$', title['href']).group(1)
# Most of the code below is aimed at normalising the publication place and dates values to allow for easy grouping & deduplication
brief_title = re.sub(r'\(.+\)\s*$', '', full_title).strip()
try:
details = re.search(r'\((.+)\)\s*$', full_title).group(1).split(':')
except AttributeError:
place = ''
dates = ''
else:
try:
place = details[0].strip()
# Normalise states
try:
place = re.sub(r'(, )?([A-Za-z]+)[\.\s]*$', lambda match: f'{match.group(1) if match.group(1) else ""}{match.group(2).upper()}', place)
except AttributeError:
pass
# Normalise dates
dates = ' - '.join([d.strip() for d in details[1].strip().split('-')])
except IndexError:
place = ''
dates = ' - '.join([d.strip() for d in details[0].strip().split('-')])
titles.append({'title_id': title_id, 'full_title': full_title, 'title': brief_title, 'place': place, 'dates': dates, 'capture_date': capture_date, 'capture_timestamp': capture['timestamp']})
###Output
_____no_output_____
###Markdown
Convert the title data to a DataFrame for analysis
###Code
df = pd.DataFrame(titles)
df
# Number of captures
len(df['capture_timestamp'].unique())
# Number of days on which the pages were captured
len(df['capture_date'].unique())
###Output
_____no_output_____
###Markdown
Save this dataset as a CSV file.
###Code
df.to_csv('trove_newspaper_titles_2009_2021.csv', index=False)
###Output
_____no_output_____
###Markdown
How did the number of titles change over time?
###Code
# Drop duplicates in cases where there were multiple captures on a single day
captures_df = df.drop_duplicates(subset=['capture_date', 'full_title'])
# Calculate totals per capture
capture_totals = captures_df['capture_date'].value_counts().to_frame().reset_index()
capture_totals.columns = ['capture_date', 'total']
capture_totals
alt.Chart(capture_totals).mark_line(point=True).encode(
x=alt.X('capture_date:T', title='Date captured'),
y=alt.Y('total:Q', title='Number of newspaper titles'),
tooltip=[alt.Tooltip('capture_date:T', format='%e %b %Y'), 'total:Q'],
).properties(width=700)
###Output
_____no_output_____
###Markdown
When did titles first appear?

For historiographical purposes, it's useful to know when a particular title first appeared in Trove. Here we'll only keep the first appearance of each title (or any subsequent changes to its date range / location).
###Code
first_appearance = df.drop_duplicates(subset=['title', 'place', 'dates'])
first_appearance
###Output
_____no_output_____
###Markdown
Find when a particular newspaper first appeared.
###Code
first_appearance.loc[first_appearance['title'] == 'Canberra Times']
###Output
_____no_output_____
###Markdown
Generate an alphabetical list for easy browsing. View the [results as a Gist](https://gist.github.com/wragge/7d80507c3e7957e271c572b8f664031a).
###Code
with Path('titles_list.md').open('w') as titles_list:
for title, group in first_appearance.groupby(['title', 'title_id']):
places = ' | '.join(group['place'].unique())
titles_list.write(f'<h4><a href="http://nla.gov.au/nla.news-title{title[1]}">{title[0]} ({places})</a></h4>')
titles_list.write(group.sort_values(by='capture_date')[['capture_date','dates', 'place']].to_html(index=False))
###Output
_____no_output_____
###Markdown
Save this dataset to CSV.
###Code
first_appearance.to_csv('trove_newspaper_titles_first_appearance_2009_2021.csv', index=False)
###Output
_____no_output_____ |
Fi.2a-r_InSAR_data_model_profile.ipynb | ###Markdown
Read Inversion results file
###Code
def get_basemap(dem_file):
dem, atr_dem = readfile.read(dem_file)
geo_box=[];
geo_box.append(float(atr_dem['X_FIRST']));geo_box.append(float(atr_dem['Y_FIRST']));  # use builtins; np.float/np.int were removed in NumPy 1.24
geo_box.append(geo_box[0]+int(atr_dem['WIDTH'])*float(atr_dem['X_STEP']));
geo_box.append(geo_box[1]+int(atr_dem['FILE_LENGTH'])*float(atr_dem['Y_STEP']));
map_projection='cyl'; ls = LightSource(azdeg=315, altdeg=45)
dem_shade = ls.shade(dem, vert_exag=1.0, cmap=plt.cm.gray, vmin=-20000, vmax=np.nanmax(dem)+2500)
return dem_shade,geo_box
def get_data_model(inversion_results_file):
mat =sio.loadmat(inversion_results_file,struct_as_record=False,squeeze_me=True)
Data=[];Model=[];Residual=[];Data_box=[];
for i in range(len(mat['insar'])):
insar_mat_file = mat['insar'][i].dataPath
mask = sio.loadmat(insar_mat_file, struct_as_record=False, squeeze_me=True)['Mask']
length, width = mask.shape
insarPlot = mat['insarPlot'][i]
data = np.zeros((length, width), dtype=np.float32) * np.nan
model = np.zeros((length, width), dtype=np.float32) * np.nan
residual = np.zeros((length, width), dtype=np.float32) * np.nan
data[mask!=0] = insarPlot.data
model[mask!=0] = insarPlot.model
residual[mask!=0] = insarPlot.residual;
atr = vars(sio.loadmat(insar_mat_file, struct_as_record=False, squeeze_me=True)['Metadata']);
data_box=[1,2,3,4];
data_box[0]=float(atr['X_FIRST']);data_box[1]=float(atr['Y_FIRST']);  # builtins instead of the removed np.float/np.int
data_box[2]=data_box[0]+int(atr['WIDTH'])*float(atr['X_STEP']);
data_box[3]=data_box[1]+int(atr['LENGTH'])*float(atr['Y_STEP']);
Data.append(data);Model.append(model);Residual.append(residual);Data_box.append(data_box);
#mat['insar'] = [mat['insar']]
insar_data=[Data[0],Data[1],Model[0],Model[1]];
Box= [Data_box[0],Data_box[1],Data_box[0],Data_box[1]]
return insar_data,Box
#DEM
dem_file='../InSAR_data/demGeo.h5'
dem_shade,geo_box=get_basemap(dem_file);
#Faults and coast lines
lines=sio.loadmat('../InSAR_data/hawaii_lines_new.mat',squeeze_me=True);
#get model data inversion results file
inversion_results_file = 'GBIS_files/2014_15_invert_1_2_GPS_D_M_F.mat'
#inversion_results_file = 'GBIS_files/2015_18_invert_1_2_GPS_D_M_F.mat'
#inversion_results_file = 'GBIS_files/2018_20_invert_1_2_GPS_D_M_F.mat'
insar_data,Box = get_data_model(inversion_results_file)
###Output
_____no_output_____
###Markdown
Get optimal model parameters
###Code
#top get model parameters
def get_optimal_model(inversion_results_file):
inversion_results=sio.loadmat(inversion_results_file,squeeze_me=True)
geo_origin=inversion_results['geo'].item()[0]
optimal_model=inversion_results['invResults']['model'].item()['optimal'].item().tolist()
par_names= inversion_results['invResults']['model'].item()['parName'].item().tolist()
par_names.append('Ins_const')
optimal_model = dict(zip(par_names, optimal_model))
return optimal_model,geo_origin
def local2llh(x,y,geo_ref_point):
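# flat-earth conversion: ~105 km per degree of longitude and ~111 km per degree of latitude near Hawaii (~19.5 N)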
ref_lon=float(geo_ref_point[0]);ref_lat=float(geo_ref_point[1]);
lon= ref_lon + (x/1000/105)
lat= ref_lat + (y/1000/111)
return lon,lat
def get_fault_coords(x,y,strike,fault_length,fault_width):
ss=math.sin(math.radians(strike));
cs=math.cos(math.radians(strike));
fault_x=[0] * 4;fault_y=[0] * 4;
fault_x[0]= x - fault_length*0.5*ss;fault_y[0]= y - fault_length*0.5*cs;
fault_x[1]= x + fault_length*0.5*ss;fault_y[1]= y + fault_length*0.5*cs;
fault_oct_x = x - fault_width*cs;fault_oct_y = y + fault_width*ss; #other side fault centre
fault_x[3]= fault_oct_x - fault_length*0.5*ss;fault_y[3]= fault_oct_y - fault_length*0.5*cs;
fault_x[2]= fault_oct_x + fault_length*0.5*ss;fault_y[2]= fault_oct_y + fault_length*0.5*cs;
return fault_x,fault_y
def get_geo_coords(X,Y,geo_ref_point):
for i in range(len(X)):
X[i],Y[i] = local2llh(X[i],Y[i],geo_ref_point); #print X[i],Y[i]
return X,Y
#get optimal parameters from inversion results
optimal_model,geo_origin = get_optimal_model(inversion_results_file)
fault_x,fault_y = get_fault_coords(optimal_model['FAUL X'],optimal_model['FAUL Y'],optimal_model['FAUL Strike'],optimal_model['FAUL Lenght'],optimal_model['FAUL Width'])
dike_x,dike_y = get_fault_coords(optimal_model['DIKE X'],optimal_model['DIKE Y'],optimal_model['DIKE Strike'],optimal_model['DIKE Lenght'],optimal_model['DIKE Width'])
fault_lon,fault_lat = get_geo_coords(fault_x,fault_y,geo_origin)
dike_lon,dike_lat = get_geo_coords(dike_x[0:2],dike_y[0:2],geo_origin)
mogi_lon,mogi_lat = local2llh(optimal_model['MOGI X'],optimal_model['MOGI Y'],geo_origin)
###Output
_____no_output_____
###Markdown
Plot InSAR data model
###Code
fig,axes= plt.subplots(2,2,figsize=(10,10),sharex=True, sharey=True);
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95, wspace=0.025,hspace=0.04)
nos=['a','b','c','d','c','d'];
ffsize=20;
for i in range(4):
#read data
Asc_data=insar_data[i];
data_box= Box[i]
#customize axes
if i<2:
ax=axes[0,i]
elif i<4:
ax=axes[1,i-2]
ax.tick_params(labelsize=ffsize);
ax.tick_params(axis='x',length=0, width=0);ax.tick_params(axis='y',length=0, width=0);
if i in [0,2]:
ax.tick_params(axis='y',length=15, width=5);
if i in [2,3]:
ax.tick_params(axis='x',length=15, width=5);
#plot data and model
map_projection='cyl'
m = Basemap(llcrnrlon=geo_box[0], llcrnrlat=geo_box[3],\
urcrnrlon=geo_box[2], urcrnrlat=geo_box[1],\
projection= map_projection,\
resolution='c', area_thresh=1., suppress_ticks=False, ax=ax)
m.imshow(dem_shade, origin='upper', interpolation='spline16')
m2 = Basemap(llcrnrlon=data_box[0], llcrnrlat=data_box[3],\
urcrnrlon=data_box[2], urcrnrlat=data_box[1],\
projection= map_projection,\
resolution='c', area_thresh=1., suppress_ticks=False, ax=ax)
img1= m2.imshow(Asc_data*100, origin='upper',cmap='jet',vmin=-5, vmax=5)
#add lines and faults
ax.plot(lines['Lllh'][:,0],lines['Lllh'][:,1],color='black', linestyle='dashed',linewidth=2)
#add model
if i in [2,3]:
ax.plot(dike_lon,dike_lat, 'r-',linewidth=3,color='white')
c1=Rectangle((fault_lon[3],fault_lat[3]),0.048,0.090,angle=-optimal_model['FAUL Strike'],fill=None,color='black',linewidth=2)
mogi=plt.Circle((mogi_lon,mogi_lat),0.01,color='white',linewidth=2,fill=False)
ax.add_artist(c1)
ax.add_artist(mogi)
#add colorscale
if i in [2,3]:
cbbox = inset_axes(ax, '100%', '50%', loc='lower right',bbox_to_anchor=(0.7, 0.02, 0.3, 0.3),bbox_transform=ax.transAxes)
[cbbox.spines[k].set_visible(False) for k in cbbox.spines]
cbbox.set_xticklabels([]);cbbox.set_yticklabels([]);
cbbox.tick_params(axis='both', left='off', top='off', right='off', bottom='off',
labelleft='off', labeltop='off', labelright='off', labelbottom='off',
length=0,width=0);cbbox.set_facecolor([1,1,1,0.5])
axins1 = inset_axes(ax,width="90%",height="20%",loc='lower right',bbox_to_anchor=(0.68, 0.1, 0.3, 0.3),bbox_transform=ax.transAxes)
axins1.tick_params(labelsize=15);
cbar=fig.colorbar(img1, cax=axins1, orientation='horizontal',ticks=[-5,0,5]);
cbar.set_label('cm/yr', rotation=0,labelpad=-38,size=15)#
#set axis limits
ax.set_ylim((19.3,19.6));ax.set_xlim((-155.75,-155.45))
ax.set_yticks([19.35, 19.45, 19.55]);ax.set_xticks([-155.7,-155.6,-155.5]);
###Output
/Users/bkv3/Documents/development/python/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:29: MatplotlibDeprecationWarning:
The dedent function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use inspect.cleandoc instead.
/Users/bkv3/Documents/development/python/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:34: MatplotlibDeprecationWarning:
The dedent function was deprecated in Matplotlib 3.1 and will be removed in 3.3. Use inspect.cleandoc instead.
###Markdown
Get values from the data and model along a transect. We choose it nearly perpendicular to the rift at the summit.
###Code
def get_transect(insarPlot):
Lon= insarPlot.ll[:,0]
Lat= insarPlot.ll[:,1]
data=insarPlot.data;
model=insarPlot.model
points = np.array((Lon.flatten(), Lat.flatten())).T
f1_nearest = NearestNDInterpolator(points, data.flatten())
f2_nearest = NearestNDInterpolator(points, model.flatten())
LONS=np.linspace(-155.70,-155.45,200);
LATS=np.linspace(19.50,19.40,200);
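# profile distance in km, using the same degrees-to-km factors (105, 111) as local2llh above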
dist = (((LONS-LONS[0])*105)**2+((LATS-LATS[0])*111)**2)**0.5
data_val = f1_nearest(LONS,LATS)
model_val = f2_nearest(LONS,LATS)
return dist,data_val,model_val
mat =sio.loadmat(inversion_results_file,struct_as_record=False,squeeze_me=True)
profile_dist,data_val,model_val=[],[],[];
for i in range(len(mat['insar'])):
insar_mat_file = mat['insar'][i].dataPath
mask = sio.loadmat(insar_mat_file, struct_as_record=False, squeeze_me=True)['Mask']
length, width = mask.shape
insarPlot = mat['insarPlot'][i]
dist,data,model=get_transect(insarPlot)
profile_dist.append(dist);data_val.append(data);model_val.append(model)
###Output
_____no_output_____
###Markdown
Plot transect on data and model
###Code
fig,axes= plt.subplots(1,2,figsize=(10,5),sharey=True);
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95, wspace=0.04,hspace=0.05)
xlim=(-2,32);ylim=(-2,11);
plt.setp(axes, xlim=xlim, ylim=ylim)
for i in range(2):
axes[i].tick_params(labelsize=20);
axes[i].tick_params(axis='x',length=15, width=5);
axes[i].tick_params(axis='y',length=15, width=5);
axes[i].set_xticks([0,15,30]);
axes[i].set_ylim((-2,11));
ax1=axes[0];ax2=axes[1];
ax2.tick_params(axis='y',length=0, width=0);
ax1.set_yticks([0,5,10])
ax1.plot(profile_dist[0],data_val[0]*100,label='Data',color='blue')
ax1.plot(profile_dist[0],model_val[0]*100,label='Model',color='red')
ax2.plot(profile_dist[1],data_val[1]*100,label='Data',color='blue')
ax2.plot(profile_dist[1],model_val[1]*100,label='Model',color='red')
legend = ax1.legend(loc='upper right', shadow=False, fontsize='x-large',frameon=False)
plt.show()
#save figure
#plt.savefig('InSAR_Data_model_profile.pdf',dpi=300,bbox_inches='tight',transparent=True)
###Output
_____no_output_____ |
demo-examples/pipeline_builder_v1.ipynb | ###Markdown
About this Jupyter Notebook

@author: Yingding Wang

Useful JupyterLab basics

Before starting, you may want to update JupyterLab with the command:

!{sys.executable} -m pip install --upgrade --user jupyterlab

1. Autocomplete syntax with "Tab"
2. View a doc string with "Shift + Tab"
3. Mark a code snippet -> select with right mouse -> Show Contextual Help (see the function code)
###Code
# import sys, os
# %env
import sys, os
print(f"Sys version: {sys.version}")
# os.environ["KF_PIPELINES_SA_TOKEN_PATH"]="/var/run/secrets/kubernetes.io/serviceaccount/token"
# os.environ["KF_PIPELINES_SA_TOKEN_PATH"]="/var/run/secrets/kubeflow/pipelines/token"
!{sys.executable} -m pip show jupyterlab # 3.0.16
# !{sys.executable} -m pip show jupyter_contrib_nbextensions
# update the jupyter lab
#!{sys.executable} -m pip install --upgrade --user jupyterlab
"""upgrade the kfp server api version to 1.7.0 for KF 1.4"""
# !{sys.executable} -m pip uninstall -y kfp-server-api
# !{sys.executable} -m pip install --user --upgrade kfp-server-api==1.7.0
import sys
!{sys.executable} -m pip install --upgrade --user kfp==1.8.12
!{sys.executable} -m pip install --upgrade --user kubernetes==18.20.0
#!{sys.executable} -m pip install --upgrade --user kubernetes==21.7.0
###Output
_____no_output_____
###Markdown
Restart the kernel

After updating kfp, restart this notebook kernel: Jupyter notebook Menu -> Kernel -> Restart Kernel.

Check the KubeFlow Pipeline version on the server side
###Code
!{sys.executable} -m pip list | grep kfp
###Output
kfp 1.8.12
kfp-pipeline-spec 0.1.14
kfp-server-api 1.7.0
###Markdown
Check my KubeFlow namespace total resource limits
###Code
# run command line to see the quota
!kubectl describe quota
###Output
Name: kf-resource-quota
Namespace: kubeflow-kindfor
Resource Used Hard
-------- ---- ----
basic-csi.storageclass.storage.k8s.io/persistentvolumeclaims 3 5
basic-csi.storageclass.storage.k8s.io/requests.storage 11Gi 50Gi
cpu 2110m 128
longhorn.storageclass.storage.k8s.io/persistentvolumeclaims 0 10
longhorn.storageclass.storage.k8s.io/requests.storage 0 500Gi
memory 2108Mi 512Gi
###Markdown
Setup

Example pipeline from
https://github.com/kubeflow/examples/tree/master/pipelines/simple-notebook-pipeline

Getting started with Python function-based components:
https://www.kubeflow.org/docs/components/pipelines/sdk/python-function-components/
###Code
from platform import python_version
EXPERIMENT_NAME = 'core kf test' # Name of the experiment in the UI
EXPERIMENT_DESC = 'testing KF platform'
# BASE_IMAGE = f"library/python:{python_version()}" # Base image used for components in the pipeline, which has not root
BASE_IMAGE = "python:3.8.13"
NAME_SPACE = "kubeflow-kindfor" # change namespace if necessary
import kfp
import kubernetes
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as components
###Output
_____no_output_____
###Markdown
Connecting KFP Python SDK from Notebook to Pipeline

* https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api/
###Code
print(kfp.__version__)
print(kubernetes.__version__)
def add(a: float, b: float) -> float:
'''Calculates sum of two arguments'''
print(a, '+', b, '=', a + b)
return a + b
###Output
_____no_output_____
###Markdown
Create component from function

https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.components.html
###Code
# returns a task factory function
add_op = components.create_component_from_func(
add,
output_component_file='add_component.yaml',
base_image=BASE_IMAGE,
packages_to_install=None
)
###Output
_____no_output_____
###Markdown
Add pod memory and cpu restriction

https://github.com/kubeflow/pipelines/pull/5695
###Code
'''
def pod_defaults_transformer(op: dsl.ContainerOp):
op.set_memory_request('100Mi') # op.set_memory_limit('1000Mi')
op.set_memory_limit('100Mi')
op.set_cpu_request('100m') # 1 core, # op.set_cpu_limit('1000m')
op.set_cpu_limit('1000m')
return op
'''
def pod_defaults_transformer(op: dsl.ContainerOp):
"""
op.set_memory_limit('1000Mi') = 1GB
op.set_cpu_limit('1000m') = 1 cpu core
"""
return op.set_memory_request('200Mi')\
.set_memory_limit('1000Mi')\
.set_cpu_request('2000m')\
.set_cpu_limit('2000m')
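# Usage note (an added sketch): apply this per-task, as calc_pipeline does below, or
# pipeline-wide via dsl.PipelineConf().add_op_transformer(pod_defaults_transformer)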
@dsl.pipeline(
name='Calculation pipeline',
description='A toy pipeline that performs arithmetic calculations.'
)
def calc_pipeline(
a: float =0,
b: float =7
):
# Passing pipeline parameter and a constant value as operation arguments
# first_add_task = add_op(a, 4)
first_add_task = pod_defaults_transformer(add_op(a, 4))
# no value taken from cache
first_add_task.execution_options.caching_strategy.max_cache_staleness = "P0D"
# second_add_task = add_op(first_add_task.output, b)
second_add_task = pod_defaults_transformer(add_op(first_add_task.output, b))
# no cache
second_add_task.execution_options.caching_strategy.max_cache_staleness = "P0D"
###Output
_____no_output_____
###Markdown
(optional step) Compile the pipeline to see the settings
###Code
PIPE_LINE_FILE_NAME="calc_pipeline_with_resource_limit"
kfp.compiler.Compiler().compile(calc_pipeline, f"{PIPE_LINE_FILE_NAME}.yaml")
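# Optional sketch (an added illustration): peek at the compiled spec to confirm the
# resource settings landed on the container ops. Assumes PyYAML is available; KFP v1
# compiles pipelines to an Argo Workflow manifest.
# import yaml
# with open(f"{PIPE_LINE_FILE_NAME}.yaml") as f:
#     workflow = yaml.safe_load(f)
# print(workflow["kind"])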
###Output
_____no_output_____
###Markdown
Run Pipeline with Multi-user Isolation

https://www.kubeflow.org/docs/components/pipelines/multi-user/
###Code
# get the pipeline host from env set up be the notebook instance
client = kfp.Client()
# Make sure the volume is mounted /run/secrets/kubeflow/pipelines
# client.get_experiment(experiment_name=EXPERIMENT_NAME, namespace=NAME_SPACE)
# client.list_pipelines()
# print(NAME_SPACE)
# client.list_experiments(namespace=NAME_SPACE)
client.set_user_namespace(NAME_SPACE)
print(client.get_user_namespace())
exp = client.create_experiment(EXPERIMENT_NAME, description=EXPERIMENT_DESC)
# Specify pipeline argument values
arguments = {'a': '7', 'b': '8'}
# added a default pod transformer to all the pipeline ops
pipeline_config: dsl.PipelineConf = dsl.PipelineConf()
#pipeline_config.add_op_transformer(
# pod_defaults_transformer
#)
client.create_run_from_pipeline_func(pipeline_func=calc_pipeline, arguments=arguments,
experiment_name=EXPERIMENT_NAME, namespace=NAME_SPACE,
pipeline_conf=pipeline_config)
# The generated links below lead to the Experiment page and the pipeline run details page, respectively
###Output
_____no_output_____ |
tutorials/model_conversion/basic.ipynb | ###Markdown
Here is a brief introduction to how to use our model converter. We use the pretrained MobileNetV2 model as an example.
###Code
import torch
import torchvision
model = torchvision.models.mobilenet_v2(pretrained=True)
###Output
_____no_output_____
###Markdown
Then, we will convert it to TFLite using TinyNeuralNetwork.
###Code
import sys
sys.path.append('../..')
from tinynn.converter import TFLiteConverter
# Provide a viable input to the model
dummy_input = torch.randn(1, 3, 224, 224)
model_path = 'mobilenet_v2.tflite'
# Moving the model to cpu and set it to evaluation mode before model conversion
with torch.no_grad():
model.cpu()
model.eval()
converter = TFLiteConverter(model, dummy_input, model_path)
converter.convert()
###Output
INFO (tinynn.converter.base) Generated model saved to mobilenet_v2.tflite
###Markdown
Let's prepare an example input using an online image.
###Code
import os
from torch.hub import download_url_to_file
cwd = os.path.abspath(os.getcwd())
img_path = os.path.join(cwd, 'dog.jpg')
img_urls = [
'https://github.com/pytorch/hub/raw/master/images/dog.jpg',
'https://raw.fastgit.org/pytorch/hub/master/images/dog.jpg',
]
# If you have difficulties accessing GitHub, you may try out the second link
download_url_to_file(img_urls[0], img_path)
from PIL import Image
from torchvision import transforms
import numpy as np
img = Image.open(img_path)
mean = np.array([0.485, 0.456, 0.406], dtype='float32')
std = np.array([0.229, 0.224, 0.225], dtype='float32')
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
]
)
# Image preprocessing
processed_img = preprocess(img)
arr = np.asarray(processed_img).astype('float32') / 255
normalized = (arr - mean) / std
input_arr = np.expand_dims(normalized, 0)
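# Note: input_arr stays channels-last (NHWC), the layout TFLite expects; the PyTorch
# check further down permutes it to channels-first (NCHW) before calling the model.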
###Output
_____no_output_____
###Markdown
Let's run the generated TFLite model with the example input.
###Code
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], input_arr)
interpreter.invoke()
output_arr = interpreter.get_tensor(output_details[0]['index'])
print('TFLite out:', np.argmax(output_arr))
###Output
TFLite out: 258
###Markdown
Let's check whether the output is consistent with the one predicted by the original model.
###Code
torch_input = torch.from_numpy(input_arr).permute((0, 3, 1, 2))
with torch.no_grad():
torch_output = model(torch_input)
print('PyTorch out:', torch.argmax(torch_output))
###Output
PyTorch out: tensor(258)
|
Learning Notes/Learning Notes ML - 7 Support Vector Machines.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*

*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*

In-Depth: Support Vector Machines

Support vector machines (SVMs) are a particularly powerful and flexible class of supervised algorithms for both classification and regression. In this section, we will develop the intuition behind support vector machines and their use in classification problems. We begin with the standard imports:
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
###Output
_____no_output_____
###Markdown
Motivating Support Vector Machines As part of our discussion of Bayesian classification (see [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb)), we learned a simple model describing the distribution of each underlying class, and used these generative models to probabilistically determine labels for new points.That was an example of *generative classification*; here we will consider instead *discriminative classification*: rather than modeling each class, we simply find a line or curve (in two dimensions) or manifold (in multiple dimensions) that divides the classes from each other.As an example of this, consider the simple case of a classification task, in which the two classes of points are well separated:
###Code
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn');
###Output
_____no_output_____
###Markdown
A linear discriminative classifier would attempt to draw a straight line separating the two sets of data, and thereby create a model for classification.For two-dimensional data like that shown here, this is a task we could do by hand.But immediately we see a problem: there is more than one possible dividing line that can perfectly discriminate between the two classes!We can draw them as follows:
###Code
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plt.plot([0.6], [2.1], 'x', color='red', markeredgewidth=2, markersize=10)
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
###Output
_____no_output_____
###Markdown
These are three *very* different separators which, nevertheless, perfectly discriminate between these samples.Depending on which you choose, a new data point (e.g., the one marked by the "X" in this plot) will be assigned a different label!Evidently our simple intuition of "drawing a line between classes" is not enough, and we need to think a bit deeper. Support Vector Machines: Maximizing the *Margin*Support vector machines offer one way to improve on this.The intuition is this: rather than simply drawing a zero-width line between the classes, we can draw around each line a *margin* of some width, up to the nearest point.Here is an example of how this might look:
###Code
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
plt.plot(xfit, yfit, '-k')
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none',
color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
###Output
_____no_output_____
###Markdown
In support vector machines, the line that maximizes this margin is the one we will choose as the optimal model.Support vector machines are an example of such a *maximum margin* estimator. Fitting a support vector machineLet's see the result of an actual fit to this data: we will use Scikit-Learn's support vector classifier to train an SVM model on this data.For the time being, we will use a linear kernel and set the ``C`` parameter to a very large number (we'll discuss the meaning of these in more depth momentarily).
###Code
from sklearn.svm import SVC # "Support vector classifier"
model = SVC(kernel='linear', C=1E10)
model.fit(X, y)
###Output
_____no_output_____
###Markdown
To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us:
###Code
def plot_svc_decision_function(model, ax=None, plot_support=True):
"""Plot the decision function for a 2D SVC"""
if ax is None:
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
x = np.linspace(xlim[0], xlim[1], 30)
y = np.linspace(ylim[0], ylim[1], 30)
Y, X = np.meshgrid(y, x)
xy = np.vstack([X.ravel(), Y.ravel()]).T
P = model.decision_function(xy).reshape(X.shape)
# plot decision boundary and margins
ax.contour(X, Y, P, colors='k',
levels=[-1, 0, 1], alpha=0.5,
linestyles=['--', '-', '--'])
# plot support vectors
if plot_support:
ax.scatter(model.support_vectors_[:, 0],
model.support_vectors_[:, 1],
s=300, linewidth=1, facecolors='none');
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(model);
###Output
_____no_output_____
###Markdown
This is the dividing line that maximizes the margin between the two sets of points.Notice that a few of the training points just touch the margin: they are indicated by the black circles in this figure.These points are the pivotal elements of this fit; they are known as the *support vectors*, and they give the algorithm its name.In Scikit-Learn, the identities of these points are stored in the ``support_vectors_`` attribute of the classifier:
###Code
model.support_vectors_
###Output
_____no_output_____
###Markdown
A key to this classifier's success is that for the fit, only the positions of the support vectors matter; any points further from the margin which are on the correct side do not modify the fit!Technically, this is because these points do not contribute to the loss function used to fit the model, so their position and number do not matter so long as they do not cross the margin. (Concretely, each point contributes the hinge loss $\max\left(0,\, 1 - y_i\,(w \cdot x_i + b)\right)$, which is exactly zero for any correctly classified point outside the margin.)We can see this, for example, if we plot the model learned from the first 60 points and first 120 points of this dataset:
###Code
def plot_svm(N=10, ax=None):
X, y = make_blobs(n_samples=200, centers=2,
random_state=0, cluster_std=0.60)
X = X[:N]
y = y[:N]
model = SVC(kernel='linear', C=1E10)
model.fit(X, y)
ax = ax or plt.gca()
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
ax.set_xlim(-1, 4)
ax.set_ylim(-1, 6)
plot_svc_decision_function(model, ax)
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
for axi, N in zip(ax, [60, 120]):
plot_svm(N, axi)
axi.set_title('N = {0}'.format(N))
###Output
_____no_output_____
###Markdown
In the left panel, we see the model and the support vectors for 60 training points.In the right panel, we have doubled the number of training points, but the model has not changed: the three support vectors from the left panel are still the support vectors from the right panel.This insensitivity to the exact behavior of distant points is one of the strengths of the SVM model. If you are running this notebook live, you can use IPython's interactive widgets to view this feature of the SVM model interactively:
###Code
from ipywidgets import interact, fixed
interact(plot_svm, N=[10, 200], ax=fixed(None));
###Output
_____no_output_____
###Markdown
Beyond linear boundaries: Kernel SVMWhere SVM becomes extremely powerful is when it is combined with *kernels*.We have seen a version of kernels before, in the basis function regressions of [In Depth: Linear Regression](05.06-Linear-Regression.ipynb).There we projected our data into higher-dimensional space defined by polynomials and Gaussian basis functions, and thereby were able to fit for nonlinear relationships with a linear classifier.In SVM models, we can use a version of the same idea.To motivate the need for kernels, let's look at some data that is not linearly separable:
###Code
from sklearn.datasets import make_circles
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(clf, plot_support=False);
###Output
_____no_output_____
###Markdown
It is clear that no linear discrimination will *ever* be able to separate this data.But we can draw a lesson from the basis function regressions in [In Depth: Linear Regression](05.06-Linear-Regression.ipynb), and think about how we might project the data into a higher dimension such that a linear separator *would* be sufficient.For example, one simple projection we could use would be to compute a *radial basis function* centered on the middle clump:
###Code
r = np.exp(-(X ** 2).sum(1))
###Output
_____no_output_____
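###Markdown
That is, each point $x$ receives a new coordinate $r = e^{-\lVert x\rVert^{2}}$, which is largest for points near the origin, i.e., the middle clump.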
###Markdown
We can visualize this extra data dimension using a three-dimensional plot—if you are running this notebook live, you will be able to use the sliders to rotate the plot:
###Code
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30, X=X, y=y):
ax = plt.subplot(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='autumn')
ax.view_init(elev=elev, azim=azim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('r')
interact(plot_3D, elev=[-90, 90], azim=(-180, 180),
X=fixed(X), y=fixed(y));
###Output
_____no_output_____
###Markdown
We can see that with this additional dimension, the data becomes trivially linearly separable, by drawing a separating plane at, say, *r*=0.7.Here we had to choose and carefully tune our projection: if we had not centered our radial basis function in the right location, we would not have seen such clean, linearly separable results.In general, the need to make such a choice is a problem: we would like to somehow automatically find the best basis functions to use.One strategy to this end is to compute a basis function centered at *every* point in the dataset, and let the SVM algorithm sift through the results.This type of basis function transformation is known as a *kernel transformation*, as it is based on a similarity relationship (or kernel) between each pair of points.A potential problem with this strategy—projecting $N$ points into $N$ dimensions—is that it might become very computationally intensive as $N$ grows large.However, because of a neat little procedure known as the [*kernel trick*](https://en.wikipedia.org/wiki/Kernel_trick), a fit on kernel-transformed data can be done implicitly—that is, without ever building the full $N$-dimensional representation of the kernel projection!This kernel trick is built into the SVM, and is one of the reasons the method is so powerful.In Scikit-Learn, we can apply kernelized SVM simply by changing our linear kernel to an RBF (radial basis function) kernel, using the ``kernel`` model hyperparameter:
###Code
clf = SVC(kernel='rbf', C=1E6)
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=300, lw=1, facecolors='none');
###Output
_____no_output_____
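###Markdown
For reference, the RBF kernel used here measures the similarity between two points as $K(x, x') = \exp\left(-\gamma\,\lVert x - x'\rVert^{2}\right)$; the width parameter $\gamma$ plays the role of the hand-tuned radial basis function above, except that one is effectively centered at every training point.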
###Markdown
Using this kernelized support vector machine, we learn a suitable nonlinear decision boundary.This kernel transformation strategy is used often in machine learning to turn fast linear methods into fast nonlinear methods, especially for models in which the kernel trick can be used. Tuning the SVM: Softening MarginsOur discussion thus far has centered around very clean datasets, in which a perfect decision boundary exists.But what if your data has some amount of overlap?For example, you may have data like this:
###Code
X, y = make_blobs(n_samples=100, centers=2,
random_state=0, cluster_std=1.2)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn');
###Output
_____no_output_____
###Markdown
To handle this case, the SVM implementation has a bit of a fudge-factor which "softens" the margin: that is, it allows some of the points to creep into the margin if that allows a better fit.The hardness of the margin is controlled by a tuning parameter, most often known as $C$.For very large $C$, the margin is hard, and points cannot lie in it.For smaller $C$, the margin is softer, and can grow to encompass some points.The plot shown below gives a visual picture of how a changing $C$ parameter affects the final fit, via the softening of the margin:
###Code
X, y = make_blobs(n_samples=100, centers=2,
random_state=0, cluster_std=0.8)
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
for axi, C in zip(ax, [10.0, 0.1]):
model = SVC(kernel='linear', C=C).fit(X, y)
axi.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='autumn')
plot_svc_decision_function(model, axi)
axi.scatter(model.support_vectors_[:, 0],
model.support_vectors_[:, 1],
s=300, lw=1, facecolors='none');
axi.set_title('C = {0:.1f}'.format(C), size=14)
###Output
_____no_output_____
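###Markdown
The plots above contrast a hard and a soft margin; here is a minimal sketch of picking $C$ by cross-validation (assuming the `X`, `y` blobs from the cell above are still in scope):
###Code
from sklearn.model_selection import GridSearchCV
# search a coarse logarithmic grid of margin-hardness values
grid = GridSearchCV(SVC(kernel='linear'), {'C': [0.01, 0.1, 1, 10, 100]}, cv=5)
grid.fit(X, y)
print(grid.best_params_)
###Output
_____no_output_____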
###Markdown
The optimal value of the $C$ parameter will depend on your dataset, and should be tuned using cross-validation or a similar procedure (refer back to [Hyperparameters and Model Validation](05.03-Hyperparameters-and-Model-Validation.ipynb)). Example: Face RecognitionAs an example of support vector machines in action, let's take a look at the facial recognition problem.We will use the Labeled Faces in the Wild dataset, which consists of several thousand collated photos of various public figures.A fetcher for the dataset is built into Scikit-Learn:
###Code
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
###Output
['Ariel Sharon' 'Colin Powell' 'Donald Rumsfeld' 'George W Bush'
'Gerhard Schroeder' 'Hugo Chavez' 'Junichiro Koizumi' 'Tony Blair']
(1348, 62, 47)
###Markdown
Let's plot a few of these faces to see what we're working with:
###Code
fig, ax = plt.subplots(3, 5)
for i, axi in enumerate(ax.flat):
axi.imshow(faces.images[i], cmap='bone')
axi.set(xticks=[], yticks=[],
xlabel=faces.target_names[faces.target[i]])
###Output
_____no_output_____
###Markdown
Each image contains 62×47, or nearly 3,000, pixels.We could proceed by simply using each pixel value as a feature, but often it is more effective to use some sort of preprocessor to extract more meaningful features; here we will use a principal component analysis (see [In Depth: Principal Component Analysis](05.09-Principal-Component-Analysis.ipynb)) to extract 150 fundamental components to feed into our support vector machine classifier.We can do this most straightforwardly by packaging the preprocessor and the classifier into a single pipeline:
###Code
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
pca = PCA(n_components=150, whiten=True, svd_solver='randomized', random_state=42)
svc = SVC(kernel='rbf', class_weight='balanced')
model = make_pipeline(pca, svc)
###Output
_____no_output_____
###Markdown
For the sake of testing our classifier output, we will split the data into a training and testing set:
###Code
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(faces.data, faces.target,
random_state=42)
###Output
_____no_output_____
###Markdown
Finally, we can use a grid search cross-validation to explore combinations of parameters.Here we will adjust ``C`` (which controls the margin hardness) and ``gamma`` (which controls the size of the radial basis function kernel), and determine the best model:
###Code
from sklearn.model_selection import GridSearchCV
param_grid = {'svc__C': [1, 5, 10, 50],
'svc__gamma': [0.0001, 0.0005, 0.001, 0.005]}
grid = GridSearchCV(model, param_grid)
%time grid.fit(Xtrain, ytrain)
print(grid.best_params_)
###Output
CPU times: user 47.8 s, sys: 4.08 s, total: 51.8 s
Wall time: 26 s
{'svc__gamma': 0.001, 'svc__C': 10}
###Markdown
The optimal values fall toward the middle of our grid; if they fell at the edges, we would want to expand the grid to make sure we have found the true optimum.Now with this cross-validated model, we can predict the labels for the test data, which the model has not yet seen:
###Code
model = grid.best_estimator_
yfit = model.predict(Xtest)
###Output
_____no_output_____
###Markdown
Let's take a look at a few of the test images along with their predicted values:
###Code
fig, ax = plt.subplots(4, 6)
for i, axi in enumerate(ax.flat):
axi.imshow(Xtest[i].reshape(62, 47), cmap='bone')
axi.set(xticks=[], yticks=[])
axi.set_ylabel(faces.target_names[yfit[i]].split()[-1],
color='black' if yfit[i] == ytest[i] else 'red')
fig.suptitle('Predicted Names; Incorrect Labels in Red', size=14);
###Output
_____no_output_____
###Markdown
Out of this small sample, our optimal estimator mislabeled only a single face (Bush's face in the bottom row was mislabeled as Blair).We can get a better sense of our estimator's performance using the classification report, which lists recovery statistics label by label:
###Code
from sklearn.metrics import classification_report
print(classification_report(ytest, yfit,
target_names=faces.target_names))
###Output
precision recall f1-score support
Ariel Sharon 0.65 0.73 0.69 15
Colin Powell 0.81 0.87 0.84 68
Donald Rumsfeld 0.75 0.87 0.81 31
George W Bush 0.93 0.83 0.88 126
Gerhard Schroeder 0.86 0.78 0.82 23
Hugo Chavez 0.93 0.70 0.80 20
Junichiro Koizumi 0.80 1.00 0.89 12
Tony Blair 0.83 0.93 0.88 42
avg / total 0.85 0.85 0.85 337
###Markdown
We might also display the confusion matrix between these classes:
###Code
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(ytest, yfit)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=faces.target_names,
yticklabels=faces.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label');
###Output
_____no_output_____ |
notebooks/4.1-baselines: LR and DT (domain features + imbalanced dataset).ipynb | ###Markdown
Logistic Regression
###Code
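# Up-weight the positive (minority) class 10x in the loss to counter the class imbalance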
weights = {0:1, 1:10}
lr_clf = LogisticRegression(solver='lbfgs', class_weight=weights, max_iter=2000)
lr_clf.fit(X_train, y_train)
y_true = y_test.values
y_score = lr_clf.predict_proba(X_test)
y_pred = lr_clf.predict(X_test)
evaluate(y_true, y_score[:, 1], y_pred)
plot_precision_recall_curve(y_true, y_score[:, 1])
plot_roc_curve(y_true, y_score[:, 1])
###Output
_____no_output_____
###Markdown
Decision Tree
###Code
weights = {0:1, 1:10}
dt_clf = DecisionTreeClassifier(class_weight=weights)
dt_clf.fit(X_train, y_train)
y_true = y_test.values
y_score = dt_clf.predict_proba(X_test)
y_pred = dt_clf.predict(X_test)
evaluate(y_true, y_score[:, 1], y_pred)
plot_precision_recall_curve(y_true, y_score[:, 1])
plot_roc_curve(y_true, y_score[:, 1])
###Output
_____no_output_____ |
Introduction to Portfolio Construction and Analysis with Python/W2/Efficient Frontier II.ipynb | ###Markdown
Efficient Frontier II
###Code
import pandas as pd
import numpy as np
import ashmodule as ash
%matplotlib inline
%load_ext autoreload
%autoreload 2
ind = ash.get_idx_returns()
er = ash.annualize_rets(ind["1996":"2000"],12)
cov = ind["1996":"2000"].cov()
ind.columns
l = ["Oil","Servs","Fin","Clths"]
weights = np.repeat(1/4,4)
ash.portfolio_return(weights , er[l])
ash.portfolio_vol(weights, cov.loc[l,l])
###Output
_____no_output_____
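###Markdown
For reference, the two helpers above presumably implement the standard portfolio formulas $R_p = w^\top \mu$ and $\sigma_p = \sqrt{w^\top \Sigma w}$; a minimal NumPy sketch of what `ash.portfolio_return` and `ash.portfolio_vol` are assumed to compute:
###Code
def portfolio_return_sketch(weights, returns):
    # weighted average of the assets' expected returns: w' mu
    return weights.T @ returns

def portfolio_vol_sketch(weights, covmat):
    # square root of the quadratic form w' Sigma w
    return (weights.T @ covmat @ weights) ** 0.5
###Output
_____no_output_____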
###Markdown
Two-Asset Efficient Frontier
###Code
l = ["Fin", "Oil"]
n_points = 30
weights = [np.array([w,1-w]) for w in np.linspace(0,1,n_points)]
rets = [ash.portfolio_return(w, er[l]) for w in weights]
vols = [ash.portfolio_vol(w, cov.loc[l,l]) for w in weights]
ef = pd.DataFrame({"R": rets , "Vol": vols})
ef.plot.scatter(x="Vol", y = "R",figsize = (15,7), title="Efficient Frontier for Fin and Oil",style = ".-");
l=["Fin","Beer"]
ash.plot_ef2(40, er[l],cov.loc[l,l],figsize=(15,9),color="Green");
###Output
_____no_output_____
###Markdown
Multi-Asset Efficient Frontier
###Code
l = ["Smoke", "Fin", "Games", "Coal"]
ash.plot_ef(25, er[l],cov.loc[l,l], figsize = (15,9));
###Output
_____no_output_____ |
BlackScholesModel/BlackScholes.ipynb | ###Markdown
Black-Scholes Model
In this notebook we illustrate the basic properties of the Black-Scholes model.
The notebook is structured as follows:
1. Black-Scholes model code
2. Analysis of value function
3. Analysis of Greeks, i.e. sensitivities to model parameters Black-Scholes Model Code
We use a couple of standard Python modules.
###Code
import numpy as np
from scipy.stats import norm
from scipy.optimize import brentq
import plotly.express as px
import plotly.graph_objects as go
###Output
_____no_output_____
###Markdown
As a basic building block we implement the Black formula.
$$
\begin{aligned}
\text{Black}\left(F,K,\nu,\phi\right) &=\phi\,\left[F\,\Phi\left(\phi d_{1}\right)-K\,\Phi\left(\phi d_{2}\right)\right],\\
d_{1,2}&=\frac{\log\left(F/K\right)}{\nu}\pm\frac{\nu}{2}.
\end{aligned}
$$
###Code
def BlackOverK(moneyness, nu, callOrPut):
d1 = np.log(moneyness) / nu + nu / 2.0
d2 = d1 - nu
return callOrPut * (moneyness*norm.cdf(callOrPut*d1)-norm.cdf(callOrPut*d2))
def Black(forward, strike, nu, callOrPut):
if nu<1.0e-12: # assume zero
return np.maximum(callOrPut*(forward-strike),0.0) # intrinsic value
return strike * BlackOverK(forward/strike,nu,callOrPut)
def BlackImpliedVol(price, strike, forward, T, callOrPut):
def objective(nu):
return Black(forward, strike, nu, callOrPut) - price
return brentq(objective,0.01*np.sqrt(T), 1.00*np.sqrt(T), xtol=1.0e-8) / np.sqrt(T)
def BlackVega(strike, forward, sigma, T):
stdDev = sigma*np.sqrt(T)
d1 = np.log(forward/strike) / stdDev + stdDev / 2.0
return forward * norm.pdf(d1) * np.sqrt(T)
###Output
_____no_output_____
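###Markdown
As a quick sanity check: at the money ($F=K$) the Black price reduces to $K\left[\Phi(\nu/2)-\Phi(-\nu/2)\right]$, which is about $0.0797\,K$ for $\nu=0.2$.
###Code
# ATM check: for F = K = 100 and nu = 0.2 we expect a price of roughly 7.97
print(Black(100.0, 100.0, 0.2, 1.0))
###Output
_____no_output_____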
###Markdown
Analysis of Value Function
$$
v(s,T) = e^{-rT}\,\text{Black}\left(s\,e^{rT},K,\sigma\sqrt{T},\phi\right),
$$
###Code
def BlackScholesPrice(underlying, strike, rate, sigma, T, callOrPut):
df = np.exp(-rate*T)
nu = sigma*np.sqrt(T)
return df * Black(underlying/df, strike, nu, callOrPut)
###Output
_____no_output_____
###Markdown
We need to specify some sensible model and product parameters.
###Code
r = 0.01 # 1% risk-free rate is a sensible choice in current low-interest rate market environment
sigma = 0.15 # typical values for annualised equity volatility is between 10% - 25%
K = 1.0 # the strike should be in the order of the underlying asset; we will assume S~O(1)
phi = 1.0 # call or put
###Output
_____no_output_____
###Markdown
We want to see the value function for a grid of maturities $[0,T_{end}]$ and underlying risky asset prices $(0, S_{max}]$.
###Code
T = np.linspace(0.0, 2.0, 201)
S = np.linspace(0.01, 2.0, 200)
###Output
_____no_output_____
###Markdown
Now, we can calculate the call option prices.
###Code
v = lambda s, t : BlackScholesPrice(s, K, r, sigma, t, phi)
v_sT = np.array([ v(S,t) for t in T ]).transpose()
print(v_sT.shape)
fig = go.Figure(data=[go.Surface(x=T, y=S, z=v_sT)])
fig.update_layout(
title='Black-Scholes Value Function',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'v',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Analysis of Greeks
Greeks represent sensitivities of the value function with respect to changes in the model parameters. Delta
$$
\Delta_{BS}(s,T)=\frac{d}{ds}v(s,T) = \phi\,\Phi\left(\phi d_{1}\right).
$$
###Code
def BlackScholesDelta(underlying, strike, rate, sigma, T, callOrPut):
moneyness = np.exp(rate*T) * underlying / strike
nu = sigma * np.sqrt(T)
d1 = np.log(moneyness) / nu + nu / 2.0
return callOrPut * norm.cdf(callOrPut * d1)
###Output
_____no_output_____
###Markdown
We calculate the Delta for a range of underlyings and times.
###Code
T = np.linspace(0.01, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
Delta = lambda s, t : BlackScholesDelta(s, K, r, sigma, t, phi)
dv_ds = np.array([ Delta(S,t) for t in T ]).transpose()
print(dv_ds.shape)
# Check Delta via finite differences
eps = 1.0e-4
Delta_FD = lambda s, t : (BlackScholesPrice(s+eps, K, r, sigma, t, phi) - BlackScholesPrice(s-eps, K, r, sigma, t, phi))/2/eps
dv_ds_FD = np.array([ Delta_FD(S,t) for t in T ]).transpose()
print(np.max(np.abs(dv_ds-dv_ds_FD)))
###Output
_____no_output_____
###Markdown
And we plot the resulting sensitivity.
###Code
fig = go.Figure(data=[go.Surface(x=T, y=S, z=dv_ds)])
fig.update_layout(
title='Black-Scholes Delta',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'Delta',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Gamma
$$
\Gamma_{BS} = \frac{d}{ds}\Delta_{BS}(s,T)=\frac{d^{2}}{ds^{2}}v(s,T) = \frac{\Phi'\left(d_{1}\right)}{s\,\sigma\sqrt{T}}.
$$
###Code
def BlackScholesGamma(underlying, strike, rate, sigma, T, callOrPut):
moneyness = np.exp(rate*T) * underlying / strike
nu = sigma * np.sqrt(T)
d1 = np.log(moneyness) / nu + nu / 2.0
return norm.pdf(d1) / underlying / nu
###Output
_____no_output_____
###Markdown
We calculate the Gamma for a range of underlyings and times.
###Code
T = np.linspace(0.1, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
Gamma = lambda s, t : BlackScholesGamma(s, K, r, sigma, t, phi)
d2v_ds2 = np.array([ Gamma(S,t) for t in T ]).transpose()
print(d2v_ds2.shape)
# Check Gamma via finite differences
eps = 1.0e-4
Gamma_FD = lambda s, t : (BlackScholesPrice(s+eps, K, r, sigma, t, phi) - 2 * BlackScholesPrice(s, K, r, sigma, t, phi) + BlackScholesPrice(s-eps, K, r, sigma, t, phi))/eps**2
d2v_ds2_FD = np.array([ Gamma_FD(S,t) for t in T ]).transpose()
print(np.max(np.abs(d2v_ds2 - d2v_ds2_FD)))
fig = go.Figure(data=[go.Surface(x=T, y=S, z=d2v_ds2)])
fig.update_layout(
title='Black-Scholes Gamma',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'Gamma',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Theta
$$
\Theta_{BS}(s,T)=\frac{d}{dT}v(s,T) = \frac{s\,\Phi'\left(d_{1}\right)\,\sigma}{2\,\sqrt{T}}+\phi\,r\,K\,e^{-rT}\,\Phi\left(\phi d_{2}\right)
$$
###Code
def BlackScholesTheta(underlying, strike, rate, sigma, T, callOrPut):
moneyness = np.exp(rate*T) * underlying / strike
nu = sigma * np.sqrt(T)
d1 = np.log(moneyness) / nu + nu / 2.0
d2 = d1 - nu
return underlying * norm.pdf(d1) * sigma / 2 / np.sqrt(T) + \
callOrPut * rate * strike * np.exp(-rate*T) * norm.cdf(callOrPut * d2)
###Output
_____no_output_____
###Markdown
We calculate the Theta for a range of underlyings and times.
###Code
T = np.linspace(0.1, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
Theta = lambda s, t : BlackScholesTheta(s, K, r, sigma, t, phi)
dv_dT = np.array([ Theta(S,t) for t in T ]).transpose()
print(dv_dT.shape)
# Check Theta via finite differences
eps = 1.0e-4
Theta_FD = lambda s, t : (BlackScholesPrice(s, K, r, sigma, t+eps, phi) - BlackScholesPrice(s, K, r, sigma, t-eps, phi))/2/eps
dv_dT_FD = np.array([ Theta_FD(S,t) for t in T ]).transpose()
print(np.max(np.abs(dv_dT - dv_dT_FD)))
fig = go.Figure(data=[go.Surface(x=T, y=S, z=dv_dT)])
fig.update_layout(
title='Black-Scholes Theta',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'Theta',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Black-Scholes PDE
We calculate the linear operator
$$
{\cal L}\left[v\right]=-\frac{dv}{dT}+r\,s\,\frac{dv}{ds}+\frac{1}{2}\,\sigma^{2}\,s^{2}\,\frac{d^{2}v}{ds^{2}}-r\,v.
$$
And verify that ${\cal L}\left[v\right]=0$.
###Code
T = np.linspace(0.1, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
L_v = lambda s, T : -Theta(s,T) + r * s * Delta(s,T) + 0.5 * sigma**2 * s**2 * Gamma(s,T) - r * v(s,T)
L_v_sT = np.array([ L_v(S,t) for t in T ]).transpose()
print(L_v_sT.shape)
fig = go.Figure(data=[go.Surface(x=T, y=S, z=L_v_sT)])
fig.update_layout(
title='Black-Scholes Operator',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'L[v]',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Rho
$$
\varrho_{BS}(s,T)=\frac{d}{dr}v(s,T) = \phi\,K\,T\,e^{-rT}\,\Phi\left(\phi d_{2}\right).
$$
###Code
def BlackScholesRho(underlying, strike, rate, sigma, T, callOrPut):
moneyness = np.exp(rate*T) * underlying / strike
nu = sigma * np.sqrt(T)
d1 = np.log(moneyness) / nu + nu / 2.0
d2 = d1 - nu
return callOrPut * strike * T * np.exp(-rate*T) * norm.cdf(callOrPut * d2)
###Output
_____no_output_____
###Markdown
We calculate the Rho for a range of underlyings and times.
###Code
T = np.linspace(0.01, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
Rho = lambda s, t : BlackScholesRho(s, K, r, sigma, t, phi)
dv_dr = np.array([ Rho(S,t) for t in T ]).transpose()
print(dv_dr.shape)
# Check Rho via finite differences
eps = 1.0e-6
Rho_FD = lambda s, t : (BlackScholesPrice(s, K, r+eps, sigma, t, phi) - BlackScholesPrice(s, K, r-eps, sigma, t, phi))/2/eps
dv_dr_FD = np.array([ Rho_FD(S,t) for t in T ]).transpose()
print(np.max(np.abs(dv_dr - dv_dr_FD)))
fig = go.Figure(data=[go.Surface(x=T, y=S, z=dv_dr)])
fig.update_layout(
title='Black-Scholes Rho',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'Rho',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Vega
$$
\text{Vega}_{BS}(s,T)=\frac{d}{d\sigma}v(s,T) = s\,\Phi'\left(d_{1}\right)\sqrt{T}
$$
###Code
def BlackScholesVega(underlying, strike, rate, sigma, T, callOrPut):
moneyness = np.exp(rate*T) * underlying / strike
nu = sigma * np.sqrt(T)
d1 = np.log(moneyness) / nu + nu / 2.0
return underlying * norm.pdf(d1) * np.sqrt(T)
###Output
_____no_output_____
###Markdown
We calculate the Vega for a range of underlyings and times.
###Code
T = np.linspace(0.01, 2.0, 200)
S = np.linspace(0.01, 2.0, 200)
Vega = lambda s, t : BlackScholesVega(s, K, r, sigma, t, phi)
dv_dsigma = np.array([ Vega(S,t) for t in T ]).transpose()
print(dv_dsigma.shape)
# Check Vega via finite differences
eps = 1.0e-6
Vega_FD = lambda s, t : (BlackScholesPrice(s, K, r, sigma+eps, t, phi) - BlackScholesPrice(s, K, r, sigma-eps, t, phi))/2/eps
dv_dsigma_FD = np.array([ Vega_FD(S,t) for t in T ]).transpose()
print(np.max(np.abs(dv_dsigma - dv_dsigma_FD)))
fig = go.Figure(data=[go.Surface(x=T, y=S, z=dv_dsigma)])
fig.update_layout(
title='Black-Scholes Vega',
scene = dict(
xaxis = dict(
title = 'T',
),
yaxis = dict(
title = 's',
),
zaxis = dict(
title = 'Vega',
),
),
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____
###Markdown
Implied Volatility Analysis
We add an analysis of market-implied volatilities.
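Since `BlackVega` provides the analytic derivative of the Black price with respect to volatility, implied volatilities could also be solved with Newton's method instead of the bracketing `brentq` used in `BlackImpliedVol` above; a minimal sketch, reusing the functions defined earlier in this notebook:
###Code
def black_implied_vol_newton(price, strike, forward, T, callOrPut, sigma=0.2, tol=1.0e-8):
    # Newton iteration: sigma <- sigma - (Black(sigma) - price) / Vega(sigma)
    for _ in range(50):
        diff = Black(forward, strike, sigma * np.sqrt(T), callOrPut) - price
        if abs(diff) < tol:
            break
        sigma -= diff / BlackVega(strike, forward, sigma, T)
    return sigma
###Output
_____no_output_____
###Markdown
We start from the observed option prices.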
###Code
S0 = 1.0 # initial asset price
T = 1.4
putStrikes = [ 0.60, 0.70, 0.80, 0.90, 1.00 ]
putPrices = [ 0.0642, 0.0943, 0.1310, 0.1761, 0.2286 ]
callStrikes = [ 1.00, 1.10, 1.20, 1.30, 1.40 ]
callPrices = [ 0.2204, 0.1788, 0.1444, 0.1157, 0.0929 ]
###Output
_____no_output_____
###Markdown
We can use strike $K=1$ and put-call parity to calculate the implied risk-free rate $r$,
$$
r = -\frac{\log\left(1+\pi_{BS}\left(C^{put}\right)-\pi_{BS}\left(C^{call}\right)\right)}{T}
$$
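This follows from put-call parity, $C - P = S_0 - K\,e^{-rT}$, evaluated at $S_0 = K = 1$.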
###Code
r = - np.log(1 + putPrices[-1] - callPrices[0])/T
r
###Output
_____no_output_____
###Markdown
Next, we can calculate implied volatilities for puts and calls.
###Code
F = np.exp(r*T) * S0
putFwdPrices = [ np.exp(r*T)*p for p in putPrices ]
callFwdPrices = [ np.exp(r*T)*p for p in callPrices ]
putVols = [ BlackImpliedVol(p,K,F,T,-1) for p, K in zip(putFwdPrices, putStrikes) ]
callVols = [ BlackImpliedVol(p,K,F,T,+1) for p, K in zip(callFwdPrices,callStrikes) ]
print(putVols[-1])
print(callVols[0])
sigma = 0.5 * (putVols[-1] + callVols[0])
###Output
_____no_output_____
###Markdown
We calculate the corresponding Black-Scholes model prices.
###Code
bsPut = [ BlackScholesPrice(S0,K,r,sigma,T,-1) for K in putStrikes ]
bsCall = [ BlackScholesPrice(S0,K,r,sigma,T,+1) for K in callStrikes ]
print('Puts:')
for K, P in zip(putStrikes,bsPut):
print(' %4.2f %6.4f' % (K,P))
print('Calls:')
for K, P in zip(callStrikes,bsCall):
print(' %4.2f %6.4f' % (K,P))
###Output
_____no_output_____
###Markdown
Also, we plot the resulting implied volatility smile.
###Code
fig = go.Figure()
fig.add_trace(go.Scatter(x=putStrikes, y=putVols, name='put' ))
fig.add_trace(go.Scatter(x=callStrikes, y=callVols, name='call'))
fig.update_layout(
title='Implied Black-Scholes Volatility, T=%.2f' % T,
xaxis_title="Strike K",
yaxis_title="Implied Volatility",
width=1200, height=800, autosize=False,
margin=dict(l=65, r=50, b=65, t=90),
)
fig.show()
###Output
_____no_output_____ |
notebooks/composer/composer.ipynb | ###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.braxlines import experiments
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import mappo
from brax.experimental.braxlines.training import ppo
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
elif mode == 'print_step':
jit_env_reset = jax.jit(env.reset)
jit_env_step = jax.jit(env.step)
state0 = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
state1 = jit_env_step(state0, jnp.zeros((env.action_size,)))
print(f'obs0={state0.obs.shape}')
print(f'obs1={state1.obs.shape}')
print(f'rew0={state0.reward}')
print(f'rew1={state1.reward}')
print(f'action0={(env.action_size,)}')
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [env_descs.py](https://github.com/google/brax/blob/main/brax/experimental/composer/env_descs.py)
#@markdown for more supported `env_name`.
env_name = 'pro_ant_run' # @param ['ant_chase_ma', 'custom_ant_push', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
mode = 'print_step'# @param ['print_step', 'print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
desc_edits = {
'custom_ant_push': {
'components.cap1.reward_fns.goal.scale': 5,
'components.cap1.reward_fns.goal.target_goal': 5,
},
'pro_ant_run': {
'components.agent1.component_params.num_legs': 8,
},
}
desc_edits = desc_edits.get(env_name, {})
env_descs = {
'custom_ant_push':
dict(
components=dict(
ant1=dict(
component='ant',
pos=(0, 0, 0),
),
cap1=dict(
component='singleton',
component_params=dict(size=0.5),
pos=(1, 0, 0),
observers=('root_z_joints',),
reward_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
offset=5,
scale=1,
target_goal=4)),
),
),
edges=dict(
ant1__cap1=dict(
extra_observers=[
dict(observer_type='root_vec', indices=(0, 1)),
],
reward_fns=dict(
dist=dict(reward_type='root_dist', offset=5)),
),)),
}
if env_name in env_descs:
env_desc = env_descs[env_name]
env_fn = composer.create_fn(
env_desc=env_desc, desc_edits=desc_edits)
else:
env_fn = composer.create_fn(env_name=env_name,
desc_edits=desc_edits)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
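# use multi-agent PPO when the env defines agent groups, single-agent PPO otherwise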
ppo_lib = mappo if env.metadata.agent_groups else ppo
ppo_params = experiments.defaults.get_ppo_params(
'ant', num_timesteps_multiplier)
train_fn = functools.partial(ppo_lib.train, **ppo_params)
times = [datetime.now()]
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
progress, _, _, _ = experiments.get_progress_fn(
plotpatterns, times, tab=tab, max_ncols=5,
xlim=[0, train_fn.keywords['num_timesteps']],
pre_plot_fn = lambda : clear_output(wait=True),
post_plot_fn = plt.show)
if skip_training:
action_size = env.group_action_shapes if env.metadata.agent_groups else env.action_size
params, inference_fn = ppo_lib.make_params_and_inference_fn(
env.observation_size, action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn, seed=seed,
extra_step_kwargs=False, progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.composer import register_default_components
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
register_default_components()
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [env_descs.py](https://github.com/google/brax/blob/main/brax/experimental/composer/env_descs.py)
#@markdown for more supported `env_name`.
env_name = 'pro_ant_run' # @param ['custom_ant_push', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
mode = 'viewer'# @param ['print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
desc_edits = {
'custom_ant_push': {
'components.cap1.reward_fns.goal.scale': 5,
'components.cap1.reward_fns.goal.target_goal': 5,
},
'pro_ant_run': {
'components.agent1.component_params.num_legs': 8,
},
}
desc_edits = desc_edits.get(env_name, {})
env_descs = {
'custom_ant_push':
dict(
components=dict(
ant1=dict(
component='ant',
pos=(0, 0, 0),
),
cap1=dict(
component='singleton',
component_params=dict(size=0.5),
pos=(1, 0, 0),
observers=('root_z_joints',),
reward_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
offset=5,
scale=1,
target_goal=4)),
score_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
target_goal=4)),
),
),
edges=dict(
ant1__cap1=dict(
extra_observers=[
dict(observer_type='root_vec', indices=(0, 1)),
],
reward_fns=dict(
dist=dict(reward_type='root_dist', offset=5)),
score_fns=dict(dist=dict(reward_type='root_dist')),
),)),
}
if env_name in env_descs:
env_desc = env_descs[env_name]
env_fn = composer.create_fn(
env_desc=env_desc, desc_edits=desc_edits)
else:
env_fn = composer.create_fn(env_name=env_name,
desc_edits=desc_edits)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
# We determined some reasonable hyperparameters offline and share them here.
n = num_timesteps_multiplier
train_fn = functools.partial(
ppo.train,
seed=seed,
num_timesteps=int(50_000_000 * n),
log_frequency=20, reward_scaling=10,
episode_length=1000, normalize_observations=True,
action_repeat=1, unroll_length=5,
num_minibatches=32, num_update_epochs=4,
discounting=0.95, learning_rate=3e-4,
entropy_cost=1e-2, num_envs=2048,
extra_step_kwargs=False, batch_size=1024)
times = [datetime.now()]
plotdata = {}
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
def progress(num_steps, metrics, params):
times.append(datetime.now())
plotkeys = []
for key, v in metrics.items():
assert not jnp.isnan(v), f'{key} {num_steps} NaN'
plotdata[key] = plotdata.get(key, dict(x=[], y=[]))
plotdata[key]['x'] += [num_steps]
plotdata[key]['y'] += [v]
if any(x in key for x in plotpatterns):
plotkeys += [key]
if num_steps > 0:
tab.add(num_steps=num_steps, **metrics)
tab.dump()
clear_output(wait=True)
num_figs = max(len(plotkeys), 2)
fig, axs = plt.subplots(ncols=num_figs, figsize=(3.5 * num_figs, 3))
for i, key in enumerate(plotkeys):
if key in plotdata:
axs[i].plot(plotdata[key]['x'], plotdata[key]['y'])
axs[i].set(xlabel='# environment steps', ylabel=key)
axs[i].set_xlim([0, train_fn.keywords['num_timesteps']])
fig.tight_layout()
plt.show()
if skip_training:
core_env = env_fn()
params, inference_fn = ppo.make_params_and_inference_fn(
core_env.observation_size,
core_env.action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn,
progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer.training import mappo
from brax.experimental.braxlines import experiments
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
from brax.experimental.braxlines import experiments
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(composer.unwrap(env).metadata.config_json)
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
# @title List [registered environments](https://github.com/google/brax/blob/main/brax/experimental/composer/envs)
max_n_env = 10 # @param {'type': 'number'}
env_list = composer.list_env()
print(f'# registered envs = {len(env_list)}, e.g.')
pprint.pprint(env_list[:max_n_env])
#@title Specify an environment
env_name = 'ant_run' # @param ['humanoid_run', 'squidgame', 'sumo', 'follow', 'chase', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
output_path = '' # @param {type: 'string'}
show_params = True # @param {'type':'boolean'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
if show_params:
supported_params, support_kwargs = composer.inspect_env(env_name=env_name)
print(f'supported_params for "{env_name}" =')
pprint.pprint(supported_params)
print(f'support unlisted kwargs? (i.e. **kwargs): {support_kwargs}')
#@title Create a custom env
# @markdown put a `None` or a `Dict` as `env_params`, based on `supported_params` above
env_params = None# @param{'type': 'raw'}
mode = 'viewer'# @param ['print_step', 'print_obs', 'print_sys', 'viewer']
ignore_kwargs = True # @param {'type':'boolean'}
# check supported params
env_params = env_params or {}
composer.assert_env_params(env_name, env_params, ignore_kwargs)
# create env
env_fn = composer.create_fn(env_name=env_name, **env_params)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
ppo_lib = mappo if env.is_multiagent else ppo
ppo_params = experiments.defaults.get_ppo_params(
'ant', num_timesteps_multiplier)
train_fn = functools.partial(ppo_lib.train, **ppo_params)
times = [datetime.now()]
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
progress, _, _, _ = experiments.get_progress_fn(
plotpatterns, times, tab=tab, max_ncols=5,
xlim=[0, train_fn.keywords['num_timesteps']],
pre_plot_fn = lambda : clear_output(wait=True),
post_plot_fn = plt.show)
if skip_training:
action_size = (env.group_action_shapes if
env.is_multiagent else env.action_size)
params, inference_fn = ppo_lib.make_params_and_inference_fn(
env.observation_size, action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn, seed=seed,
extra_step_kwargs=False, progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
batch_size = 0# @param {type: 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=batch_size,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
#@title Plot information of the trajectory
experiments.plot_states(states[1:], max_ncols=5)
plt.show()
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.composer import register_default_components
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
register_default_components()
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [env_descs.py](https://github.com/google/brax/blob/main/brax/experimental/composer/env_descs.py)
#@markdown for more supported `env_name`.
env_name = 'custom_ant_push' # @param ['custom_ant_push', 'ant_run', 'ant_chase', 'ant_push']
mode = 'print_obs'# @param ['print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
desc_edits = {
'components.cap1.reward_fns.goal.scale': 5,
'components.cap1.reward_fns.goal.target_goal': 5,
}
env_descs = {
'custom_ant_push':
dict(
components=dict(
ant1=dict(
component='ant',
pos=(0, 0, 0),
),
cap1=dict(
component='singleton',
component_params=dict(size=0.5),
pos=(1, 0, 0),
observers=('root_z_joints',),
reward_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
offset=5,
scale=1,
target_goal=4)),
score_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
target_goal=4)),
),
),
edges=dict(
ant1__cap1=dict(
extra_observers=[
dict(observer_type='root_vec', indices=(0, 1)),
],
reward_fns=dict(
dist=dict(reward_type='root_dist', offset=5)),
score_fns=dict(dist=dict(reward_type='root_dist')),
),)),
}
if env_name in env_descs:
env_desc = env_descs[env_name]
env_fn = composer.create_fn(
env_desc=env_desc, desc_edits=desc_edits)
else:
env_fn = composer.create_fn(env_name=env_name)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
# We determined some reasonable hyperparameters offline and share them here.
n = num_timesteps_multiplier
train_fn = functools.partial(
ppo.train,
num_timesteps=int(50_000_000 * n),
log_frequency=20, reward_scaling=10,
episode_length=1000, normalize_observations=True,
action_repeat=1, unroll_length=5,
num_minibatches=32, num_update_epochs=4,
discounting=0.95, learning_rate=3e-4,
entropy_cost=1e-2, num_envs=2048,
extra_step_kwargs=False, batch_size=1024)
times = [datetime.now()]
plotdata = {}
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
def progress(num_steps, metrics, params):
times.append(datetime.now())
plotkeys = []
for key, v in metrics.items():
assert not jnp.isnan(v), f'{key} {num_steps} NaN'
plotdata[key] = plotdata.get(key, dict(x=[], y=[]))
plotdata[key]['x'] += [num_steps]
plotdata[key]['y'] += [v]
if any(x in key for x in plotpatterns):
plotkeys += [key]
if num_steps > 0:
tab.add(num_steps=num_steps, **metrics)
tab.dump()
clear_output(wait=True)
num_figs = max(len(plotkeys), 2)
fig, axs = plt.subplots(ncols=num_figs, figsize=(3.5 * num_figs, 3))
for i, key in enumerate(plotkeys):
if key in plotdata:
axs[i].plot(plotdata[key]['x'], plotdata[key]['y'])
axs[i].set(xlabel='# environment steps', ylabel=key)
axs[i].set_xlim([0, train_fn.keywords['num_timesteps']])
fig.tight_layout()
plt.show()
if skip_training:
core_env = env_fn()
params, inference_fn = ppo.make_params_and_inference_fn(
core_env.observation_size,
core_env.action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn,
progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.composer import register_default_components
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
register_default_components()
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [env_descs.py](https://github.com/google/brax/blob/main/brax/experimental/composer/env_descs.py)
#@markdown for more supported `env_name`.
env_name = 'custom_ant_push' # @param ['custom_ant_push', 'ant_run', 'ant_chase', 'ant_push']
mode = 'print_obs'# @param ['print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
desc_edits = {
'components.cap1.reward_fns.goal.scale': 5,
'components.cap1.reward_fns.goal.target_goal': 5,
}
env_descs = {
'custom_ant_push':
dict(
components=dict(
ant1=dict(
component='ant',
pos=(0, 0, 0),
),
cap1=dict(
component='singleton',
component_params=dict(size=0.5),
pos=(1, 0, 0),
observers=('root_z_joints',),
reward_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
offset=5,
scale=1,
target_goal=4)),
score_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
target_goal=4)),
),
),
edges=dict(
ant1__cap1=dict(
extra_observers=[
dict(observer_type='root_vec', indices=(0, 1)),
],
reward_fns=dict(
dist=dict(reward_type='root_dist', offset=5)),
score_fns=dict(dist=dict(reward_type='root_dist')),
),)),
}
if env_name in env_descs:
env_desc = env_descs[env_name]
env_fn = composer.create_fn(
env_desc=env_desc, desc_edits=desc_edits)
else:
env_fn = composer.create_fn(env_name=env_name)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
# We determined some reasonable hyperparameters offline and share them here.
n = num_timesteps_multiplier
train_fn = functools.partial(
ppo.train,
num_timesteps=int(50_000_000 * n),
log_frequency=20, reward_scaling=10,
episode_length=1000, normalize_observations=True,
action_repeat=1, unroll_length=5,
num_minibatches=32, num_update_epochs=4,
discounting=0.95, learning_rate=3e-4,
entropy_cost=1e-2, num_envs=2048,
extra_step_kwargs=False, batch_size=1024)
times = [datetime.now()]
plotdata = {}
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
def progress(num_steps, metrics, params):
times.append(datetime.now())
plotkeys = []
for key, v in metrics.items():
assert not jnp.isnan(v), f'{key} {num_steps} NaN'
plotdata[key] = plotdata.get(key, dict(x=[], y=[]))
plotdata[key]['x'] += [num_steps]
plotdata[key]['y'] += [v]
if any(x in key for x in plotpatterns):
plotkeys += [key]
if num_steps > 0:
tab.add(num_steps=num_steps, **metrics)
tab.dump()
clear_output(wait=True)
num_figs = max(len(plotkeys), 2)
fig, axs = plt.subplots(ncols=num_figs, figsize=(3.5 * num_figs, 3))
for i, key in enumerate(plotkeys):
if key in plotdata:
axs[i].plot(plotdata[key]['x'], plotdata[key]['y'])
axs[i].set(xlabel='# environment steps', ylabel=key)
axs[i].set_xlim([0, train_fn.keywords['num_timesteps']])
fig.tight_layout()
plt.show()
if skip_training:
core_env = env_fn()
params, inference_fn = ppo.make_params_and_inference_fn(
core_env.observation_size,
core_env.action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn,
progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.composer.training import mappo
from brax.experimental.braxlines import experiments
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
elif mode == 'print_step':
jit_env_reset = jax.jit(env.reset)
jit_env_step = jax.jit(env.step)
state0 = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
state1 = jit_env_step(state0, jnp.zeros((env.action_size,)))
print(f'obs0={state0.obs.shape}')
print(f'obs1={state1.obs.shape}')
print(f'rew0={state0.reward}')
print(f'rew1={state1.reward}')
print(f'action0={(env.action_size,)}')
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [composer/envs](https://github.com/google/brax/blob/main/brax/experimental/composer/envs)
#@markdown for more registered `env_name`'s.
env_name = 'pro_ant_run' # @param ['follow', 'chase', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
env_params = {'num_legs': 2} # @param{'type': 'raw'}
mode = 'viewer'# @param ['print_step', 'print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
# check supported params
env_params = env_params or {}
supported_params = composer.inspect_env(env_name=env_name)
assert all(k in supported_params for k in env_params
), f'invalid {env_params} for {supported_params}'
# create env
env_fn = composer.create_fn(env_name=env_name,
**(env_params or {}))
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 1# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
ppo_lib = mappo if env.is_multiagent else ppo
ppo_params = experiments.defaults.get_ppo_params(
'ant', num_timesteps_multiplier)
train_fn = functools.partial(ppo_lib.train, **ppo_params)
times = [datetime.now()]
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
progress, _, _, _ = experiments.get_progress_fn(
plotpatterns, times, tab=tab, max_ncols=5,
xlim=[0, train_fn.keywords['num_timesteps']],
pre_plot_fn = lambda : clear_output(wait=True),
post_plot_fn = plt.show)
if skip_training:
action_size = (env.group_action_shapes if
env.is_multiagent else env.action_size)
params, inference_fn = ppo_lib.make_params_and_inference_fn(
env.observation_size, action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn, seed=seed,
extra_step_kwargs=False, progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer.training import mappo
from brax.experimental.braxlines import experiments
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env.unwrapped))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
elif mode == 'print_step':
jit_env_reset = jax.jit(env.reset)
jit_env_step = jax.jit(env.step)
state0 = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
state1 = jit_env_step(state0, jnp.zeros((env.action_size,)))
print(f'obs0={state0.obs.shape}')
print(f'obs1={state1.obs.shape}')
print(f'rew0={state0.reward}')
print(f'rew1={state1.reward}')
print(f'action0={(env.action_size,)}')
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
# @title List registered environments
#@markdown See [composer/envs](https://github.com/google/brax/blob/main/brax/experimental/composer/envs)
#@markdown for registered `env_name`'s.
env_list = composer.list_env()
print(f'{len(env_list)} registered envs, e.g. {env_list[:5]}...')
#@title Create a custom env
env_name = 'sumo' # @param ['squidgame', 'sumo', 'follow', 'chase', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
env_params = None # @param{'type': 'raw'}
mode = 'viewer'# @param ['print_step', 'print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
# check supported params
env_params = env_params or {}
supported_params, support_kwargs = composer.inspect_env(env_name=env_name)
assert support_kwargs or all(
k in supported_params for k in env_params
), f'invalid {env_params} for {supported_params}'
# create env
env_fn = composer.create_fn(env_name=env_name,
**(env_params or {}))
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
ppo_lib = mappo if env.is_multiagent else ppo
ppo_params = experiments.defaults.get_ppo_params(
'ant', num_timesteps_multiplier)
train_fn = functools.partial(ppo_lib.train, **ppo_params)
times = [datetime.now()]
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
progress, _, _, _ = experiments.get_progress_fn(
plotpatterns, times, tab=tab, max_ncols=5,
xlim=[0, train_fn.keywords['num_timesteps']],
pre_plot_fn = lambda : clear_output(wait=True),
post_plot_fn = plt.show)
if skip_training:
action_size = (env.group_action_shapes if
env.is_multiagent else env.action_size)
params, inference_fn = ppo_lib.make_params_and_inference_fn(
env.observation_size, action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn, seed=seed,
extra_step_kwargs=False, progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
batch_size = 0# @param {type: 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=batch_size,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
#@title Plot information of the trajectory
experiments.plot_states(states[1:-1], max_ncols=5)
plt.show()
###Output
_____no_output_____
###Markdown
Create Environments with Braxlines Composer[Braxlines Composer](https://github.com/google/brax/blob/main/brax/experimental/composer) allows modular composition of Brax environments. Let's try it out! [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/brax/blob/main/notebooks/braxlines/composer.ipynb)
###Code
#@title Colab setup and imports
#@markdown ## ⚠️ PLEASE NOTE:
#@markdown This colab runs best using a TPU runtime. From the Colab menu, choose Runtime > Change Runtime Type, then select **'TPU'** in the dropdown.
from datetime import datetime
import functools
import os
import pprint
import jax
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_debug_nans", True)
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt
try:
import brax
except ImportError:
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.composer import component_editor
from brax.experimental.composer import register_default_components
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
register_default_components()
if "COLAB_TPU_ADDR" in os.environ:
from jax.tools import colab_tpu
colab_tpu.setup_tpu()
def show_env(env, mode):
if mode == 'print_obs':
pprint.pprint(composer.get_env_obs_dict_shape(env))
elif mode == 'print_sys':
pprint.pprint(env.unwrapped.composer.metadata.config_json)
elif mode == 'print_step':
jit_env_reset = jax.jit(env.reset)
jit_env_step = jax.jit(env.step)
state0 = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
state1 = jit_env_step(state0, jnp.zeros((env.action_size,)))
print(f'obs0={state0.obs.shape}')
print(f'obs1={state1.obs.shape}')
print(f'rew0={state0.reward}')
print(f'rew1={state1.reward}')
else:
jit_env_reset = jax.jit(env.reset)
state = jit_env_reset(rng=jax.random.PRNGKey(seed=0))
clear_output(wait=True)
return HTML(html.render(env.sys, [state.qp]))
#@title Create a custom env
#@markdown See [env_descs.py](https://github.com/google/brax/blob/main/brax/experimental/composer/env_descs.py)
#@markdown for more supported `env_name`.
env_name = 'pro_ant_run' # @param ['ant_chase_ma', 'custom_ant_push', 'pro_ant_run', 'ant_run', 'ant_chase', 'ant_push']
mode = 'viewer'# @param ['print_step', 'print_obs', 'print_sys', 'viewer']
output_path = '' # @param {type: 'string'}
if output_path:
output_path = f'{output_path}/{datetime.now().strftime("%Y%m%d")}'
output_path = f'{output_path}/{env_name}'
print(f'Saving outputs to {output_path}')
desc_edits = {
'custom_ant_push': {
'components.cap1.reward_fns.goal.scale': 5,
'components.cap1.reward_fns.goal.target_goal': 5,
},
'pro_ant_run': {
'components.agent1.component_params.num_legs': 8,
},
}
desc_edits = desc_edits.get(env_name, {})
env_descs = {
'custom_ant_push':
dict(
components=dict(
ant1=dict(
component='ant',
pos=(0, 0, 0),
),
cap1=dict(
component='singleton',
component_params=dict(size=0.5),
pos=(1, 0, 0),
observers=('root_z_joints',),
reward_fns=dict(
goal=dict(
reward_type='root_goal',
sdcomp='vel',
indices=(0, 1),
offset=5,
scale=1,
target_goal=4)),
),
),
edges=dict(
ant1__cap1=dict(
extra_observers=[
dict(observer_type='root_vec', indices=(0, 1)),
],
reward_fns=dict(
dist=dict(reward_type='root_dist', offset=5)),
),)),
}
if env_name in env_descs:
env_desc = env_descs[env_name]
env_fn = composer.create_fn(
env_desc=env_desc, desc_edits=desc_edits)
else:
env_fn = composer.create_fn(env_name=env_name,
desc_edits=desc_edits)
env = env_fn()
show_env(env, mode)
#@title Training the custom env
num_timesteps_multiplier = 3# @param {type: 'number'}
seed = 0 # @param{type: 'integer'}
skip_training = False # @param {type: 'boolean'}
log_path = output_path
if log_path:
log_path = f'{log_path}/training_curves.csv'
tab = logger_utils.Tabulator(output_path=log_path,
append=False)
# We determined some reasonable hyperparameters offline and share them here.
n = num_timesteps_multiplier
train_fn = functools.partial(
ppo.train,
seed=seed,
num_timesteps=int(50_000_000 * n),
log_frequency=20, reward_scaling=10,
episode_length=1000, normalize_observations=True,
action_repeat=1, unroll_length=5,
num_minibatches=32, num_update_epochs=4,
discounting=0.95, learning_rate=3e-4,
entropy_cost=1e-2, num_envs=2048,
extra_step_kwargs=False, batch_size=1024)
times = [datetime.now()]
plotdata = {}
plotpatterns = ['eval/episode_reward', 'eval/episode_score']
def progress(num_steps, metrics, params):
times.append(datetime.now())
plotkeys = []
for key, v in metrics.items():
assert not jnp.isnan(v), f'{key} {num_steps} NaN'
plotdata[key] = plotdata.get(key, dict(x=[], y=[]))
plotdata[key]['x'] += [num_steps]
plotdata[key]['y'] += [v]
if any(x in key for x in plotpatterns):
plotkeys += [key]
if num_steps > 0:
tab.add(num_steps=num_steps, **metrics)
tab.dump()
clear_output(wait=True)
num_figs = max(len(plotkeys), 2)
fig, axs = plt.subplots(ncols=num_figs, figsize=(3.5 * num_figs, 3))
for i, key in enumerate(plotkeys):
if key in plotdata:
axs[i].plot(plotdata[key]['x'], plotdata[key]['y'])
axs[i].set(xlabel='# environment steps', ylabel=key)
axs[i].set_xlim([0, train_fn.keywords['num_timesteps']])
fig.tight_layout()
plt.show()
if skip_training:
core_env = env_fn()
params, inference_fn = ppo.make_params_and_inference_fn(
core_env.observation_size,
core_env.action_size,
normalize_observations=True)
inference_fn = jax.jit(inference_fn)
else:
inference_fn, params, _ = train_fn(
environment_fn=env_fn,
progress_fn=progress)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')
print(f'Saved logs to {log_path}')
#@title Visualizing a trajectory of the learned inference function
eval_seed = 0 # @param {'type': 'integer'}
env, states = evaluators.visualize_env(
env_fn=env_fn, inference_fn=inference_fn,
params=params, batch_size=0,
seed = eval_seed, output_path=output_path,
verbose=True,
)
HTML(html.render(env.sys, [state.qp for state in states]))
###Output
_____no_output_____ |
Day3/titanic-dataset-data-cleansing.ipynb | ###Markdown
***Data Cleansing on the Titanic Dataset***
###Code
# Importing Libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Loading the data
df = pd.read_csv('/kaggle/input/titanic/train.csv')
df.head()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 PassengerId 891 non-null int64
1 Survived 891 non-null int64
2 Pclass 891 non-null int64
3 Name 891 non-null object
4 Sex 891 non-null object
5 Age 714 non-null float64
6 SibSp 891 non-null int64
7 Parch 891 non-null int64
8 Ticket 891 non-null object
9 Fare 891 non-null float64
10 Cabin 204 non-null object
11 Embarked 889 non-null object
dtypes: float64(2), int64(5), object(5)
memory usage: 83.7+ KB
###Markdown
Just by looking at the data I can tell that I have some missing data in my *Age*, *Embarked* and *Cabin* columns. Let's see how much of the data is actually missing.
###Code
pd.DataFrame(df.isna().sum().sort_values(ascending=False)/len(df)*100)
###Output
_____no_output_____
###Markdown
Now I can tell that the Cabin, Age, and Embarked columns have 77.10%, 19.86%, and 0.22% missing data, respectively. ***Brief description of each column:*** PassengerId -- unique passenger identification ID; Survived -- survival flag (0 = dead, 1 = survived); Pclass -- ticket class (1 = 1st Class, 2 = 2nd Class, 3 = 3rd Class); Name -- as is; Sex -- as is; Age -- as is; SibSp -- number of siblings/spouses on the Titanic; Parch -- number of parents/children on the Titanic; Ticket -- ticket number; Fare -- price; Cabin -- cabin number; Embarked -- port of departure (S = Southampton, C = Cherbourg, Q = Queenstown)
###Code
# Number of passengers who survived vs. did not survive
plt.figure(figsize=(16,8))
df["Survived"].value_counts().plot.bar()
plt.show()
non_survived_passenger = round(df["Survived"].value_counts()[0]/len(df)*100,3)
survived_passenger = round(df["Survived"].value_counts()[1]/len(df)*100,3)
print(f"Percentage of passengers survived: {survived_passenger}%")
print(f"Percentage of passengers who could not survived: {non_survived_passenger}%")
# Maximum and minimum age of the passengers
print(f"Maximum age is: {df['Age'].max()}")
print(f"Minimum age is: {df['Age'].min()}")
# Bivariate analysis: Age vs. Survived
df.plot(kind="scatter",x="Age",y="Survived")
plt.title("AGE VS SURVIVED")
plt.show()
###Output
_____no_output_____
###Markdown
I did not see any correlation between the Age and Survived columns.
###Code
df.plot(kind="scatter", x="Fare", y="Survived")
plt.title("FARE VS SURVIVED")
plt.show()
plt.figure(figsize=(16,8))
plt.pie(df["SibSp"].value_counts().values, autopct="%1.0f%%", labels=[0,1,2,3,4,5,6])
plt.show()
df['Parch'].value_counts().plot.bar()
plt.figure(figsize=(16,8))
sns.histplot(df['Fare'])
plt.show()
plt.figure(figsize=(16,8))
df["Sex"].value_counts().plot.bar()
plt.show()
plt.figure(figsize=(16,8))
df["Embarked"].value_counts().plot.bar()
plt.show()
###Output
_____no_output_____
###Markdown
***Preparing the dataset for machine learning***Separating numerical and categorical values and performing preprocessing operations.
###Code
# Convert the Parch (parent/child) and SibSp (sibling/spouse) columns to float
# because they hold counts, not categorical values.
df['Parch'] = df['Parch'].astype(float)
df['SibSp'] = df['SibSp'].astype(float)
features = df.drop("Survived", axis=1)
labels = df["Survived"].values
num_cols = [col for col in features.columns if features[col].dtype in [float]]
print(num_cols)
# Keep categorical columns whose cardinality (number of unique values) is below 10
cat_cols = [col for col in features.columns if col not in num_cols and df[col].unique().shape[0]<10]
print(cat_cols)
df[cat_cols].head()
df[num_cols].head()
###Output
_____no_output_____
###Markdown
Columns with missing values are: Age, Cabin, and Embarked.
###Code
# Imputing numerical columns
# Taking the average value of the Age column
df['Age'].mean()
features['Age'].mean()
features[num_cols]
# Option 1
# Using fillna
features['Age'].fillna(features['Age'].mean()).values
features['Age'].fillna(features['Age'].mean()).isna().sum()
# Option 2 (preferred)
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
imputer = SimpleImputer(strategy="mean")
imputer.fit_transform(features[num_cols])
# Making a numerical data pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
("imputer",SimpleImputer(strategy='mean')),
("scaler",StandardScaler())
])
num_pipeline.fit_transform(features[num_cols])
###Output
_____no_output_____
###Markdown
So, we have prepared the numerical data. Let's move on to the categorical data. Note: we will write a custom most-frequent imputer below (scikit-learn's SimpleImputer also supports a 'most_frequent' strategy).
###Code
# Lets again observe the categorical data
features[cat_cols].head()
features[cat_cols].isna().sum()
###Output
_____no_output_____
###Markdown
Only two values are missing from the column "Embarked"
###Code
features["Embarked"].value_counts()
# From above we can see that the most frequent value here is "S".
# So let's fill that value in our 2 missing "Embarked" rows
features["Embarked"].fillna("S").isna().sum()
# Creating a custom imputer
from sklearn.base import BaseEstimator, TransformerMixin
class MostFrequentImputer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X], index=X.columns)
return self
def transform(self,X,y=None):
return X.fillna(self.most_frequent)
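
# Quick illustrative check (not part of the original flow): after imputation,
# no missing values should remain in the categorical columns.
print(MostFrequentImputer().fit_transform(features[cat_cols]).isna().sum())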
# One hot encoding
from sklearn.preprocessing import OneHotEncoder
cat_pipeline = Pipeline([
('cat_imputer',MostFrequentImputer()),
#('one_hot',OneHotEncoder(sparse=False))
('one_hot',OneHotEncoder())
])
cat_pipeline.fit_transform(features[cat_cols])
X_num = num_pipeline.fit_transform(features[num_cols])
X_cat = cat_pipeline.fit_transform(features[cat_cols]).toarray()
X_final = np.c_[X_num,X_cat]
print(X_final[:5])
X_final.shape
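
# Alternative sketch (illustrative): ColumnTransformer, imported earlier, can
# combine both pipelines so all preprocessing runs in a single fit_transform.
full_pipeline = ColumnTransformer([
    ("num", num_pipeline, num_cols),
    ("cat", cat_pipeline, cat_cols),
])
print(full_pipeline.fit_transform(features).shape)  # expected to match X_final.shape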
###Output
_____no_output_____ |
labs/lab4/lab4.ipynb | ###Markdown
DSCI 572 Lab 4
###Code
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
To install scikit-image, use```conda install -c conda-forge scikit-image```or ```pip install scikit-image```
###Code
from skimage.color import rgb2gray
from skimage.transform import resize
plt.rcParams['font.size'] = 16
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras import utils
from tensorflow.keras.applications.inception_v3 import InceptionV3
###Output
_____no_output_____
###Markdown
Instructionsrubric={mechanics:20}Follow the [general lab instructions](https://ubc-mds.github.io/resources_pages/general_lab_instructions/). Exercise 1: convolutionsFor each of the filters given below, convolve the given image (or a different image of your choice) with the given filter and discuss why the results look the way they do. You can perform 2D convolutions using [`scipy.signal.convolve2d`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.convolve2d.html).The suggested image size is around 100x100 pixels; if the image is too big, it will be hard to see the changes by eye using the very small filters given below. If you want to make an image smaller, try [scipy.misc.imresize](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.misc.imresize.html). This will be a lot faster than seam carving :)Note: depending on your versions of various packages, you might get warnings when you run the code. It's OK.
###Code
def preprocess_image(filename):
img = plt.imread(filename) # read in the image
img = resize(img, (100,100), mode='reflect') # resize it if you want
return rgb2gray(img) # make it grayscale
def show_conv(img, filt):
plt.figure(figsize=(8,16))
plt.subplot(1,2,1)
plt.imshow(img, cmap='gray')
plt.xticks(())
plt.yticks(())
plt.title("original")
I_filt = convolve2d(img,filt, boundary='symm', mode='same')
I_filt = np.maximum(0, I_filt) # set negative values to 0, for visualization purposes
I_filt = np.minimum(1, I_filt) # set values greater than 1 to 1, for visualization purposes
plt.subplot(1,2,2)
if np.sum(filt) == 0: # a trick to make the images easier to see, not part of the "math"
plt.imshow(I_filt/np.max(I_filt), cmap='gray')
else:
plt.imshow(I_filt, cmap='gray')
plt.xticks(())
plt.yticks(())
plt.title("filtered")
return I_filt
img = preprocess_image("milad_cropped.png")
###Output
_____no_output_____
###Markdown
**Example** (you don't need to do this one)
###Code
ft = 0.1*np.ones(10)[None]
print(ft.shape)
print(ft)
res = show_conv(img, ft)
###Output
(1, 10)
[[0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]]
###Markdown
**Example answer:** The filter is a horizontal bar containing all $0.1$s. Therefore I would expect a blurring in the horizontal direction, meaning the _vertical_ edges get blurred (because these are the ones that change rapidly in the horizontal direction). This seems to be happening in the result. 1(a)rubric={reasoning:5}
###Code
ft = 0.1*np.ones(10)[:,None]
print(ft.shape)
print(ft)
res = show_conv(img, ft)
###Output
_____no_output_____
###Markdown
1(b)rubric={reasoning:5}
###Code
ft = np.zeros((5,5))
ft[2,2] = 1
print(ft.shape)
print(ft)
res = show_conv(img, ft)
###Output
_____no_output_____
###Markdown
1(c)rubric={reasoning:5}
###Code
ft = 0.01*np.ones((10,10))
print(ft.shape)
res = show_conv(img, ft)
###Output
_____no_output_____
###Markdown
1(d)rubric={reasoning:5}
###Code
ft = -np.ones((3,3))/8
ft[1,1] = 1
print(ft.shape)
print(ft)
res6 = show_conv(img, ft)
###Output
_____no_output_____
###Markdown
(optional) 1(e)rubric={reasoning:1}Earlier in this course we talked about gradients and numerical differentiation. Think about part (d) above: does this have anything to do with the topics from earlier on? Can you relate these edge detection operations to "derivatives" or "gradients"?Also, by the way, back in the seam carving lab of DSCI 512 we gave you a function that calculated the "energy" of an image, and we then looked for low energy seams. Here's the code we gave you:
###Code
from scipy.ndimage.filters import convolve
def energy(image):
dy = np.array([-1, 0, 1])[:,None,None]
dx = np.array([-1, 0, 1])[None,:,None]
energy_img = convolve(image, dx)**2 + convolve(image, dy)**2
return np.sum(energy_img, axis=2)
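
# Illustrative aside (not part of the lab): up to a sign flip, convolving with
# [-1, 0, 1] computes the central difference f(x+1) - f(x-1), i.e. a derivative
# estimate; energy() squares the result, so the sign does not matter.
f = np.array([0., 0., 1., 1., 1.])
print(convolve(f, np.array([-1., 0., 1.])))  # nonzero only around the step edge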
###Output
_____no_output_____
###Markdown
(There's no particular reason I switched from [`scipy.ndimage.filters.convolve`](https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.convolve.html) to [`scipy.signal.convolve2d`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.convolve2d.html); they perform the same function for our purposes.) I thought you might enjoy looking back at this formerly mysterious code with your newfound knowledge. And it's also a bit of a hint: the seam carving energy function looked for "edges" or "changes" or ... derivatives! The above actually calculates the magnitude squared of the "gradient" at every point. The whole thing should make sense now as well -- when seam carving we wanted to remove pixels for which there wasn't much going on in the immediate vicinity. Exercise 2. Convolutional networks for MNISTSorry to continue with MNIST so long. It's just _THE_ classic data set for this stuff. Below is some code that trains a convnet on MNIST. The code is adapted from the book [Deep Learning with Python](https://machinelearningmastery.com/deep-learning-with-python/) with permission from the author.
###Code
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to be [samples][width][height][channels]
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = utils.to_categorical(y_train)
y_test = utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# take a subset of the data for speed
subset_size = 10000
X_train = X_train[:subset_size]
y_train = y_train[:subset_size]
# define a simple CNN model
def build_mnist_CNN():
mnist_model = Sequential()
mnist_model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
mnist_model.add(MaxPooling2D(pool_size=(2, 2)))
mnist_model.add(Dropout(0.2))
mnist_model.add(Flatten())
mnist_model.add(Dense(128, activation='relu'))
mnist_model.add(Dense(num_classes, activation='softmax'))
# Compile model
mnist_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return mnist_model
mnist_model = build_mnist_CNN()
# Fit the model
mnist_model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=256)
# Final evaluation of the model
scores = mnist_model.evaluate(X_test, y_test, verbose=0)
print("CNN Error: %.2f%%" % (100-scores[1]*100))
###Output
_____no_output_____
###Markdown
2(a)rubric={reasoning:15}Run the code above. How does it compare to your fully-connected ("Dense") neural net from lab 3? Discuss in 2-3 sentences. (Keep in mind that here we're only using a subset of the training data for speed.) (optional) 2(b)rubric={reasoning:1}Let's assess what happens if we permute the rows of the images (in both training and testing). Below we permute the images, retrain the network, and re-evaluate it. The accuracy is now lower. But we used the same data, just shuffled - can you explain why this operation hurt the accuracy?
###Code
perm = np.random.permutation(X_train.shape[1])
perm
n_plots = 3
for i in range(n_plots):
ind = np.random.randint(X_train.shape[0])
plt.subplot(2,2,1)
plt.imshow(X_train[ind,...,0], cmap='gray');
plt.title("Original");
plt.subplot(2,2,2)
plt.imshow(X_train[ind,perm,:,0], cmap='gray');
plt.title("Permuted");
plt.show()
###Output
_____no_output_____
###Markdown
Above: this is what a permuted training example looks like, with its rows shuffled.
###Code
mnist_model_perm = build_mnist_CNN()
# Fit the model
mnist_model_perm.fit(X_train[:,perm], y_train, validation_data=(X_test[:,perm], y_test), epochs=10, batch_size=256)
# Final evaluation of the model
scores = mnist_model_perm.evaluate(X_test[:,perm], y_test, verbose=0)
print("CNN Error: %.2f%%" % (100-scores[1]*100))
###Output
_____no_output_____
###Markdown
2(c)rubric={reasoning:30}You will now deploy Keras/TensorFlow on the cloud using [Kaggle Kernels](https://www.kaggle.com/kernels). This will allow you to train on a GPU and assess the benefits of training neural networks on GPUs. Kaggle Kernels offers 30 hours of free GPU usage per account. This should be much more than adequate for this lab.Note: last year we used [Google Colab](https://colab.research.google.com/) instead of Kaggle Kernels. That would have been fine for this exercise - they are roughly equivalent. But later in the lab, when we want to access a Kaggle dataset, Kaggle Kernels are way more convenient! (Furthermore... two years ago we used Amazon AWS and that was truly a huge hassle because they wouldn't recognize your @alumni.ubc.ca email addresses as "student email addresses".) Follow these steps:1. Save this Jupyter notebook so that it contains your latest work. Also push it to GitHub to be safe.2. Go to https://www.kaggle.com/kernels3. Make an account if you don't have one4. Select New Notebook7. Create8. File->Upload notebook9. Upload this notebook itself, lab4.ipynb, which you just saved.5. On the right-hand side, go to Settings.1. Make sure Internet is enabled.1. Make sure GPU is enabled.**SUGGESTION:** once you're done all your work on Kaggle (which means this exercise and the next one), you can download the notebook from Kaggle and overwrite your local version. That way any work you did on Kaggle won't be lost. (They allow working directly off a notebook on GitHub, but that feature won't work for us since we're using github.ubc.ca.) Now, run the same MNIST experiment as above but on a Kaggle Kernel with the GPU active. 1. How much faster is it (as a ratio) to run the exact same code on the GPU vs. your laptop? 2. Notice the code above takes a subset of 10,000 training examples for speed. With the speed of the GPU, you should now use the full 60,000 training examples on AWS. Report your performance after 10 epochs when training on the full data set. How does it compare to the validation error you were able to get on your local machine (which presumably required using the smaller training set to run in reasonable time)? 3. Again, compare to the fully connected network from lab 3. Exercise 3: Transfer learningIn this exercise we will work on the concept of _transfer learning_, in which we'll use a model trained on one task as a starting point for learning to perform another task. A natural question is, why is transfer learning helpful to us? Why can't we just train a model with the second task's objectives from the beginning? A key motivation is the difficulty in obtaining labeled data: ususally we need a whole lot of data in order to solve complex problems, and it can be hard to collect the data. (Another motivation is the time and effort -- both computational time and human time -- needed to train the original model. Someone did the work already, so we don't have to.)In this exercise we'll apply transfer learning to fine-grained image classification. Here, the goal is to recognize different subclasses within a higher-level class. In our case, the higher level question could be, "Is this a dog?" and the fine-grained question is, "What breed of dog is this?"We will use the [Kaggle dog breed identification](https://www.kaggle.com/c/dog-breed-identification/data) dataset. In the dataset, each dog image is labeled according to one of 120 breeds. 
We'll start with a pre-trained model that was already trained on a more high-level image classification task, namely the famous [ImageNet dataset](http://www.image-net.org/). You can see some sample ImageNet images [here](http://image-net.org/explore?wnid=n04516672).We'll consider three approaches to the dog breed classification problem:1. No transfer learning, just end-to-end training of a CNN directly for the dog breed classification task.2. Use the pre-trained model as a feature extractor; then, add new layers in order to train it with the dog-breed dataset.3. Some fine tuning of the weights of the pre-trained model (instead of freezing them all and only adding new layers, as in approach 2).Attribution: In designing this exercise, we took some inspiration from [here](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html). But I think our version is more interesting because the classes in our new task are not part of the original task. Preliminaries I am assuming you already have your Kaggle Kernel set up as in the previous exercise, with the GPU and Internet enabled. Next, you will need to add the dataset to your Kaggle Kernel. (FYI: this is the part that is so much easier with Kaggle Kernels than Google Colab, where we had to install the Kaggle API on the Colab instance, set up key-based authentication, and then upload many GB worth of data from one cloud to the other, which turned out to work fine on ubcsecure wifi but not on eduroam wifi... lessons learned!)- Go to https://www.kaggle.com/c/dog-breed-identification/rules, make sure you're signed in to Kaggle, and accept the competition rules.- In your Kaggle Kernel, press "+ Add Data" at the upper-right.- From the tabs at the top, select "Competition Data" (do not skip this step!)- Search for "dog breed identification" in the search box. It should be the first result.- Press "Add". Note: this will cause your kernel to restart.- When asked if you want code to load the data, you can select "No" - I already have the code for you in this notebook, below. What you should doAs with the previous exercise, you should do this on the GPU on Kaggle. Your task for now is to read along and, **whenever there are code cells below, you should run them as you go along.** There will be some questions interspersed in the document, **which you should answer**. Next, we take only the first 2000 samples from the original dataset. We want to simulate having only a small labeled dataset available to us, and see the effect of transfer learning.
###Code
data = pd.read_csv('../input/dog-breed-identification/labels.csv')
data = data[:2000]
data['image_path'] = data.apply( lambda row: (os.path.join("../input/dog-breed-identification/train", row["id"] + ".jpg") ), axis=1)
data.head()
###Output
_____no_output_____
###Markdown
Above: you can see some of the breeds that we're predicting.
###Code
target_labels = data['breed']
total_classes = len(set(target_labels))
print("number of dog breeds:", total_classes)
# read images from the image directory.
images = np.array([img_to_array(
load_img(img, target_size=(256,256))
) for img in data['image_path'].values.tolist()])
images.shape
###Output
_____no_output_____
###Markdown
Above: we have 2000 images, each of size $256 \times 256$ and with 3 colour channels.
###Code
images = images.astype('float32')/255.0
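
# Quick check: after dividing by 255, pixel values should lie in [0.0, 1.0].
print(images.min(), images.max())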
###Output
_____no_output_____
###Markdown
Above: it's very important to scale the images!
###Code
plt.imshow(images[0]);
plt.grid(True);
plt.xticks([]);
plt.yticks([]);
plt.title("Breed = " + target_labels[0]);
###Output
_____no_output_____
###Markdown
Above: this is a sample image from the dog breed data set.
###Code
X_train, X_valid, y_train, y_valid = train_test_split(images, target_labels,
stratify=np.array(target_labels),
random_state=42)
print(X_train.shape)
print(X_valid.shape)
###Output
_____no_output_____
###Markdown
3(a)rubric={reasoning:10}Before we start, do some EDA to assess whether there is serious class imbalance in the training data. What training accuracy would you get with `DummyClassifier`? Briefly discuss your results. 3(b)rubric={reasoning:5}How many training examples do we have per breed of dog, roughly? In the context of other classification tasks we've done in MDS, do you consider this to be a lot or a little? Next, we do the one-hot encoding.
###Code
Y_train = pd.get_dummies(y_train.reset_index(drop=True)).values
Y_valid = pd.get_dummies(y_valid.reset_index(drop=True)).values
print(Y_train.shape)
print(Y_valid.shape)
# Note: it would be better to use keras.utils.to_categorical, or something else like that,
# just in case one of the classes is absent in one of the two sets.
# But this works for now.
###Output
_____no_output_____
###Markdown
Approach 1Now, we try Approach 1, which is training an end-to-end CNN on the dog breed classification task.
###Code
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(256, 256, 3)))
model.add(Activation('relu')) # this is just different syntax for specifying the activation function
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(total_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, Y_train, epochs=10, validation_data=(X_valid, Y_valid))
# FYI: it's often a good idea to save your weights after training or during training.
# But you don't have to here.
# model.save_weights('my_conv_net.h5')
###Output
_____no_output_____
###Markdown
3(c)rubric={reasoning:1}What do you think of the results? Are you impressed? Approach 2Here we load a pre-trained model and add some layers on top. The syntax is not what you're used to - that's OK, don't worry about it.
###Code
# Get the InceptionV3 model trained on the ImageNet data set
base_inception = InceptionV3(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
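
# Illustrative check: with include_top=False the model ends in a convolutional
# feature map instead of the 1000-way ImageNet classifier head (roughly
# (None, 6, 6, 2048) for 256x256 inputs).
print(base_inception.output_shape)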
###Output
_____no_output_____
###Markdown
Note the `include_top=False`. This throws away the last layer. It wasn't useful to us anyway. ImageNet has 1000 classes, but we're not interested in those classes. Another way to think of it is that the original model is a crazy feature extractor plus logistic regression for the 1000 ImageNet classes. We are using the feature extractor and discarding the logistic regression part.
###Code
top_block = base_inception.output
top_block = GlobalAveragePooling2D()(top_block) # pool over height/width to reduce number of parameters
top_block = Dense(256, activation='relu')(top_block) # add a Dense layer
predictions = Dense(total_classes, activation='softmax')(top_block) # add another Dense layer
model_transfer = Model(inputs=base_inception.input, outputs=predictions)
###Output
_____no_output_____
###Markdown
Above: the syntax is not what you're used to - that's OK, don't worry about it. If you want to know more, see [this documentation](https://keras.io/models/model/). However, at a high level we're grabbing the base model, doing some pooling, and then adding two new dense layers at the top.
###Code
for layer in base_inception.layers:
layer.trainable = False
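
# Illustrative check: after freezing, only the two new Dense layers should
# contribute trainable tensors (2 kernels + 2 biases = 4).
print(len(model_transfer.trainable_weights))  # expected: 4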
###Output
_____no_output_____
###Markdown
Above: this is a key step. We "freeze" the layers of the base model, so that only our two new Dense layers at the top are trainable. That means we only update the weights in the new top layers - all the other weights (the ones from the base model) are fixed ("frozen") during training.
###Code
model_transfer.compile(Adam(lr=.001), loss='categorical_crossentropy', metrics=['accuracy'])
model_transfer.summary() # run me if you dare
###Output
_____no_output_____
###Markdown
Above: that's a lot of layers!
###Code
history = model_transfer.fit(X_train, Y_train, validation_data=(X_valid, Y_valid), epochs=10)
###Output
_____no_output_____
###Markdown
3(d)rubric={reasoning:1}How does this result compare to the "from scratch" CNN? Approach 3Below, we un-freeze the last "15" layers, which is really only the last one or two layers, since the list of Keras layer objects doesn't really correspond to our idea of a layer (see `model.summary()`).
###Code
for i, layer in enumerate(reversed(model_transfer.layers)):
layer.trainable = True
# print(layer)
if i > 15:
break
# compile the model with a SGD/momentum optimizer and a very slow learning rate.
model_transfer.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# fine-tune the unfrozen layers
history = model_transfer.fit(X_train, Y_train, validation_data=(X_valid, Y_valid), epochs=10)
###Output
_____no_output_____
###Markdown
(optional) 3(e)rubric={reasoning:1}Un-freezing some of the layers seems to have a small effect here. Was it actually useful at all, or could we have achieved the same results by just training our top layers for more epochs? 3(f)rubric={reasoning:5}In Lab 3 we noticed that unlike scikit-learn's `fit`, Keras's `fit` doesn't re-initialize the weights, but rather continues on from where you were. In the above code, we benefitted from this. Briefly describe how/why this behaviour was useful to us. 3(g)rubric={reasoning:10}Brainstorm 3 other applications of this type of transfer learning, where you use a pre-trained network plus some modifications. In each case, what is the original task and what is the new task? (It's OK if you don't actually have access to a pre-trained network to do the original task; we're just brainstorming here.) (optional) 3(h)rubric={reasoning:3}There are two perspectives on what we did in Approach 2: one is that we froze most of the layers and just fine-tuned the last layers. The other perspective is that we used a pre-trained feature extractor and then just used a simple model on top. In the above we added 2 layers on top, but if we added just one layer on top then it would just be a softmax logistic regression. Following this second perspective, can you get reasonable results by chaining together the feature extractor and a multi-class scikit-learn `LogisticRegression`? Perhaps this would be a good use case for a scikit-learn `Pipeline`? WARNING: I have not tried this myself, so there is a chance things will go wrong. If you get something to work, please let me know - I'm curious! (You are now done with your Kaggle Kernel. If you were editing the file there, you should download it to your local machine before closing the Kaggle Kernel!) Exercise 4: Pondering 4(a) rubric={reasoning:10}When we previously worked on the handwritten digits dataset, we did something quite silly: we "flattened" images into vectors; for example $28\times 28$ MNIST images became vectors of length $28\times 28 = 784$. This is arguably insane! One reason it's insane is that we were completely discarding the "spatial information" contained in the image and just pretended we had 784 different features, whereas convnets preserve the 2D structure and take 2D convolutions. But there is another, related reason it's a bad idea to just flatten the images... what would go wrong if we tried to use fully connected nets on larger images, like $1000 \times 1000$ pixels? 4(b)rubric={reasoning:10}For each of the following, would increasing this quantity typically increase, decrease, or have no effect on the number of parameters of the model? 1. Dropout probability, e.g. `0.2`2. Filter size, e.g. `(5,5)`3. Number of filters, e.g. `32` 4(c)rubric={reasoning:10}For each of the following, would increasing this quantity typically increase, decrease, or have no effect on the training error? No need to explain. 1. Dropout probability, e.g. `0.2`2. Filter size, e.g. `(5,5)`3. Number of filter, e.g. `32` 4(d)rubric={reasoning:15}What are the pros/cons of neural nets, vs. approaches previously learned (for both regression and classification)? Choose one method from a previous course (561, 571, 573) and compare it with what you've done in deep learning. Write a paragraph summarizing the results. -----------------All the rest are optional; if you want to be done, you're done! (optional) 4(e) rubric={reasoning:1}The code below shows that the MNIST model from Exercise 2 has 592,074 parameters. 
Explain where this number comes from by going layer by layer and accounting for all the parameters.
###Code
mnist_model.summary()
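
# One way to account for the 592,074 parameters (illustrative; assumes
# num_classes = 10 as in the MNIST setup above):
conv_params = (5 * 5 * 1 + 1) * 32         # 5x5 filters, 1 input channel, + biases
flat_features = 12 * 12 * 32               # 24x24 conv output halved by 2x2 pooling
dense1_params = (flat_features + 1) * 128  # Dense(128) weights + biases
dense2_params = (128 + 1) * 10             # Dense(10) weights + biases
print(conv_params + dense1_params + dense2_params)  # 592074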
###Output
_____no_output_____
###Markdown
(optional) 4(f)rubric={reasoning:1}Consider this CNN architecture:
###Code
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
model.summary()
###Output
_____no_output_____
###Markdown
Now, we remove (comment out) pooling from the _first_ convolutional layer:
###Code
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
model.summary()
###Output
_____no_output_____
###Markdown
Lab 4 Exploration: LIDARIn this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it. Table of Contents1. [Getting Started](#GettingStarted)1. [Gathering LIDAR Data](#GatheringLidarData)1. [Visualizing LIDAR Data](#VisualizingLidarData)1. [Handling Noise](#HandlingNoise)1. [Closest Point](#ClosestPoint) 1. Getting Started**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
[95m>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False][00m
###Markdown
2. Gathering LIDAR DataThe racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
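
# Quick sanity check (illustrative): samples are spaced 1/2 degree apart and
# ordered clockwise from the front, so an angle in degrees maps to the index
# round(angle * 2) % len(scan).
print(len(scan))  # expected: 720
print([round(angle * 2) for angle in (0, 90, 180, 270)])  # [0, 180, 360, 540]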
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 60.32 cm
Rear distance: 79.41 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
# The samples are ordered clockwise from the front, so 90 degrees (index 180)
# is directly to the car's right and 270 degrees (index 540) is to its left.
right_distance = scan[180]
print(f"Right distance: {right_distance:.2f} cm")

left_distance = scan[540]
print(f"Left distance: {left_distance:.2f} cm")
###Output
Right distance: 89.86 cm
Left distance: 96.95 cm
###Markdown
3. Visualizing LIDAR dataIn this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.htmlracecar_utils.draw_circle).1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.1. Display the color image in the Jupyter Notebook with Matplotlib.The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
rc_utils.draw_circle(image, (radius, radius), (0, 255, 0), CAR_DOT_RADIUS)
    # TODO: Draw a red pixel for each non-zero sample less than max_range
    # Iterate over the full scan so each index keeps its angle; filtering the
    # array first would break the index-to-angle correspondence.
    for i in range(num_samples):
        if 0 < scan[i] < max_range:
            angle_rad = np.radians(i * 360 / num_samples)
            # Scale distances so that max_range lands at the edge of the image
            length = scan[i] * radius / max_range
            row = round(radius - length * np.cos(angle_rad))
            col = round(radius + length * np.sin(angle_rad))
            if 0 <= row < 2 * radius and 0 <= col < 2 * radius:
                image[row, col] = (0, 0, 255)

    # TODO: Draw a light blue dot for each point in highlighted_samples
    # Hint: Use rc_utils.draw_circle
    HIGHLIGHT_DOT_RADIUS = 2
    for angle, distance in highlighted_samples:
        if 0 < distance < max_range:
            angle_rad = np.radians(angle)
            length = distance * radius / max_range
            row = round(radius - length * np.cos(angle_rad))
            col = round(radius + length * np.sin(angle_rad))
            if 0 <= row < 2 * radius and 0 <= col < 2 * radius:
                rc_utils.draw_circle(image, (row, col), (255, 100, 100), HIGHLIGHT_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
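###Markdown
The core of `show_lidar` is the conversion from a polar `(angle, distance)` sample to `(row, col)` pixel coordinates. Here is that step in isolation (a minimal sketch with hypothetical values): 0 degrees points up, angles grow clockwise, and distances are scaled so that `max_range` lands on the edge of the image.
###Code
import numpy as np

radius, max_range = 128, 400   # hypothetical image half-size (px) and range (cm)
angle, distance = 45.0, 200.0  # hypothetical LIDAR sample

scaled = distance * radius / max_range                    # max_range maps to the image edge
row = round(radius - scaled * np.cos(np.radians(angle)))  # 0 degrees points up (smaller row)
col = round(radius + scaled * np.sin(np.radians(angle)))  # clockwise: 90 degrees points right
print(row, col)  # 83 173
###Output
_____no_output_____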
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
scan = rc.lidar.get_samples_async()
show_lidar(scan)
# rc.display.show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
show_lidar(scan, highlighted_samples=[(0, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise
Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.
**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
    # TODO: average the samples in the specified window
    scale = len(scan) / 360  # samples per degree (720 / 360 = 2)
    start = round((angle - window_angle / 2) * scale)
    end = round((angle + window_angle / 2) * scale)
    window_samples = np.take(scan, range(start, end), mode="wrap")  # wraps negative/overflowing indices
    window_samples = window_samples[window_samples != 0.0]  # ignore null (0.0) samples
    if len(window_samples) == 0:
        return 0.0
    return float(np.mean(window_samples))
###Output
_____no_output_____
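###Markdown
The markdown above suggests a list comprehension; for reference, the same windowed average can also be written that way (a sketch assuming the same 720-sample, clockwise scan layout), with the modulo operator providing the wraparound:
###Code
def get_lidar_average_distance_lc(scan, angle, window_angle=4):
    # Sketch: windowed average via a list comprehension with modular wraparound.
    scale = len(scan) / 360  # samples per degree
    start = round((angle - window_angle / 2) * scale)
    end = round((angle + window_angle / 2) * scale)
    samples = [scan[i % len(scan)] for i in range(start, end) if scan[i % len(scan)] != 0.0]
    return sum(samples) / len(samples) if samples else 0.0
###Output
_____no_output_____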
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
scan = rc.lidar.get_samples_async()
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 78.31 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.
**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
scan = rc.lidar.get_samples_async()
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 60.31 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.
**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point
We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.
```scan = (scan - 0.01) % 10000```
In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.
```scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()```
However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
    # TODO: Return the (angle, distance) of the closest point within the specified window
    scale = len(scan) / 360  # samples per degree (720 / 360 = 2)
    # Shift null (0.0) samples to a very large distance so they never win the argmin
    scan = (scan - 0.01) % 10000
    start_index = round(window[0] * scale) % len(scan)
    end_index = round(window[1] * scale) % len(scan)
    if start_index >= end_index:
        # The window wraps around the 360-0 degree boundary (or covers the full circle)
        samples = np.concatenate((scan[start_index:], scan[: end_index + 1]))
    else:
        samples = scan[start_index : end_index + 1]
    min_index = int(np.argmin(samples))
    angle = ((start_index + min_index) % len(scan)) * 360 / len(scan)
    return angle, samples[min_index]
###Output
_____no_output_____
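###Markdown
As a quick sanity check of the shift-and-mod trick described above, a toy scan (hypothetical values, not real LIDAR data) shows null samples jumping to 9,999.99 cm while real measurements barely change, so `argmin` finds the genuine closest sample:
###Code
import numpy as np

demo = np.array([50.0, 0.0, 120.0], np.float32)  # hypothetical scan with one null sample
shifted = (demo - 0.01) % 10000
print(shifted)             # [  49.99 9999.99  119.99]
print(np.argmin(shifted))  # 0 -> the 50 cm sample wins, not the null
###Output
_____no_output_____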
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
scan = rc.lidar.get_samples_async()
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
scan = rc.lidar.get_samples_async()
angle, distance = rc_utils.get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 61.5 degrees
Distance: 82.9 cm
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.
**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
scan = rc.lidar.get_samples_async()
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
scan = rc.lidar.get_samples_async()
angle, distance = rc_utils.get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 30.0 degrees
Distance: 58.9 cm
###Markdown
Lab 4 Exploration: LIDAR
In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.
Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.
Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)
1. Getting Started
**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
_____no_output_____
###Markdown
2. Gathering LIDAR Data
The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.
In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
_____no_output_____
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]  # 270 degrees clockwise from the front is directly left
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]  # 90 degrees clockwise from the front is directly right
print(f"Right distance: {right_distance:.2f} cm")
###Output
_____no_output_____
###Markdown
3. Visualizing LIDAR data
In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:
1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.
The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.
**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
    # TODO: Draw a green dot at the center of the image to denote the car
    # Hint: Use rc_utils.draw_circle
    CAR_DOT_RADIUS = 2
    rc_utils.draw_circle(image, (radius, radius), (0, 255, 0), CAR_DOT_RADIUS)
    # TODO: Draw a red pixel for each non-zero sample less than max_range
    scale = radius / max_range  # scale distances so max_range lands on the image edge
    for i in range(num_samples):
        if 0 < scan[i] < max_range:
            sample_angle = i * 360 / num_samples
            row = round(radius - scan[i] * scale * np.cos(np.radians(sample_angle)))
            col = round(radius + scan[i] * scale * np.sin(np.radians(sample_angle)))
            if 0 <= row < 2 * radius and 0 <= col < 2 * radius:
                image[row][col] = (0, 0, 255)
    # TODO: Draw a light blue dot for each point in highlighted_samples
    # Hint: Use rc_utils.draw_circle
    HIGHLIGHT_DOT_RADIUS = 2
    for sample_angle, distance in highlighted_samples:
        row = round(radius - distance * scale * np.cos(np.radians(sample_angle)))
        col = round(radius + distance * scale * np.sin(np.radians(sample_angle)))
        if 0 <= row < 2 * radius and 0 <= col < 2 * radius:
            rc_utils.draw_circle(image, (row, col), (255, 100, 100), HIGHLIGHT_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise
Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.
**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
    # TODO: average the samples in the specified window
    scale = len(scan) / 360  # samples per degree (720 / 360 = 2)
    start = round((angle - window_angle / 2) * scale)
    end = round((angle + window_angle / 2) * scale)
    window_samples = np.take(scan, range(start, end), mode="wrap")  # wraps negative/overflowing indices
    window_samples = window_samples[window_samples != 0.0]  # ignore null (0.0) samples
    if len(window_samples) == 0:
        return 0.0
    return float(np.mean(window_samples))
###Output
_____no_output_____
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
_____no_output_____
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.
**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
_____no_output_____
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.
**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
_____no_output_____
###Markdown
5. Closest Point
We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.
```scan = (scan - 0.01) % 10000```
In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.
```scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()```
However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
    # TODO: Return the (angle, distance) of the closest point within the specified window
    scale = len(scan) / 360  # samples per degree (720 / 360 = 2)
    # Shift null (0.0) samples to a very large distance so they never win the argmin
    scan = (scan - 0.01) % 10000
    start_index = round(window[0] * scale) % len(scan)
    end_index = round(window[1] * scale) % len(scan)
    if start_index >= end_index:
        # The window wraps around the 360-0 degree boundary (or covers the full circle)
        samples = np.concatenate((scan[start_index:], scan[: end_index + 1]))
    else:
        samples = scan[start_index : end_index + 1]
    min_index = int(np.argmin(samples))
    angle = ((start_index + min_index) % len(scan)) * 360 / len(scan)
    return angle, samples[min_index]
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
_____no_output_____
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.
**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
_____no_output_____
###Markdown
Lab 4 Exploration: LIDAR
In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.
Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.
Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)
1. Getting Started
**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False]
###Markdown
2. Gathering LIDAR Data
The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.
In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 207.54 cm
Rear distance: 0.00 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]  # 270 degrees clockwise from the front is directly left
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]  # 90 degrees clockwise from the front is directly right
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 56.04 cm
Right distance: 69.51 cm
###Markdown
3. Visualizing LIDAR data
In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:
1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.
The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.
**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 4
rc_utils.draw_circle(image, (radius, radius), (0,255,0), CAR_DOT_RADIUS)
    # TODO: Draw a red pixel for each non-zero sample less than max_range
    scale = radius / max_range  # scale distances so max_range lands on the image edge
    for i in range(scan.size):
        if 0 < scan[i] < max_range:
            length = scan[i] * scale
            center = (radius - int(length * math.cos(math.radians(i / 2))), radius + int(length * math.sin(math.radians(i / 2))))
            if 0 < center[0] < 2 * radius and 0 < center[1] < 2 * radius:
                image[center[0]][center[1]][2] = 255
    # TODO: Draw a light blue dot for each point in highlighted_samples
    # Hint: Use rc_utils.draw_circle
    HIGHLIGHT_DOT_RADIUS = 2
    for sample in highlighted_samples:
        length = sample[1] * scale
        center = (radius - int(length * math.cos(math.radians(sample[0]))), radius + int(length * math.sin(math.radians(sample[0]))))
        if 0 < center[0] < 2 * radius and 0 < center[1] < 2 * radius:
            rc_utils.draw_circle(image, center, (255, 0, 0), HIGHLIGHT_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise
Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.
**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
scale = len(scan) / 360 # 720/360
center_index = int(scale * angle) # scale angle to scan size
side_range = int(scale * window_angle / 2)
left_index = (center_index - side_range) % len(scan) # % wraparounds for negative indices
right_index = (center_index + side_range) % len(scan)
    # Handle angle window wraparound
    if left_index > right_index:
        samples = scan[left_index:].tolist() + scan[0 : right_index + 1].tolist()
    else:
        samples = scan[left_index : right_index + 1].tolist()
    samples = [sample for sample in samples if sample != 0.0]  # ignore null (0.0) samples
    if len(samples) == 0:
        return 0.0
    return sum(samples) / len(samples)
###Output
_____no_output_____
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 77.84 cm
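###Markdown
As an aside, the wraparound does not have to be handled by concatenating two slices; NumPy's `np.take` in wrap mode accepts indices past either end of the array (a sketch with hypothetical data, not the implementation above):
###Code
import numpy as np

demo_scan = np.arange(720, dtype=np.float32)  # hypothetical scan holding its own indices
# Indices -6..5 wrap to 714..719 followed by 0..5
window_samples = np.take(demo_scan, range(-6, 6), mode="wrap")
print(window_samples)  # [714. 715. 716. 717. 718. 719. 0. 1. 2. 3. 4. 5.]
###Output
_____no_output_____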
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**.
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 60.34 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**.
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point
We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.
```scan = (scan - 0.01) % 10000```
In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.
```scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()```
However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
    # TODO: Return the (angle, distance) of the closest point within the specified window
    scale = len(scan) / 360  # samples per degree
    left_index = round(window[0] * scale) % len(scan)
    right_index = round(window[1] * scale) % len(scan)
    # Shift null (0.0) samples to a very large distance so they never win the argmin
    scan = (scan - 0.01) % 10000
    out: NDArray[Any, np.float32]
    if left_index >= right_index:
        # The window wraps around the 360-0 degree boundary (or covers the full circle)
        out = np.concatenate((scan[left_index:], scan[: right_index + 1]))
    else:
        out = scan[left_index : right_index + 1]
    min_index = int(np.argmin(out))
    angle = ((left_index + min_index) % len(scan)) * 360 / len(scan)
    return (angle, out[min_index])
###Output
_____no_output_____
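###Markdown
One subtle step above is converting the index returned by `argmin`, which is relative to the sliced window, back into a global angle by adding the window's start index. A toy example (hypothetical values) makes this concrete:
###Code
import numpy as np

demo_scan = np.full(720, 100.0, np.float32)
demo_scan[250] = 30.0              # hypothetical closest point at 125 degrees
start = 60 * 2                     # a window starting at 60 degrees -> index 120
samples = demo_scan[start : 150 * 2 + 1]
local_index = np.argmin(samples)   # index within the slice, not the full scan
angle = (start + local_index) * 360 / len(demo_scan)
print(angle)                       # 125.0
###Output
_____no_output_____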
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 59.0 degrees
Distance: 97.9 cm
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array. **If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**.
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 16.5 degrees
Distance: 62.7 cm
###Markdown
1. Getting Started
**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
2. Gathering LIDAR Data
The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.
In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 56.65 cm
Rear distance: 77.69 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
3. Visualizing LIDAR data
In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:
1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.htmlracecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.
The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.
**Finish implementing this approach in the `show_lidar` function below.**
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
4. Handling Noise
Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.
**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions).
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 78.11 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.
**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.
**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
    # TODO: Return the (angle, distance) of the closest point within the specified window
    scale = len(scan) / 360  # samples per degree (720 / 360 = 2)
    start = round(window[0] * scale) % len(scan)
    end = round(window[1] * scale) % len(scan)
    scan_copy = scan[start : end + 1] if end > start else np.append(scan[start:], scan[: end + 1])
    # Shift null (0.0) samples to a very large distance so they never win the argmin
    scan_copy = (scan_copy - 0.01) % 10000
    min_ind = int(np.argmin(scan_copy))
    return (((start + min_ind) % len(scan)) / scale, scan_copy[min_ind])
###Output
_____no_output_____
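###Markdown
This copy of the notebook is missing the test cell, so here is a usage sketch mirroring the other copies (it assumes `rc` has been created and the LIDAR returns data, as in the cells above):
###Code
# Sketch: find and display the closest point to the right of the car
scan = rc.lidar.get_samples_async()
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
###Output
_____no_output_____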
###Markdown
Lab 4 Exploration: LIDAR
In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.
Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.
Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)
1. Getting Started
**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False]
###Markdown
2. Gathering LIDAR Data
The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.
In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 63.02 cm
Rear distance: 78.60 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]  # 270 degrees clockwise from the front is directly left
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]  # 90 degrees clockwise from the front is directly right
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 95.11 cm
Right distance: 92.14 cm
###Markdown
3. Visualizing LIDAR data
In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:
1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.
The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.
**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
center = (radius, radius)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
# TODO: Draw a red pixel for each non-zero sample less than max_range
scale = radius / max_range
for i in range(0, num_samples):
if 0 < scan[i] < max_range:  # skip null (0.0) samples
scaled_val = scale * scan[i]
row = radius - int(scaled_val * np.cos(math.radians(i/2)))
col = radius + int(scaled_val * np.sin(math.radians(i/2)))
rc_utils.draw_circle(image, (row, col), (0, 0, 255), 2)
# TODO: Draw a light blue dot for each point in highlighted_samples
# Hint: Use rc_utils.draw_circle
HIGHLIGHT_DOT_RADIUS = 2
for val in highlighted_samples:
scaled_val = scale * val[1]
row = radius - int(scaled_val * np.cos(math.radians(val[0])))
col = radius + int(scaled_val * np.sin(math.radians(val[0])))
rc_utils.draw_circle(image, (row, col), (255, 255, 0), HIGHLIGHT_DOT_RADIUS)
rc_utils.draw_circle(image, center, (0, 255, 0), CAR_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise
Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.
**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
    # TODO: average the samples in the specified window
    samples = []
    scale = len(scan) / 360  # samples per degree
    start = round((angle - window_angle / 2) * scale)
    end = round((angle + window_angle / 2) * scale)
    for i in range(start, end):
        sample = scan[i % len(scan)]  # modular wrap handles windows that cross the array edge
        if sample != 0.0:  # ignore null samples
            samples.append(sample)
    if len(samples) == 0:
        return 0.0
    return sum(samples) / len(samples)
###Output
_____no_output_____
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 453.37 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.
**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 38.78 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.
**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
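###Markdown
Since `statistics` is already imported, a quick check on a toy scan (a hedged sketch with hypothetical values) compares the result against `statistics.mean` computed by hand over the non-null samples:
###Code
import numpy as np
import statistics

toy_scan = np.zeros(720, np.float32)
toy_scan[0:4] = [10.0, 20.0, 0.0, 30.0]  # hypothetical samples near 0 degrees; one null
result = get_lidar_average_distance(toy_scan, 1, 4)
print(result, statistics.mean([10.0, 20.0, 30.0]))  # both should be 20.0
###Output
_____no_output_____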
###Markdown
5. Closest Point
We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.
```scan = (scan - 0.01) % 10000```
In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.
```scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()```
However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
    scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
    # TODO: Return the (angle, distance) of the closest point within the specified window
    scale = len(scan) / 360  # samples per degree
    # Shift null (0.0) samples to a very large distance so they never win the argmin
    scan = (scan - 0.01) % 10000
    lower_bound = round(window[0] * scale) % len(scan)
    upper_bound = round(window[1] * scale) % len(scan)
    if lower_bound >= upper_bound:
        # The window wraps around the 360-0 degree boundary (or covers the full circle)
        sub_list = np.concatenate((scan[lower_bound:], scan[: upper_bound + 1]))
    else:
        sub_list = scan[lower_bound : upper_bound + 1]
    min_index = int(np.argmin(sub_list))
    angle = ((lower_bound + min_index) % len(scan)) * 360 / len(scan)
    distance = sub_list[min_index]
    return (angle, distance)
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 34.0 degrees
Distance: 10000.0 cm
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.
**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 11.5 degrees
Distance: 61.2 cm
###Markdown
Lab 4 Exploration: LIDAR
In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.
Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.
Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)
1. Getting Started
**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
[95m>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False][00m
###Markdown
2. Gathering LIDAR Data
The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.
In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 63.35 cm
Rear distance: 76.65 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 97.78 cm
Right distance: 85.92 cm
###Markdown
3. Visualizing LIDAR data
In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:
1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.
The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.
**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
rc_utils.draw_circle(image, (radius, radius), (0, 255, 0), CAR_DOT_RADIUS)  # dot at the image center
# TODO: Draw a red pixel for each non-zero sample less than max_range
angle = 0
for i in scan:
if 0 < i < max_range:  # skip null (0.0) samples
length = (radius * i) / max_range
x = int(radius - round(length * np.cos(np.deg2rad(angle)), None))
y = int(radius + round(length * np.sin(np.deg2rad(angle)), None))
image[x][y] = (0, 0, 255)
angle += 0.5
# TODO: Draw a light blue dot for each point in highlighted_samples
# Hint: Use rc_utils.draw_circle
HIGHLIGHT_DOT_RADIUS = 2
    for sample_angle, distance in highlighted_samples:
        length = (radius * distance) / max_range
        row = int(radius - round(length * np.cos(np.deg2rad(sample_angle))))
        col = int(radius + round(length * np.sin(np.deg2rad(sample_angle))))
        rc_utils.draw_circle(image, (row, col), (255, 255, 0), HIGHLIGHT_DOT_RADIUS)  # light blue in BGR
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise

Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.

**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
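###Markdown
As a quick check of the window arithmetic (illustrative; the scan has 2 samples per degree):
###Code
angle, window_angle = 60, 4
start_index = int((angle - window_angle / 2) * 2)  # 58 degrees -> index 116
end_index = int((angle + window_angle / 2) * 2)    # 62 degrees -> index 124
print(start_index, end_index)
###Output
_____no_output_____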
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
# TODO: average the samples in the specified window
    total = 0.0
    num_samples = 0
    start_range = int(angle * 2 - window_angle) % len(scan)
    end_range = int(angle * 2 + window_angle) % len(scan)
    if start_range > end_range:
        samples = scan[start_range:].tolist() + scan[0 : end_range + 1].tolist()
    else:
        samples = scan[start_range : end_range + 1].tolist()
    for sample in samples:
        if sample > 0:  # ignore null (0.0) samples
            total += sample
            num_samples += 1
    if num_samples == 0:
        return 0.0
    return total / num_samples
###Output
_____no_output_____
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 78.23 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.

**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 60.92 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.

**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point

We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.

```
scan = (scan - 0.01) % 10000
```

In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.

```
scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()
```

However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
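###Markdown
A two-line check of the shift-and-mod trick described above (illustrative values):
###Code
demo = np.array([0.0, 55.2, 0.0, 120.7], np.float32)
print((demo - 0.01) % 10000)  # the 0.0 entries become ~9999.99; real distances barely change
###Output
_____no_output_____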
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
# TODO: Return the (angle, distance) of the closest point within the specified window
num_samples = rc.lidar.get_num_samples()
scan = (scan - 0.01) % 10000
min_degree, max_degree = window
    min_degree = int(min_degree * 2)
    max_degree = int(max_degree * 2)
if min_degree < 0:
min_degree = num_samples - abs(min_degree)
if min_degree > num_samples:
min_degree = min_degree - num_samples
if max_degree < 0:
max_degree = num_samples - abs(max_degree)
if max_degree > num_samples:
max_degree = max_degree - num_samples
if min_degree > max_degree:
samples = scan[min_degree:].tolist() + scan[0 : max_degree + 1].tolist()
else:
samples = scan[min_degree : max_degree + 1].tolist()
min_point = np.argmin(samples)
angle = min_degree + min_point
    if angle >= num_samples:
angle -= num_samples
return ((angle * 360) / num_samples, samples[min_point])
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 86.5 degrees
Distance: 85.8 cm
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.

**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 3.5 degrees
Distance: 58.3 cm
###Markdown
Lab 4 Exploration: LIDAR

In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.

Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.

Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)

1. Getting Started

**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
_____no_output_____
###Markdown
2. Gathering LIDAR Data

The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.

In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 59.46 cm
Rear distance: 77.36 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 98.05 cm
Right distance: 98.05 cm
###Markdown
3. Visualizing LIDAR data

In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:

1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.

The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.

**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
rc_utils.draw_circle(image, [radius, radius], (0,255,0), CAR_DOT_RADIUS)
# TODO: Draw a red pixel for each non-zero sample less than max_range
    for i in range(len(scan)):
        if 0 < scan[i] < max_range:  # skip null (0.0) samples
            angle = math.radians(i / 2)
            length = radius * scan[i] / max_range  # scale so max_range reaches the image edge
            row = int(radius - math.cos(angle) * length)
            col = int(radius + math.sin(angle) * length)
            image[row][col] = (0, 0, 255)
# TODO: Draw a light blue dot for each point in highlighted_samples
# Hint: Use rc_utils.draw_circle
HIGHLIGHT_DOT_RADIUS = 2
    for sample_angle, dist in highlighted_samples:
        angle = math.radians(sample_angle)  # highlighted samples are already in degrees
        length = radius * dist / max_range  # scale to the image, just like the scan samples
        row = int(radius - math.cos(angle) * length)
        col = int(radius + math.sin(angle) * length)
        rc_utils.draw_circle(image, [row, col], (255, 255, 0), HIGHLIGHT_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise

Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.

**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
# TODO: average the samples in the specified window
indices = range((angle - window_angle//2)*2, (angle+window_angle//2)*2)
tol = [val for val in scan.take(indices, mode = 'wrap') if val > 0.0]
print(indices)
print(tol)
if len(tol) == 0: return 0.0
return sum(tol) / len(tol)
###Output
_____no_output_____
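###Markdown
The `scan.take(indices, mode="wrap")` call above is what lets the window run past either end of the array; a standalone check of the wrap semantics (illustrative):
###Code
demo = np.arange(6)
print(demo.take(range(-2, 3), mode="wrap"))  # indices -2..2 wrap to [4 5 0 1 2]
###Output
_____no_output_____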
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
range(354, 366)
[77.65286, 75.04198, 77.731445, 81.03676, 76.302246, 77.46473, 77.35829, 73.47301, 79.78393, 77.66942, 80.28007, 77.58201]
Rear distance (6 degree window): 77.61 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.

**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
range(-6, 6)
[58.75335, 61.39671, 58.5773, 61.16245, 60.172592, 60.661354, 59.45982, 58.402287, 59.967598, 58.66929, 58.12845, 59.242188]
Forward distance (6 degree window): 59.55 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.

**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
range(-6, 6)
[]
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point

We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.

```
scan = (scan - 0.01) % 10000
```

In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.

```
scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()
```

However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
# TODO: Return the (angle, distance) of the closest point within the specified window
scan = (scan - 0.01) % 100000
i1= window[0] * 2 % rc.lidar.get_num_samples()
i2= window[1] * 2 % rc.lidar.get_num_samples()
if i1 > i2:
angle_1 = np.argmin(scan[0:i2])
        angle_2 = np.argmin(scan[i1:]) + i1  # include the final sample
if scan[angle_1] < scan[angle_2]:
angle = angle_1
else:
angle = angle_2
else:
angle = np.argmin(scan[i1:i2]) + i1
distance = scan[angle]
    return (angle * 360 / rc.lidar.get_num_samples(), distance)
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
print(rc_utils.get_lidar_closest_point(scan, (30,150)))
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 78.0 degrees
Distance: 85.0 cm
(78.0, 85.01908111572266)
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.

**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
print(rc_utils.get_lidar_closest_point(scan, (-30,30)))
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 2.0 degrees
Distance: 58.1 cm
(2.0, 58.11845016479492)
###Markdown
Lab 4 Exploration: LIDAR

In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.

Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.

Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)

1. Getting Started

**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
[95m>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False][00m
###Markdown
2. Gathering LIDAR Data

The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.

In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 60.83 cm
Rear distance: 78.81 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 92.80 cm
Right distance: 87.08 cm
###Markdown
3. Visualizing LIDAR data

In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:

1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.

The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.

**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
imgX = image.shape[0]
imgY = image.shape[1]
center = (imgX // 2, imgY // 2)
rc_utils.draw_circle(image, center, rc_utils.ColorBGR.green.value, CAR_DOT_RADIUS)
# TODO: Draw a red pixel for each non-zero sample less than max_range
for i in range(num_samples):
if 0.0 < scan[i] < max_range:
angle: float = 2 * math.pi * i / num_samples
length: float = radius * scan[i] / max_range
r: int = int(radius - length * math.cos(angle))
c: int = int(radius + length * math.sin(angle))
image[r][c][2] = 255
# TODO: Draw a light blue dot for each point in highlighted_samples
# Hint: Use rc_utils.draw_circle
HIGHLIGHT_DOT_RADIUS = 2
for (angle, distance) in highlighted_samples:
if 0 < distance < max_range:
angle_rad = angle * math.pi / 180
length: float = radius * distance / max_range
r: int = int(radius - length * math.cos(angle_rad))
c: int = int(radius + length * math.sin(angle_rad))
image[r][c][0] = 255
image[r][c][1] = 255
image[r][c][2] = 0
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise

Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.

**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
# TODO: average the samples in the specified window
angle %= 360
center_index = int(angle * scan.shape[0] / 360)
num_side_samples = int(window_angle / 2 * scan.shape[0] / 360)
left_index = int((center_index - num_side_samples) % len(scan))
right_index = int(center_index + num_side_samples) % len(scan)
samples = []
if right_index < left_index:
samples = scan[left_index:].tolist() + scan[0 : right_index + 1].tolist()
else:
samples = scan[left_index : right_index + 1].tolist()
samples = [i for i in samples if i > 0]
if len(samples) == 0:
return 0.0
return sum(samples) / len(samples)
###Output
_____no_output_____
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 77.94 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.

**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 59.86 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.

**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point

We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.

```
scan = (scan - 0.01) % 10000
```

In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.

```
scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()
```

However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
# TODO: Return the (angle, distance) of the closest point within the specified window
min_angle = window[0] % 360
max_angle = window[1] % 360
if min_angle == max_angle:
samples = (scan - 0.01) % 1000000
min_index = np.argmin(samples)
return min_index * 360 / scan.shape[0], samples[min_index]
first_sample: int = round(min_angle * len(scan) / 360)
    last_sample: int = round(max_angle * len(scan) / 360)
if first_sample > last_sample:
left_samples = scan[first_sample:]
right_samples = scan[: last_sample + 1]
left_samples = (left_samples - 0.01) % 1000000
right_samples = (right_samples - 0.01) % 1000000
left_min_index = np.argmin(left_samples)
left_min = left_samples[left_min_index]
right_min_index = np.argmin(right_samples)
right_min = right_samples[right_min_index]
if left_min < right_min:
return (first_sample + left_min_index) * 360 / scan.shape[0], left_min
else:
return right_min_index * 360 / scan.shape[0], right_min
samples = (scan[first_sample : last_sample + 1] - 0.01) % 1000000
min_index = np.argmin(samples)
return (first_sample + min_index) * 360 / scan.shape[0], samples[min_index]
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (30, 150))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 96.5 degrees
Distance: 83.9 cm
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.

**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 356.0 degrees
Distance: 58.3 cm
###Markdown
Lab 4 Exploration: LIDAR

In this notebook, we will learn how to use the racecar's LIDAR to measure distance and find closest points.

Throughout this notebook, **text in bold red** indicates a change you must make to the following code block before running it.

Table of Contents
1. [Getting Started](#GettingStarted)
1. [Gathering LIDAR Data](#GatheringLidarData)
1. [Visualizing LIDAR Data](#VisualizingLidarData)
1. [Handling Noise](#HandlingNoise)
1. [Closest Point](#ClosestPoint)

1. Getting Started

**If you are running the car in RacecarSim, set `isSimulation` to `True`**. Leave `isSimulation` `False` if you are using a physical car.
###Code
# TODO: Update isSimulation if necessary
isSimulation = True
###Output
_____no_output_____
###Markdown
Next, we will import the necessary libraries for this notebook, including Python libraries (`cv`, `numpy`, etc.) and the Racecar library (`racecar_core`).
###Code
# Import Python libraries
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
import statistics
from nptyping import NDArray
from typing import Any, Tuple, List, Optional
# Import Racecar library
import sys
sys.path.append("../../library")
import racecar_core
import racecar_utils as rc_utils
###Output
_____no_output_____
###Markdown
Finally, we will create a racecar object. If this step fails, make sure that `isSimulation` has the correct value.
###Code
# Create Racecar
rc = racecar_core.create_racecar(isSimulation)
###Output
[95m>> Racecar created with the following options:
Simulation (-s): [True]
Headless (-h): [False]
Initialize with display (-d): [False][00m
###Markdown
2. Gathering LIDAR Data

The racecar's LIDAR data is stored as a one-dimensional numpy array of 720 distance measurements. Each measurement is 1/2 of a degree apart, ordered clockwise, with the 0th entry directly in front of the car.

In Jupyter Notebook, we can access the car's LIDAR data using `rc.lidar.get_samples_async()`. Outside of Jupyter Notebook, we must use `rc.lidar.get_samples()` instead.
###Code
# Access the current LIDAR scan
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Let's access the measurements directly in front of and behind the car.
###Code
# Calculate and show the forward and rear distances
forward_distance = scan[0]
print(f"Forward distance: {forward_distance:.2f} cm")
rear_distance = scan[360]
print(f"Rear distance: {rear_distance:.2f} cm")
###Output
Forward distance: 59.54 cm
Rear distance: 80.06 cm
###Markdown
**Set `left_distance` and `right_distance` in the following code block to the LIDAR measurements directly to the left and directly to the right of the car.**
###Code
# TODO: Calculate and show the left and right distances
left_distance = scan[540]
print(f"Left distance: {left_distance:.2f} cm")
right_distance = scan[180]
print(f"Right distance: {right_distance:.2f} cm")
###Output
Left distance: 92.78 cm
Right distance: 89.22 cm
###Markdown
3. Visualizing LIDAR data

In this section, we will write a function to convert LIDAR data into a color image providing a top-down view of the data. We will use the following approach:

1. Create an all-black BGR image (a 3D numpy array ordered rows, columns, color channels) of the specified radius.
1. Denote the car by drawing a green dot at the center of the image with [`rc_utils.draw_circle`](https://mitll-racecar.readthedocs.io/en/latest/racecar_utils.html#racecar_utils.draw_circle).
1. For each LIDAR sample, set the corresponding pixel to red. We can calculate the angle of each sample based on its index in the scan. By scaling the distance such that `max_range` is the edge of the image, we can convert this angle and distance into a row and column in the image.
1. `highlighted_samples` contains a list of `(angle, distance)` measurements that we wish to highlight with light blue dots. These can be plotted similarly to the samples in the previous step and drawn with `rc_utils.draw_circle`.
1. Display the color image in the Jupyter Notebook with Matplotlib.

The result should look similar to the LIDAR visualization shown in the left sidebar of RacecarSim.

**Finish implementing this approach in the `show_lidar` function below.**
###Code
def show_lidar(
scan: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 400,
highlighted_samples: List[Tuple[int, int]] = []
) -> None:
"""
Displays a visual representation of a LIDAR scan in Jupyter Notebook.
Args:
scan: The LIDAR scan to show.
radius: Half of the width and height (in pixels) of the generated image.
max_range: The farthest distance to show in the image in cm. Any sample past this range is not shown.
highlighted_samples: A list of samples in (angle, distance) format to show as a blue dot.
"""
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(scan)
# TODO: Draw a green dot at the center of the image to denote the car
# Hint: Use rc_utils.draw_circle
CAR_DOT_RADIUS = 2
rc_utils.draw_circle(image, [radius,radius], [0, 255, 0], CAR_DOT_RADIUS)
# TODO: Draw a red pixel for each non-zero sample less than max_range
for i in range(num_samples):
if 0 < scan[i] < max_range:
angle = 2 * math.pi * i / num_samples
length = radius * scan[i] / max_range
r = int(radius - length * math.cos(angle))
c = int(radius + length * math.sin(angle))
image[r][c][2] = 255
# TODO: Draw a light blue dot for each point in highlighted_samples
# Hint: Use rc_utils.draw_circle
    HIGHLIGHT_DOT_RADIUS = 2
color = [255, 191, 0]
for (angle, distance) in highlighted_samples:
if 0 < distance < max_range:
angle_rad = angle * math.pi / 180
length = radius * distance / max_range
r = int(radius - length * math.cos(angle_rad))
c = int(radius + length * math.sin(angle_rad))
point = [r, c]
rc_utils.draw_circle(image, point, color, HIGHLIGHT_DOT_RADIUS)
# Show the image with Matplotlib
plt.imshow(cv.cvtColor(image, cv.COLOR_BGR2RGB))
plt.show()
###Output
_____no_output_____
###Markdown
Let's use this function to visualize our LIDAR scan.
###Code
show_lidar(scan)
###Output
_____no_output_____
###Markdown
For now, we will test out the `highlighted_samples` feature by highlighting points 100 cm in each cardinal direction.
###Code
show_lidar(scan, highlighted_samples=[(0, 100), (90, 100), (180, 100), (270, 100)])
###Output
_____no_output_____
###Markdown
4. Handling Noise

Just like depth images, LIDAR data is also subject to noise and null values. To help combat this, we will once again average several neighboring samples across an *angle window* instead of relying on a single measurement. For example, if we want to measure the distance at 60 degrees with a 4 degree window angle, we would average all of the samples from 58 to 62 degrees. To reduce the impact of null values, we should not consider any 0.0 measurement in our average.

**Implement this approach in `get_lidar_average_distance`**. You may wish to use a Python [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions).
###Code
def get_lidar_average_distance(
scan: NDArray[Any, np.float32], angle: float, window_angle: float = 4
) -> float:
"""
Finds the average distance of the object at a particular angle relative to the car.
Args:
scan: The samples from a LIDAR scan
angle: The angle (in degrees) at which to measure distance, starting at 0
directly in front of the car and increasing clockwise.
window_angle: The number of degrees to consider around angle.
Returns:
The average distance of the points at angle in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
Increasing window_angle reduces noise at the cost of reduced accuracy.
"""
# TODO: average the samples in the specified window
angle %= 360
center = int(angle * 2)
side = int(window_angle)
left_index = (center - side) % len(scan)
right_index = (center + side) % len(scan)
samples: List[float]
if right_index < left_index:
samples = scan[left_index:].tolist() + scan[0 : right_index + 1].tolist()
else:
samples = scan[left_index : right_index + 1].tolist()
samples = [e for e in samples if e > 0]
if len(samples) == 0:
return 0.0
else:
average = sum(samples) / len(samples)
return average
print(scan.shape[0] / 360)
###Output
2.0
###Markdown
Let's try it out. Increasing the window angle will decrease noise by including more samples, but will also decrease accuracy by including less relevant samples.
###Code
WINDOW_ANGLE = 6
rear_distance = get_lidar_average_distance(scan, 180, WINDOW_ANGLE)
print(f"Rear distance ({WINDOW_ANGLE} degree window): {rear_distance:.2f} cm")
###Output
Rear distance (6 degree window): 77.81 cm
###Markdown
`get_lidar_average_distance` must handle when the angle window passes over the edge of the array. For example, with an angle of 0 and a window angle of 6, we must consider all samples in the range 357 to 3 degrees.

**If you have not done so already, update `get_lidar_average_distance` to support angle windows that cross the edge of the array.**
###Code
forward_distance = get_lidar_average_distance(scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window): {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window): 59.92 cm
###Markdown
Finally, we must handle when there is no data in the specified angle window. In this case, we should return `0.0`.

**If you have not done so already, update `get_lidar_average_distance` to return `0.0` when the specified range does not contain any data.**
###Code
null_scan = np.zeros(rc.lidar.get_num_samples(), np.float32)
forward_distance = get_lidar_average_distance(null_scan, 0, WINDOW_ANGLE)
print(f"Forward distance ({WINDOW_ANGLE} degree window) in null scan: {forward_distance:.2f} cm")
###Output
Forward distance (6 degree window) in null scan: 0.00 cm
###Markdown
5. Closest Point

We can use the LIDAR to find the angle of the closest object in 360 degrees around the car. Just like with the depth image, we should convert null values to a large number so they are not considered for the minimum. An efficient way to do this is to shift down each value by a small amount (such as 0.01 cm) and then mod by a large number (such as 10,000 cm). This way, 0.0 becomes -0.01, which after modding becomes 9,999.99 cm, a very large distance that will not interfere with the true minimum.

```
scan = (scan - 0.01) % 10000
```

In lecture, we saw an example of how to find the angle of the closest point using the Numpy [argmin](https://numpy.org/doc/1.19/reference/generated/numpy.argmin.html) function.

```
scan = (scan - 0.01) % 10000
angle = np.argmin(scan) * 360 / rc.lidar.get_num_samples()
```

However, we may only wish to consider samples within a particular range, such as samples ranging from 30 to 150 degrees. **Implement `get_lidar_closest_point` to find the angle and distance of the closest point within a specified window in a LIDAR scan**.
###Code
def get_lidar_closest_point(
scan: NDArray[Any, np.float32], window: Tuple[float, float] = (0, 360)
) -> Tuple[float, float]:
"""
Finds the closest point from a LIDAR scan.
Args:
scan: The samples from a LIDAR scan.
window: The degree range to consider, expressed as (min_degree, max_degree)
Returns:
The (angle, distance) of the point closest to the car within the specified
degree window. All angles are in degrees, starting at 0 directly in front of the
car and increasing clockwise. Distance is in cm.
Note:
Ignores any samples with a value of 0.0 (no data).
In order to define a window which passes through the 360-0 degree boundary, it
is acceptable for window min_degree to be larger than window max_degree. For
example, (350, 10) is a 20 degree window in front of the car.
"""
# TODO: Return the (angle, distance) of the closest point within the specified window
min_angle = window[0] % 360
max_angle = window[1] % 360
if min_angle == max_angle:
samples = (scan - 0.01) % 1000000
min_index = np.argmin(samples)
return min_index * 360 / scan.shape[0], samples[min_index]
    scan = (scan - 0.01) % 1000000
    samples1_index = round(min_angle * len(scan) / 360)  # use the wrapped angles for indexing
    samples2_index = round(max_angle * len(scan) / 360)
    if samples1_index > samples2_index:
        samples1 = scan[samples1_index:]
        samples2 = scan[: samples2_index + 1]
        samples1_min_index = np.argmin(samples1)
        samples2_min_index = np.argmin(samples2)
        samples1_min = samples1[samples1_min_index]
        samples2_min = samples2[samples2_min_index]
        if samples1_min < samples2_min:
            return (samples1_index + samples1_min_index) * 360 / scan.shape[0], samples1_min
        else:
            return samples2_min_index * 360 / scan.shape[0], samples2_min
    else:
        samples = scan[samples1_index : samples2_index + 1]
        samples_min_index = np.argmin(samples)
        samples_min = samples[samples_min_index]
        return (samples1_index + samples_min_index) * 360 / scan.shape[0], samples_min
###Output
_____no_output_____
###Markdown
Let's use `get_lidar_closest_point` to find the closest point to the right of the car.
###Code
angle, distance = get_lidar_closest_point(scan, (0, 360))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
scan = rc.lidar.get_samples_async()
###Output
_____no_output_____
###Markdown
Once again, we should handle when the specified degree range passes over the edge of our array.

**If you have not done so already, update `get_lidar_closest_point` to support negative angles and windows that cross the edge of the array.**
###Code
angle, distance = get_lidar_closest_point(scan, (-30, 30))
print(f"Angle: {angle:.1f} degrees")
print(f"Distance: {distance:.1f} cm")
show_lidar(scan, highlighted_samples=[(angle, distance)])
###Output
Angle: 330.0 degrees
Distance: 85.3 cm
###Markdown
Lab 4 - (discrete) AutoEncoder

Plan for today:
* train a basic Autoencoder
* try out two ways of differentiably sampling from a discrete distribution
  * score function
  * Gumbel-softmax
* train a discrete autoencoder using those two methods
###Code
import torch
from torch.optim import SGD
from torch import nn
from typing import List
from torchvision.datasets import MNIST
from torchvision import transforms as tv
from torch.utils.data import DataLoader, Subset
from sklearn.metrics import accuracy_score
from tqdm.notebook import tqdm
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from time import sleep
from torchvision.models import vgg16, vgg16_bn, resnet50, resnet18
import seaborn as sns
from sklearn.cluster import KMeans
from collections import Counter
from typing import Tuple
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
ds_train = MNIST(root="data", train=True, download=True, transform=tv.ToTensor())
ds_test = MNIST(root="data", train=False, download=True, transform=tv.ToTensor())
batch_size=128
dl_train = DataLoader(ds_train, batch_size, shuffle=True, drop_last=False) # dataloader with full dataset
dl_test = DataLoader(ds_test, batch_size, shuffle=False)
###Output
_____no_output_____
###Markdown
1 - Warmup: vanilla AutoEncoder

Let's start with training a classic AutoEncoder on MNIST. The architecture and training loop have been provided below.
###Code
class Reshape(nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, x):
return x.reshape(-1, *self.shape)
class AE(nn.Module):
    def __init__(self, hidden_sizes: List[int], downsize_steps: List[int], chw: Tuple[int, int, int] = (1, 28, 28), latent_size: int = 128):
"""
Args:
hidden sizes - hidden size of each consecutive convolution
downsize_steps - numbers of convolutions for which there will be stride = 2
chw - size of the input image
latent_size: latent dimension
Forward method returns the reconstructed batch *and* the latent vectors.
"""
super().__init__()
c, h, w = chw
conv_1 = nn.Conv2d(c, hidden_sizes[0], 3, padding=1)
conv_n = nn.ConvTranspose2d(hidden_sizes[0], c, 3, padding=1)
encoder = []
decoder = []
ht, wt = h, w
for i, (in_hs, out_hs) in enumerate(zip(hidden_sizes[:-1], hidden_sizes[1:])):
downsize = (i in downsize_steps)
encoder.append(nn.Sequential(
nn.Conv2d(
in_hs, out_hs, 3, padding=1,
stride=2 if downsize else 1
),
nn.BatchNorm2d(out_hs),
nn.ReLU()
))
            decoder = [nn.Sequential(
                nn.ConvTranspose2d(
                    out_hs, in_hs, 3, padding=1,
                    # output_padding must be smaller than stride, so only pad when downsizing
                    output_padding=1 if downsize else 0,
                    stride=2 if downsize else 1,
                ),
                nn.BatchNorm2d(in_hs),
                nn.ReLU()
            )] + decoder
if downsize:
ht = ht // 2
wt = wt // 2
le = nn.Sequential(
nn.Flatten(),
nn.Linear(ht * wt * hidden_sizes[-1], latent_size)
)
ld = nn.Sequential(
nn.Linear(latent_size, ht * wt * hidden_sizes[-1]),
Reshape(( hidden_sizes[-1], ht, wt))
)
self.encoder = nn.Sequential(conv_1, *encoder, le)
self.decoder = nn.Sequential(ld, *decoder, conv_n)
def forward(self, x):
l = self.encoder(x)
d = self.decoder(l)
return d, l
def draw_reconstruction(original: torch.Tensor, expected: torch.Tensor, epoch: int):
assert len(original) == len(expected)
fig, ax = plt.subplots(nrows=2, ncols=len(original), figsize=(3*len(original), 6))
for i, (o,e) in enumerate(zip(original, expected)):
ax[0][i].imshow(o.squeeze())
ax[1][i].imshow(e.squeeze())
plt.suptitle(f"{epoch}: Original / expected")
plt.show()
def train_ae(net, train_dl, test_dl, num_epochs: int = 20, lr: float = 3e-4, loss_fn = None):
net = net.to(device)
loss_fn = loss_fn or nn.MSELoss()
opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=3e-4)
history = []
epoch_progress = tqdm(list(range(num_epochs)))
for i in epoch_progress:
train_loss = 0
net.train()
for iteration, (X_train, y_train) in enumerate(train_dl):
X_train, y_train = [t.to(device) for t in [X_train, y_train]]
opt.zero_grad()
latent = net.encoder(X_train)
X_pred = net.decoder(latent)
            loss = loss_fn(X_pred, X_train)  # (prediction, target) order matters for BCEWithLogitsLoss
loss.backward()
opt.step()
train_loss += loss.item()
val_loss = 0
net.eval()
with torch.no_grad():
for iteration, (X_val, y_val) in enumerate(test_dl):
X_val, y_val = [t.to(device) for t in [X_val, y_val]]
X_pred, _ = net(X_val)
if iteration == 0:
draw_reconstruction(
X_val[:10].detach().cpu().numpy(), X_pred[:10].detach().cpu().numpy(), epoch=i
)
                loss = loss_fn(X_pred, X_val)
val_loss += loss.item()
epoch_progress.set_description(f'#Epoch: {i}, train loss: {train_loss:.2f}, val loss: {val_loss:.2f}')
history.append({"e": i, "train_loss": train_loss, "val_loss": val_loss})
for m in ["train_loss", "val_loss"]:
plt.plot([h["e"] for h in history], [h[m] for h in history], label=m)
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Task for you - train the autoencoder

* a relatively small model (2 convolutions in the encoder and 2 convolutions in the decoder) should suffice
* try out 2 different latent sizes
* try out [MSELoss](https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html) or [BCEWithLogitsLoss](https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html)
###Code
ae = AE(..., latent_size=...)
train_ae(ae, train_dl=dl_train, test_dl=dl_test, num_epochs=10, loss_fn=...)
###Output
_____no_output_____
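###Markdown
One possible way to fill in the blanks above (the hyperparameters here are illustrative, not prescribed by the lab):
###Code
ae = AE(hidden_sizes=[16, 32], downsize_steps=[0], latent_size=64)
train_ae(ae, train_dl=dl_train, test_dl=dl_test, num_epochs=10, loss_fn=nn.MSELoss())
###Output
_____no_output_____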
###Markdown
**Question 1** - What are the advantages / disadvantages of using MSE and BCE as reconstruction losses? Which one, in your opinion, makes more sense for this dataset? Which one would you use for a dataset such as CIFAR-10?

**YOUR ANSWER HERE**

Task for you - visualize the latent space

Run the autoencoder on the validation dataset and visualize the latent embeddings in 2-D space using T-SNE or PCA. Please color each embedding according to its ground-truth label.

Since the AE returns a tuple of `(reconstruction, latent)`, you should be able to re-use the `draw_embeddings` function from Lab 3.
###Code
def draw_embeddings(net, dl):
    # YOUR CODE HERE
    pass
###Output
_____no_output_____
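###Markdown
A minimal sketch of one possible `draw_embeddings` (it assumes the net returns `(reconstruction, latent)` as above and that `ae` was trained in the earlier cell; PCA could equally be TSNE):
###Code
def draw_embeddings_sketch(net, dl):
    net.eval()
    latents, labels = [], []
    with torch.no_grad():
        for X, y in dl:
            _, latent = net(X.to(device))
            latents.append(latent.cpu().numpy())
            labels.append(y.numpy())
    coords = PCA(n_components=2).fit_transform(np.concatenate(latents))
    plt.scatter(coords[:, 0], coords[:, 1], c=np.concatenate(labels), cmap="tab10", s=4)
    plt.colorbar()
    plt.show()

draw_embeddings_sketch(ae, dl_test)
###Output
_____no_output_____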
###Markdown
2 - Discrete AutoEncoder

We would like our AutoEncoder to model a **discrete** data distribution. In other words, if the latent vector of the AE has shape $K$, we want it to take only $K$ different values, represented by one-hot vectors. Notice that this is equivalent to **clustering** - the encoder assigns $x$ into one of the $K$ clusters!

How to achieve that in practice?
* the Encoder should transform each image $x$ into a one-hot vector $z$ of shape $K$
  * we can apply e.g. $softmax$ to the latent vector output by the Encoder in order to obtain the parameters of discrete data distribution $p$
* the Decoder should transform a one-hot vector of shape $K$ sampled from $p$ into something as similar to $x$ as possible.
* we should minimize the **expected** value of cost: $$\mathbb{E}_{z \sim softmax(E(x))} || x - D(z)||^2$$

The rough training procedure for a sample $x$ will be to:
1. Estimate the distribution $p(x) = softmax(E(x)) = [\pi_1, ..., \pi_K]$ with the encoder
2. Sample $M$ one-hot vectors $(z_1, ..., z_M)$ from distribution $p(x)$
3. Use the Decoder to transform $(z_1, ..., z_M)$ into $(\hat{x}_1, ..., \hat{x}_M)$
4. Minimize the cost: $\frac{1}{M} \sum_{i=1}^M ||x - \hat{x_i}||^2$

Step **2** is especially tricky - *how to sample from the distribution generated by the encoder in a differentiable way*?

2a - score function

One of the ways we can do this is to multiply the cost of each sample $\hat{x_i}$ by the **probability** of that sample:

$$\frac{1}{M} \sum_{i=1}^M ||x - \hat{x_i}||^2 \cdot \mathbf{p_{\hat{z_i}}}$$

For example, let's say that for some $x$ the Encoder generated a discrete distribution $p = [0.1, 0.3, 0.2, 0.4]$. We sample $(M=4)$ one-hot vectors from this distribution, e.g.:
* $z_1 = [0, 1, 0, 0] \rightarrow p_{z_1} = 0.3$
* $z_2 = [0, 0, 0, 1] \rightarrow p_{z_2} = 0.4$
* $z_3 = [0, 0, 0, 1] \rightarrow p_{z_3} = 0.4$
* $z_4 = [0, 0, 1, 0] \rightarrow p_{z_4} = 0.2$

And the cost function of reconstructing $x$ will be given by:

$$\frac{ (||x - D(z_1)||^2 \cdot p_{z_1}) + (||x - D(z_2)||^2 \cdot p_{z_2}) + (||x - D(z_3)||^2 \cdot p_{z_3}) + (||x - D(z_4)||^2 \cdot p_{z_4}) }{4}$$

This is called the score function. In this case, the operation of sampling is non-differentiable, but the gradient flows from the decoder to the encoder through the values of vector $p = softmax(E(x))$.

Task for you - train a discrete AE using the score function

* modify the below training loop to calculate the loss function (a sketch of one possible loss computation follows this cell)
* use a latent size bigger than the number of classes, e.g. 64
* try out a couple of different values of $M$, e.g. 1, 2, 4.
* in each validation step you can simply transform the latent vector into one-hot by taking the argmax of the softmax
* after the training, draw the embeddings generated by the encoder. How many clusters are there?
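###Markdown
A sketch of one way to compute the score-function loss from the formula above (`Categorical` comes from `torch.distributions`; the helper name and structure are illustrative, not the official solution):
###Code
from torch.distributions import Categorical

def score_function_loss(net, X, M=4):
    """Average reconstruction cost of M sampled one-hot latents, each weighted by its probability."""
    logits = net.encoder(X)                # [B, K]
    probs = torch.softmax(logits, dim=-1)  # pi = softmax(E(x))
    dist = Categorical(probs=probs)
    loss = 0.0
    for _ in range(M):
        idx = dist.sample()                                    # [B] sampled category per image
        z = torch.nn.functional.one_hot(idx, probs.shape[-1]).float()
        x_hat = net.decoder(z)
        cost = ((X - x_hat) ** 2).flatten(1).mean(dim=1)       # ||x - x_hat||^2 per image
        p_z = probs.gather(1, idx.unsqueeze(1)).squeeze(1)     # probability of each sample
        loss = loss + (cost * p_z).mean()
    return loss / M
###Output
_____no_output_____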
###Code
def train_discrete_ae_score(net, train_dl, test_dl, num_epochs: int = 20, lr: float = 3e-4, loss_fn = None):
net = net.to(device)
loss_fn = loss_fn or nn.MSELoss()
opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=3e-4)
history = []
epoch_progress = tqdm(list(range(num_epochs)))
for i in epoch_progress:
train_loss = 0
net.train()
for iteration, (X_train, y_train) in enumerate(train_dl):
X_train, y_train = [t.to(device) for t in [X_train, y_train]]
opt.zero_grad()
latent = net.encoder(X_train)
###### YOUR CODE HERE ########
loss = ... # score function loss
##############################
loss.backward()
opt.step()
train_loss += loss.item()
val_loss = 0
net.eval()
with torch.no_grad():
for iteration, (X_val, y_val) in enumerate(test_dl):
X_val, y_val = [t.to(device) for t in [X_val, y_val]]
latent = net.encoder(X_val)
softmax_latent = torch.softmax(latent, -1)
### YOUR CODE here
# transform the softmax_latent to one-hot vectors
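# one possible sketch: place a 1 at the argmax of the softmax probabilities, e.g.
# onehot_latent = torch.nn.functional.one_hot(softmax_latent.argmax(-1), softmax_latent.shape[-1]).float()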
onehot_latent = ...
###
X_pred = net.decoder(onehot_latent)
if iteration == 0:
draw_reconstruction(
X_val[:10].detach().cpu().numpy(), X_pred[:10].detach().cpu().numpy(), epoch=i
)
loss = loss_fn(X_val, X_pred)
val_loss += loss.item()
epoch_progress.set_description(f'#Epoch: {i}, train loss: {train_loss:.2f}, val loss: {val_loss:.2f}')
history.append({"e": i, "train_loss": train_loss, "val_loss": val_loss})
for m in ["train_loss", "val_loss"]:
plt.plot([h["e"] for h in history], [h[m] for h in history], label=m)
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
2b - Gumbel-softmaxAn alternative way is to approximate the discrete distribution with a continuous one, through a Gumbel-softmax trick.Suppose we have a discrete distribution with probabilities:$$\pi = [\pi_1, ..., \pi_K]$$In order to sample from such a distribution, we can sample a vector of $K$ values from a uniform distribution:$$u = [u_1, ..., u_K]; \quad u_i \sim Unif(0,1)$$then, transform it into a vector $g$:$$g = -log(-log(u)) = [g_1, ..., g_K]$$Let's now sum $g$ and $log(\pi)$ and divide the sum by the **temperature** $\tau$. Finally, let's apply $softmax$ to the resulting vector:$$y = softmax((g + log(\pi)) / \tau)$$The result of this operation is a vector of $K$ values which sum up to 1. Since vectors $g$ come from a continuous uniform distribution and vectors $y$ are a function of $g$, $\pi$ and $\tau$, it turns out that vectors $y$ come from a continuous distribution with parameters $\pi$ and $\tau$. Task for you - please implement sampling from such a distribution* select a non-trivial categorical distribution $\pi$* try out sampling for different values of $\tau$, ranging between $10^{2}$ ... $10^{-5}$* for each $\tau$ sample 1000 vectors and calculate the mean sample
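For reference, the formulas above translate almost line-by-line into code; one possible sketch (an illustration, not the required solution):
###Code
def gumbel_softmax_sample(pi: torch.Tensor, tau: float, n: int) -> torch.Tensor:
    # pi: [K] categorical probabilities; returns [n, K] continuous samples y
    u = torch.rand(n, pi.shape[0])          # u ~ Unif(0, 1)
    g = -torch.log(-torch.log(u))           # Gumbel(0, 1) noise
    return torch.softmax((g + torch.log(pi)) / tau, dim=-1)
###Output
_____no_output_____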
###Code
def gumbel_sample(p: torch.Tensor, t: float, n: int):
"""
p: tensor of shape [d] - represents a categorical distribution with `d` categories
t: temperature
n: number of samples
returns:
a tensor of shape [n, d] with n samples from distribution Y
"""
p = torch.tensor([.25, .50, .15, .1]).float()
gumbel_sample(p, t=100, n=1000).mean(dim=0)
###Output
_____no_output_____
###Markdown
**Question 2** - what have you noticed for large and small values of $\tau$?**YOUR ANSWER HERE** Task for you - train a discrete autoencoder with Gumbel-softmax* in each training step, for each image $x$: * generate distribution $E(x) = \pi_x$ with the encoder * sample $M$ one-hot vectors $[z_1^x, ..., z_M^x]$ from $\pi_x$ * use the decoder to transform $[z_1^x, ..., z_M^x]$ into $[\hat{x_1}, ..., \hat{x_M}]$ * minimize the mean difference (MSE or BCE) between $x$ and $[\hat{x_1}, ..., \hat{x_M}]$ * with each epoch, decrease the temperature $\tau$* in each validation step you can simply transform the latent vector into one-hot using softmaxAfter training:* visualize the latent clusters with colors from ground-truth labels (just like with the vanilla AE)* for each dimension of the latent generate a one-hot vector and visualize the output of the decoder for that vector. For example, if our latent size was $4$, we would visualize in total four decoder outputs for the following one-hot vectors: * $[1, 0, 0, 0]$ * $[0, 1, 0, 0]$ * $[0, 0, 1, 0]$ * $[0, 0, 0, 1]$ (A sketch using PyTorch's built-in helper for this trick follows.)
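Note that PyTorch also ships this trick as `torch.nn.functional.gumbel_softmax`, which you can use to cross-check your implementation — a small sketch, where `logits` is a stand-in for the raw encoder outputs:
###Code
import torch.nn.functional as F

logits = torch.randn(8, 64)                              # stand-in for encoder outputs [B, K]
y_soft = F.gumbel_softmax(logits, tau=1.0, hard=False)   # soft, differentiable samples
y_hard = F.gumbel_softmax(logits, tau=1.0, hard=True)    # one-hot with straight-through gradients
print(y_soft.shape, y_hard.sum(dim=-1))                  # each hard sample sums to exactly 1
###Output
_____no_output_____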
###Code
def train_discrete_ae_gumbel(net, train_dl, test_dl, num_epochs: int = 20, lr: float = 3e-4, loss_fn = None):
net = net.to(device)
loss_fn = loss_fn or nn.MSELoss()
opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=3e-4)
history = []
epoch_progress = tqdm(list(range(num_epochs)))
for i in epoch_progress:
train_loss = 0
net.train()
for iteration, (X_train, y_train) in enumerate(train_dl):
X_train, y_train = [t.to(device) for t in [X_train, y_train]]
opt.zero_grad()
latent = net.encoder(X_train)
###### YOUR CODE HERE ########
loss = ...
##############################
loss.backward()
opt.step()
train_loss += loss.item()
val_loss = 0
net.eval()
with torch.no_grad():
for iteration, (X_val, y_val) in enumerate(test_dl):
X_val, y_val = [t.to(device) for t in [X_val, y_val]]
latent = net.encoder(X_val)
softmax_latent = torch.softmax(latent, -1)
### YOUR CODE here
# transform the softmax_latent to one-hot vectors
onehot_latent = ...
###
X_pred = net.decoder(onehot_latent)
if iteration == 0:
draw_reconstruction(
X_val[:10].detach().cpu().numpy(), X_pred[:10].detach().cpu().numpy(), epoch=i
)
loss = loss_fn(X_val, X_pred)
val_loss += loss.item()
epoch_progress.set_description(f'#Epoch: {i}, train loss: {train_loss:.2f}, val loss: {val_loss:.2f}')
history.append({"e": i, "train_loss": train_loss, "val_loss": val_loss})
for m in ["train_loss", "val_loss"]:
plt.plot([h["e"] for h in history], [h[m] for h in history], label=m)
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____ |
sessio3/2-CNN.ipynb | ###Markdown
AI4ALL Session 3. Image classification**Objectives**This session introduces the practical use of neural networks in the setting of multi-class classification. This requires mastering the concepts related to neural networks and their associated parameters.The objectives of this session are:* Apply neural network models, with emphasis on: 1. Applying different neural network topologies and understanding the advantages of each one. 2. Correctly evaluating the model error 3. Visualizing the data and the resulting model* Be able to apply neural network techniques to real cases* Validate the results on real data* Build the ability to present technical machine learning results properly to other people**Materials**1. Database with images of digits (github). This database is split into the following sets: * Train: the neural network must be trained on this set. * Validation: the network trained on the Train set must be evaluated on the validation set. 2. [The PyTorch Python library.](https://pytorch.org/docs/stable/index.html)3. Example code.**Database**We will train the neural network on the MNIST database, a database made up of images of handwritten digits.Specifically, we must learn a neural network that classifies each of the digit types in the database, $y \in \{0, 1, 2, 3, 4, 5, 6, 7, 8, 9\}$. The network will therefore have 10 outputs, and the predicted digit corresponds to the position with the largest magnitude. For example, the following vector corresponds to the number 3. The 8 is slightly more probable than the rest, since 3 and 8 share some similarities.``[0.01, 0.01, 0.01, 0.9, 0.01, 0.01, 0.01, 0.01, 0.02, 0.01]``Below we provide an interactive code example of how PyTorch works. **Remember that each cell depends on the previous one, and all of them must be run in order**.**Example with PyTorch**0. Installing the library. 1. Choose the desired configuration and follow the command given on the [pytorch page](https://pytorch.org/). 2. Install the dependencies to run this notebook (in case you want to run it interactively). * `numpy` * `matplotlib` 3. [Jupyter notebook](http://jupyter.org/install). 4. (Optional) Nvidia drivers and CUDA to allow parallel execution on a GPU. Remember to be consistent about the Python version you use. We recommend python3.6. 1. Let's train a neural network1. We load the libraries and the database. In this case the data is compressed in a ``.npy`` file, but it could just as well be a folder of images.
###Code
%matplotlib inline
import torch # Import main library
from torch.utils.data import DataLoader # Main class for threaded data loading
import matplotlib.pyplot as plt
import numpy as np
# Optimizaiton config
batch_size = 200 # Number of samples used to estimate the gradient (bigger = stable training & bigger learning rate)
learning_rate = 0.004 # Optimizer learning rate
epochs = 0 # Number of iterations over the whole dataset.
# Prepare data
train_data = np.load('cnn/data/train.npy')
val_data = np.load('cnn/data/val.npy')
train_images = np.array(train_data.item()["images"])
val_images = np.array(val_data.item()["images"])
train_labels = np.array(train_data.item()["labels"])
val_labels = np.array(val_data.item()["labels"])
train_size = train_labels.shape[0]
val_size = val_labels.shape[0]
print(train_size, "training images of size", train_images.shape[1:])
###Output
47995 training images of size (28, 28)
###Markdown
Let's verify that the data loaded correctly (you should see 25 drawings of digits):
###Code
plt.figure()
train_indices = np.random.permutation(train_size)
for i in range(5):
for j in range(5):
plt.subplot(5, 5, 1 + i*5+j)
plt.imshow(train_images[train_indices[i*5+j], :, :], cmap="gray")
###Output
_____no_output_____
###Markdown
2. We create a neural network with 3 layers: input, output, and one hidden layer (layer2). It is important for the input and output dimensions to be correct: the input must be 28 * 28 (the number of pixels in the image, which is 28 x 28), and the output must have size 10, since there are 10 types of digits.
###Code
class NeuralNet(torch.nn.Module):
def __init__(self, n_neurones=8, n_sortides=10):
super().__init__() # Necessary for torch to detect this class as trainable
# Here define network architecture
self.layer1 = torch.nn.Linear(28**2, n_neurones) # Linear layer with n_neurones
self.layer2 = torch.nn.Linear(n_neurones, n_neurones * 2) # Linear layer with n_neurones neurones * 2
self.output = torch.nn.Linear(n_neurones * 2, 10) # Linear output layer with 10 neurons (one per digit class)
def forward(self, x):
# Here define architecture behavior
x = torch.sigmoid(self.layer1(x))
x = torch.sigmoid(self.layer2(x))
return torch.sigmoid(self.output(x)) # 10 outputs, one per digit (note: CrossEntropyLoss expects raw logits, so this final sigmoid is not ideal)
# Instantiate network
model = NeuralNet()
###Output
_____no_output_____
###Markdown
3. We create the optimizer, declare the function to optimize, and the remaining helper functions used to optimize the model.
###Code
# Create optimizer for the network parameters
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
# Instantiate loss function
criterion = torch.nn.CrossEntropyLoss() # Multi-class cross-entropy loss
# Function to iterate the training set and update network weights with batches of images.
def train(model, optimizer, criterion, dimensions_entrada=[-1, 28**2]):
model.train() # training mode
running_loss = 0
running_corrects = 0
total = 0
# Loop through the training set in steps of batch size
for idx in range(0, train_size, batch_size):
optimizer.zero_grad() # make the gradients 0
# Prepare images (x) and labels (y) to pytorch format
x = torch.from_numpy(train_images[train_indices[idx:(idx + batch_size)], ...]).float()
y = torch.from_numpy(train_labels[train_indices[idx:(idx + batch_size)], ...]).long()
output = model(x.view(*dimensions_entrada)) # forward pass
preds = torch.max(output, 1)[1]
loss = criterion(output, y) # calculate the loss value
loss.backward() # compute the gradients
optimizer.step() # uptade network parameters
# gather training statistics
running_loss += loss.item() * x.size(0)
running_corrects += torch.sum(preds==y).item() # .item() converts type from torch to python float or int
total += float(y.size(0))
epoch_loss = running_loss / total # mean epoch loss
epoch_acc = running_corrects / total # mean epoch accuracy
return epoch_loss, epoch_acc
# Function to iterate the validation set and evaluate the network with batches of images (no weight updates).
def val(model, criterion, dimensions_entrada=[-1, 28**2]):
model.eval() # validation mode
running_loss = 0
running_corrects = 0
total = 0
# Loop through the validation set in steps of batch size
with torch.no_grad(): # We are not backpropagating through the validation set, so we save time and memory
for idx in range(0, val_size, batch_size):
x = torch.from_numpy(val_images[idx:(idx + batch_size), ...]).float()
y = torch.from_numpy(val_labels[idx:(idx + batch_size), ...]).long()
output = model(x.view(*dimensions_entrada)) # forward pass
preds = torch.max(output, 1)[1]
loss = criterion(output, y) # calculate the loss value
# statistics
running_loss += loss.item() * x.size(0)
running_corrects += torch.sum(preds==y).item() # .item() converts type from torch to python float or int
total += float(y.size(0))
epoch_loss = running_loss / total # mean epoch loss
epoch_acc = running_corrects / total # mean epoch accuracy
return epoch_loss, epoch_acc
###Output
_____no_output_____
###Markdown
4. We iterate over the whole dataset $epochs$ times and display the train loss and accuracy. Remember to give the variable ``epochs`` a value.
###Code
# Main training loop
train_loss = []
train_accuracy = []
val_loss = []
val_accuracy = []
# Remove this line out of jupyter notebooks
from IPython import display
for epoch in range(epochs):
t_loss, t_acc = train(model, optimizer, criterion)
v_loss, v_acc = val(model, criterion)
train_loss.append(t_loss)
train_accuracy.append(t_acc)
val_loss.append(v_loss)
val_accuracy.append(v_acc)
plt.subplot(1,2,1)
plt.title("loss")
plt.plot(train_loss, 'b-')
plt.plot(val_loss, 'r-')
plt.legend(["train", "val"])
plt.subplot(1,2,2)
plt.title("accuracy")
plt.plot(train_accuracy, 'b-')
plt.plot(val_accuracy, 'r-')
plt.legend(["train", "val"])
display.clear_output(wait=True)
display.display(plt.gcf())
display.clear_output(wait=True)
###Output
_____no_output_____
###Markdown
It should end up at ~80% train accuracy and val accuracy (percentage of images classified correctly). 2. Convolutional Neural NetworksAs seen in the theory part, there is a type of neural network better suited to images: convolutional neural networks. The only difference with respect to a traditional neural network is that a convolutional network uses a new type of neuron layer, the convolutional layer. This layer does not connect to the whole input image, only to a region, and its connections are reused from region to region until the whole image has been covered.In PyTorch we can define a convolutional layer as follows ([see documentation](https://pytorch.org/docs/stable/search.html?q=conv2d&check_keywords=yes&area=default)):
###Code
canals_entrada = 1 # the images are black and white, so we don't need three color channels
canals_sortida = 16 # we apply 16 neurons that slide across the image
mida_finestra = 5 # size of the window we apply
conv_layer = torch.nn.Conv2d(canals_entrada, canals_sortida, mida_finestra)
###Output
_____no_output_____
###Markdown
Think of each output channel (each neuron) as detecting a different pattern within the image. If we add a new convolutional layer on top of this one, each of the $5\times 5$ points of its window will detect combinations of the 16 patterns detected by the previous layer. In this way we build an incrementally more abstract representation of the image content (1. lines, 2. curves, 3. a general idea of the number (the number of loops, for example), 4. the number).We can try applying the layer to one of the numbers in the database to see what happens:
###Code
imatge = torch.from_numpy(train_images[0]) # convert the image to pytorch format
imatge = imatge.view(1,1,28,28) # pytorch expects dimensions = batch_size, channels, height, width
imatge = imatge.float() # convert to floating point
out = conv_layer(imatge)
print(out.size())
###Output
torch.Size([1, 16, 24, 24])
###Markdown
We can see that the output has 16 channels and has shrunk from 28 to 24 pixels. This is because the window is $5\times5$, so its central point is at position 3 of the window. The central point therefore starts at position 3 of the image and ends at the end - 3, leaving out the first two and last two pixels on each axis. That is why the size drops from 28 to 24 pixels (the general output-size rule is sketched in the next cell).What does the output look like? Let's visualize it right after the sketch:
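For illustration, the general output-size rule for a convolution — a sketch (`Conv2d` defaults to stride 1 and no padding, as used above):
###Code
def conv_out_size(n_in: int, kernel: int, padding: int = 0, stride: int = 1) -> int:
    # standard convolution output-size formula
    return (n_in - kernel + 2 * padding) // stride + 1

print(conv_out_size(28, 5))  # -> 24, matching torch.Size([1, 16, 24, 24]) above
###Output
_____no_output_____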
###Code
def mostra_imatge(im):
out = im.data.numpy()
out = out - out.min()
out = out / out.max()
out = out * 255
out = out.repeat(3, 2)
out = out.astype('uint8')
plt.imshow(out)
plt.figure()
plt.title("entrada")
mostra_imatge(imatge.view(28,28,1))
plt.figure()
for i in range(4):
for j in range(4):
plt.subplot(4,4,1 + i * 4 + j)
plt.title("canal_%d" %(i*4+j))
canal = out[0,i*4+j,:,:].view(24, 24, 1)
mostra_imatge(canal)
###Output
_____no_output_____
###Markdown
Remember that this convolution has not been trained, and it is therefore randomly initialized. Even so, you can see how some channels specialize in detecting inner corners, outer corners, or curves (in brighter white).We can now train a convolutional network. As an exercise, you can try to modify the previous network without looking at the following code:
###Code
import torch.nn as nn
class CNN(nn.Module):
def __init__(self, n_neurones, n_sortides, mida_finestra):
super().__init__()
self.layer1 = nn.Conv2d(1, n_neurones, mida_finestra)
self.pool1 = nn.MaxPool2d(2, 2)
self.nolinear1 = nn.ReLU()
self.layer2 = nn.Conv2d(n_neurones, n_neurones * 2, mida_finestra)
self.pool2 = nn.MaxPool2d(2, 2)
self.nolinear2 = nn.ReLU()
self.layer3 = nn.Linear(4*4*n_neurones*2, n_sortides)
def forward(self, x):
x = self.layer1(x)
x = self.nolinear1(x)
x = self.pool1(x)
x = self.layer2(x)
x = self.nolinear2(x)
x = self.pool2(x)
x = x.view(x.size(0), -1)
return self.layer3(x)
epochs = 0 # number of training iterations
learning_rate = 0.001 # learning rate
model = CNN(16, 10, 5)
optimizer = torch.optim.Adam(model.parameters(), learning_rate)
###Output
_____no_output_____
###Markdown
As you can see, we added a new layer: MaxPool2d. What this layer does is slide a window over the output of one of the network layers and return the maximum of the window at each point. This has two effects:1. If we have a window of size 2, since for every 2 activations we return one (the maximum), the output size is divided by 2.2. Since we only return the maximum of the two, it does not matter whether it sits on the right or the left of the window. This means that if we shift the image one pixel to the right or left, the network output will not change. The network is therefore much more robust to small shifts (a quick shape check is sketched in the next cell).**WARNING: if this runs very slowly, skip to the next section**After the sketch, we can reuse our code to train this model:
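A quick, illustrative shape check of the pooling layer:
###Code
pool = torch.nn.MaxPool2d(2, 2)
print(pool(torch.randn(1, 16, 24, 24)).size())  # expected: torch.Size([1, 16, 12, 12])
###Output
_____no_output_____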
###Code
# Main training loop
train_loss = []
train_accuracy = []
val_loss = []
val_accuracy = []
# Remove this line out of jupyter notebooks
from IPython import display
for epoch in range(epochs):
t_loss, t_acc = train(model, optimizer, criterion, [-1, 1, 28, 28])
v_loss, v_acc = val(model, criterion, [-1, 1, 28, 28])
train_loss.append(t_loss)
train_accuracy.append(t_acc)
val_loss.append(v_loss)
val_accuracy.append(v_acc)
plt.subplot(1,2,1)
plt.title("loss")
plt.plot(train_loss, 'b-')
plt.plot(val_loss, 'r-')
plt.legend(["train", "val"])
plt.subplot(1,2,2)
plt.title("accuracy")
plt.plot(train_accuracy, 'b-')
plt.plot(val_accuracy, 'r-')
plt.legend(["train", "val"])
display.clear_output(wait=True)
display.display(plt.gcf())
#torch.save({"net": model.state_dict()}, "cnn/model_cnn_gran.pth") # save the weights
display.clear_output(wait=True)
###Output
_____no_output_____
###Markdown
Pre-trained modelsI have trained a convolutional neural network using a GPU. You can use its connections (weights) to initialize the network from the previous section.
###Code
import torch
pesos = torch.load("cnn/model_cnn.pth", map_location="cpu")
model.load_state_dict(pesos["net"])
# Check what accuracy the model with the loaded weights gets on the validation set
v_loss, v_acc = val(model, criterion, [-1, 1, 28, 28])
print("%.02f%% accuracy" %(100*v_acc))
from scipy.misc import imresize # note: imresize was removed in SciPy >= 1.3; use PIL or skimage.transform.resize instead
###Output
97.30% accuracy
|
notebooks/Tests/TestPeakAndBinarySearch.ipynb | ###Markdown
SearchingTry running it in a live notebook for animation!* peakSearch* bracketSearch* binarySearch
###Code
# Reload modules every time code is called. Set autoreload 0 to disable
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(0)
from lightlab.util.search import peakSearch, binarySearch, SearchRangeError
livePlots = False
###Output
_____no_output_____
###Markdown
You want to find a peak? Sweeping is not good enough
###Code
center = .82
amp = .7
fwhm = .2
defaultNoise = amp * 5e-3
noise = defaultNoise
assertionTolerance = .2
def myPeakedFun(x):
y = amp / (1 + (2 * (x - center) / fwhm) ** 2) + noise * np.random.randn()
return y
xq = np.linspace(0,3, 10)
plt.plot(xq, myPeakedFun(xq))
plt.title('Poor, low-res sampling of underlying peak')
###Output
_____no_output_____
###Markdown
Peak searchThis demonstrates noise tolerance when `nSwarm` is greater than 3
###Code
for noi, nSwarm in zip([defaultNoise, 5e-2], [3, 7]):
noise = noi
xPeak, yPeak = peakSearch(evalPointFun=myPeakedFun, startBounds=[0,3],
nSwarm=nSwarm, xTol=assertionTolerance/4, livePlot=livePlots)
assert abs(xPeak - center) < assertionTolerance
assert abs(yPeak - amp) < assertionTolerance
noise = defaultNoise
###Output
_____no_output_____
###Markdown
Interactive peak descent through binary search
###Code
binSearchOpts = dict(evalPointFun=myPeakedFun, xTol=.005, livePlot=livePlots)
###Output
_____no_output_____
###Markdown
This is easy, well bounded
###Code
rightBounds = [xPeak, 3]
leftBounds = [0, xPeak]
hwhmKwargs = dict(targetY=0.5*yPeak, **binSearchOpts)
xRightHalf = binarySearch(startBounds=rightBounds, **hwhmKwargs)
xLeftHalf = binarySearch(startBounds=leftBounds, **hwhmKwargs)
assert abs(xLeftHalf - (center - fwhm/2)) < assertionTolerance
assert abs(xRightHalf - (center + fwhm/2)) < assertionTolerance
###Output
_____no_output_____
###Markdown
Non-monotonic but still well definedThere is only one value in the domain that satisfies the target. It starts off bracketed.No test for when there is a peak in the middle and it starts *not* bracketed, i.e. if the rightStart fwhm factor were 0.75 (rightStart = center + 0.75 * fwhm). To handle this, bracketSearch would have to report that it bracketed on both sides
###Code
rightStart = center + fwhm*.4
for leftStart in [0, center - fwhm, center - 0.6 * fwhm]:
xLeftHalf = binarySearch(startBounds=[leftStart, rightStart], **hwhmKwargs)
assert abs(xLeftHalf - (center - fwhm/2)) < assertionTolerance
###Output
_____no_output_____
###Markdown
Bad bound conditioning saved by `bracketSearch`
###Code
noise = defaultNoise / 10 # turn down noise a little bit
# Bad domain that totally misses peak
xLeftHalf = binarySearch(startBounds=[0, xPeak/2], **hwhmKwargs)
assert abs(xLeftHalf - (center - fwhm/2)) < assertionTolerance
# Target very close to peak
for trialAgainstNoise in range(5):
try:
xRightOnPeak = binarySearch(startBounds=[0, xPeak/4], targetY=0.99*amp, **binSearchOpts)
break
except SearchRangeError as err:
if 'probably noise' in err.args[0]:
continue
else:
raise err
else:
raise Exception('We tried multiple times but noise killed this one')
assert abs(xRightOnPeak - center) < assertionTolerance
noise = defaultNoise
###Output
_____no_output_____
###Markdown
Graceful failures
###Code
# Targeting something too high, with peak within startBounds
goodAsItGets = binarySearch(startBounds=[0, center + .5 * fwhm], targetY=2, **binSearchOpts)
assert abs(goodAsItGets - center) < assertionTolerance
# Peak starts outside of startBounds
goodAsItGets = binarySearch(startBounds=[center + .5 * fwhm, 3], targetY=2, **binSearchOpts)
assert abs(goodAsItGets - center) < assertionTolerance
###Output
_____no_output_____
###Markdown
These should generate errors
###Code
# Targeting outside of hard constrain domain
try:
binarySearch(startBounds=[xPeak, xPeak+.1], targetY=0, hardConstrain=True, **binSearchOpts)
assert False
except SearchRangeError as err:
assert err.args[1] == 'low'
###Output
_____no_output_____ |
part1_feature.ipynb | ###Markdown
Historical order information
###Code
df_history_action.head()
# Get the info of the last step in each wave
df_temp = df_history_action.groupby(['group'])['expect_time'].apply(
lambda x: x.values.tolist()[-1]).reset_index()
df_temp.columns = ['group', 'current_time']
df_feature = df_feature.merge(df_temp, how='left')
df_temp = df_history_action.groupby(['group'])['tracking_id'].apply(
lambda x: x.values.tolist()[-1]).reset_index()
df_temp.columns = ['group', 'last_tracking_id']
df_feature = df_feature.merge(df_temp, how='left')
df_temp = df_history_action.groupby(['group'])['action_type'].apply(
lambda x: x.values.tolist()[-1]).reset_index()
df_temp.columns = ['group', 'last_action_type']
df_feature = df_feature.merge(df_temp, how='left')
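# Note (illustrative alternative): the three groupby/apply blocks above can be
# collapsed into a single groupby(...).last(); beware that .last() skips NaNs,
# whereas tolist()[-1] keeps them:
last_rows = df_history_action.groupby('group')[['expect_time', 'tracking_id', 'action_type']].last().reset_index()
last_rows.columns = ['group', 'current_time', 'last_tracking_id', 'last_action_type']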
###Output
_____no_output_____
###Markdown
Features from the distance table
###Code
df_distance.head()
df_distance = df_distance.rename(columns={'tracking_id': 'last_tracking_id',
'source_type': 'last_action_type', 'target_tracking_id': 'tracking_id', 'target_type': 'action_type'})
df_feature = df_feature.merge(df_distance.drop(
['courier_id', 'wave_index', 'date'], axis=1), how='left')
df_feature.head()
###Output
_____no_output_____
###Markdown
Features from the order table
###Code
df_order.head()
df_feature = df_feature.merge(
df_order[['tracking_id', 'weather_grade', 'aoi_id', 'shop_id', 'promise_deliver_time',
'estimate_pick_time']], how='left')
df_feature.head()
###Output
_____no_output_____
###Markdown
Features from the courier table
###Code
df_feature = df_feature.merge(df_courier, how='left')
df_feature.to_pickle('./temp/part1_feature.plk')
df_feature.shape
###Output
_____no_output_____ |
Sessions/Session06/Day4/IntroductionToGlueSolutions.ipynb | ###Markdown
Introduction to Glue-Viz**version 0.1*****By AA Miller (Northwestern CIERA/Adler Planetarium)03 May 2018 Introduction [All of my slides from Tuesday morning] ... that is all GlueAs a point of review, on Tuesday we learned about ParaView. I'd summarize the major strength of ParaView as providing an interface to create really nice 3D representations of data (and we barely scratched the surface of the most complex renderings that you can create). On Wednesday, we learned about `bokeh`. I would summarize the major strengths of `bokeh` as being the ability to create linked plots, as well as the relative ease of getting the output from bokeh into a server and on the web. Today we are going to learn about [`glue`](http://glueviz.org), which is a pure python library that is designed to explore the relationships between related datasets. `glue` is actually developed by astronomers (in collaboration with medical imaging researchers), so a lot of the functionality is designed with *our* needs in mind.(though note - it is created as a general purpose tool. But, if there is something that you'd like to see in `glue` that does not exist, then you can reach out and maybe they will develop it) `glue` includes elements that we have already explored this week. In particular, `glue`, due to the medical imaging connection, provides nice functionality for visualizing 3D data sets. Additionally, given the large collection of heterogeneous catalogs in astronomy, `glue` is designed to make linking between data sets very straightforward. You should have already installed `glue`, but if not conda install -c glueviz glueviz Furthermore, our first example will use the data included in this tarball: https://northwestern.box.com/s/uiwq47ir8r4h6njlxv6njtx174wdeoox Problem 1) Using Glue **Problem 1a** Open `glue`. The standard way to do this is to launch the program from the command line: `glue`. At this stage you will notice 4 primary windows within the application: * upper left ––– data collection (lists all the open data, as well as the selected subsets) * middle left ––– viewer layers (shows the different layers, and allows control over which are displayed) * lower left ––– viewer options (includes global options for the active viewer) * right ––– visualization canvas (this is where the data renderings are actually shown) **Problem 1b** Open the w5 fits image in `glue`. As a quick note - this image is from the [*WISE*](https://www.nasa.gov/mission_pages/WISE/main/index.html) satellite and it is showing the [Westerhout 5 (W5)](https://en.wikipedia.org/wiki/Westerhout_5) star-forming region.*Hint* - you can drag and drop, or select the file path in [*File $\rightarrow$ Open Data Set.*] **Problem 1c**Render the image by dragging the w5 entry in the data collection window to the visualization canvas. This will pop up a drop down menu asking what type of render you would like. Select the best option.*Hint* - you may want to resize the window within the visualization canvas. As previously noted, one of the great strengths of `glue` is the ability to drill down on subsets of linked data. At the top of the 2D image window there are 5 different methods for selecting subsets of the data. From left to right they include: rectangular selection, vertical selection, horizontal selection, circular selection, and finally freeform selection [this has similar functionality to `bokeh`'s lasso.]. **Problem 1d**Use the horizontal selection tool to select the subset of the data near the center of the image (this is done via drag and click). 
Then, use the vertical selection tool to select the subset of the data near the center of the image. Notice that there are now 2 subsets in the data collection panel, as well as additional entries in the viewer layers panel.**Problem 1e**Adjust the color of subset 1 to be "DSFP blue" and adjust the transparency bar to its maximum (i.e. minimize the transparency of the selection).Adjust the color of subset 2 to be "DSFP light grey" and make this selection more transparent. At this point, it is a little difficult to see the emission under the selected subsets. **Problem 1f**Select the w5 data in the data collection, and adjust the data scaling to match the optimal choice for astronomical images.*Hint* - think back to Zolt's lecture.[You may want to adjust the colorbar and range of the data being displayed. Be sure the subset panels can still be seen after making these changes.] There is a bright knot of emission in the northwest portion of the nebula; we will now focus on that.**Problem 1g**Adjust the subset selections to be centered on the bright emission knot in the northwest portion of the nebula. This can be done by selecting the subset in the data collection and then holding *ctrl* while dragging the mouse over a new region to redefine the subset. **Problem 1h**Create a histogram of the brightness data in the fits image [drag the w5 data from the data collection into the visualization canvas and select the appropriate option from the drop down menu]. Notice that you now have histograms in 3 different colors. This is because the data linking in `glue` is (in some cases) automatic. By creating the histogram for the data, you have also automatically created a histogram for the two subsets of the data. You will also notice that the histogram, as currently constructed, is not particularly informative. **Problem 1i**Update the range of the histogram to extend to a maximum value of 1000. Increase the number of bins to 25. Finally, normalize the histogram. Does the resulting histogram make sense? The current histograms are strongly polluted by background pixels. We can improve this with the selection tools.**Problem 1j**Select the pixels in the bright knot by changing the selection mode to "remove" (5th option). Then select the horizontal selection tool in the histogram plot. Drag and click to select the region with pixel values less than 500 to remove those from the selection.How do the histograms look now? Do the resulting image layers/histogram make sense?*Note* - don't forget to return to the default selection mode after you have done this. Problem 2) Linking Data SetsSo far we have only employed automatic linking via data subsets. This has some utility (for instance, I could imagine teaching non-experts about source detection using the steps we just covered regarding the removal of faint pixels), but the real power of `glue` is in linking heterogeneous data sets. **Problem 2a**Open the second data file from the tarball `w5_psc.vot`. *Aside* - this VO table file includes sources in the W5 region that were detected by the [*Spitzer*](http://www.spitzer.caltech.edu/) space telescope. One reason for comparing *WISE* to *Spitzer* is that *WISE* covers the entire sky, while *Spitzer* offers higher resolution and greater depth, so it has more complete catalogs in the areas that it has observed. Given that the catalog and image are heterogeneous, linking will not be automatic (as it was for the subsets created in problem 1). 
**Problem 2b**Link the data sets by selecting the *Link Data* option in the top of the menu bar.Select an appropriate component from the image and catalog data, and then link those components by clicking on the *glue* button. Get it? Link the things by "glueing" them together, using `glue`. ...Get it? No seriously, **Do you get it?** Be sure that you glue both of the relevant variables that connect these two data sets. Hold on, now it's about to get real. With the catalog and image now linked, subsets selected in either space (e.g., the bright emission knot selected in Problem 1) will automatically be reflected in the other space. **Problem 2c**Create a scatter plot of the catalog data by dragging `w5_psc.vot` into the visualization canvas.For the scatter plot show the [4.5] - [5.8] vs. [3.6] color magnitude diagram. **Problem 2d** Remove the previously created subsets. In the 2D image, choose the circular selection tool and highlight a small region centered on the bright knot in the northwest portion of the nebula. What do you notice when you make this selection? **Problem 2e**Show the individual *Spitzer* point sources on the image by selecting the subset in the data collection and dragging it onto the 2D image. Look at the overlap of the sources relative to the bright knot - does this make sense? **Problem 2f**Adjust the plot of the subset of points to provide a linear colormap for the data. Color the points by their [3.6] magnitude. Does the image make sense? What about the reverse? Can we select interesting sources in CMD space and highlight their spatial positions in the cluster? This could be useful, for example, to identify the location of the youngest stars within the W5 star-forming region. **Problem 2g**Select the *Spitzer* point source catalog in the data collection. Then, using the rectangular selection tool in the CMD, choose all the red sources with [4.5] - [5.8] > 1 mag. What can you say about the positions of the red sources relative to the 12 micron emission? Problem 3) ReproducibilityHopefully at this point it is clear that `glue` can be very powerful in the way that it allows linking across image and catalog data. However, everything we have done has been in an interactive mode that may be hard to reproduce. Fortunately, `glue` provides multiple different ways to save your work.You can either save your entire session, save specific plots from your session, or save subsets created via the various selection tools from your session. Problem 4) Easy (?) False Color Images You should have already unpacked a tarball with 5 fits images: https://northwestern.box.com/s/hmitigmvcfi2tuzlgt1psatebkyrk0e3 **Problem 4a**Open each of the 5 fits files (named g, r, i, z, y) in glue. *Note* - as you open each image after the first you will be prompted to "merge" the data. Select no on that option for now. **Problem 4b**Create a 2D image of the g-band data. **Problem 4c**Drag and drop the data from each of the other filters onto the g-band image. **Problem 4d**Change the color option from colorbar to "one color per channel". Then select 3 layers for the 2D image, assigning R, G, and B to one layer each. **Problem 4e**Adjust the scalings (and colors if necessary) to create a nice false color image of the galaxy. 
Problem 5) 3D scatter plots in glue Warning 1: 3D viewing in `glue` is relatively new, and as such it does not provide the full range of functionality that is eventually expected within the package.[Read the docs](http://docs.glueviz.org/en/latest/gui_guide/3d_viewers.html) for caveats regarding the use of the 3D viewer. Warning 2: There is a very very good chance that you may have a non-working version of `glue` on your machine if you have not updated your `anaconda` software since session 4. At this point, please proceed carefully to make sure you have the correct install for 3D rendering in `glue`.As a first test, please try: conda list glue. If that returns something like this: Name Version Build Channel glue-core 0.13.2 py36_0 glueviz glue-vispy-viewers 0.10 py36_1 glueviz glueviz 0.13.2 0 glueviz. Namely, `glue-core` and `glueviz` versions 0.13.x **AND** `glue-vispy-viewers` version 0.10 –– then you are safe and ready to proceed. Alternatively, if you have something like this: Name Version Build Channel glue-core 0.12.5 py36_0 glueviz glue-vispy-viewers 0.10 py36_1 glueviz glueviz 0.13.0 0 glueviz. Or, any combination of `glue-core` or `glueviz` <= 0.12.x **AND** `glue-vispy-viewers` version 0.10 –– then 3D viewing is likely not going to be supported in your installation. The easiest way to address this right now is to roll back your `glueviz` packages: conda install -c glueviz glueviz=0.12.4 conda install -c glueviz glue-vispy-viewers=0.9 If you are unsure about any of this, please raise your hand and I'll stop by to make sure everything is set up correctly. As an example of a 3D scatter plot in `glue`, we will create a fits table using the training data from the feature engineering exercise, then render the data. **Problem 5a** Create `astropy.io.fits` columns for each of the 3 data arrays.*Hint* - `fits.Column` takes `name`, `format`, and `array` as optional arguments. For `format` "D" = double precision, and "J" = integer. You'll want to pass `np.array`s to the `array` argument.
###Code
import pandas as pd
from astropy.io import fits
import numpy as np
train_df = pd.read_csv("training_sources.csv")
col1 = fits.Column(name="mean", format="D", array=np.array(train_df["mean"]))
col2 = fits.Column(name="nobs", format="J", array=np.array(train_df["nobs"]))
col3 = fits.Column(name="duration", format="J", array=np.array(train_df["duration"]))
###Output
_____no_output_____
###Markdown
**Problem 5b**Merge the columns into a `fits` hdu object.
###Code
hdu = fits.BinTableHDU.from_columns([col1, col2, col3])
###Output
_____no_output_____
###Markdown
**Problem 5c**Write the hdu object to a fits file.
###Code
hdu.writeto("training_set.fits")
###Output
_____no_output_____ |
Part 2 Introduction to Data Analysis/study materials /drawing-conclusions-solutions.ipynb/drawing-conclusions-solutions.ipynb | ###Markdown
Drawing ConclusionsUse the space below to address questions on datasets `clean_08.csv` and `clean_18.csv`
###Code
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# load datasets
df_08 = pd.read_csv('clean_08.csv')
df_18 = pd.read_csv('clean_18.csv')
df_08.head(1)
###Output
_____no_output_____
###Markdown
Q1: Are more unique models using alternative sources of fuel? By how much? Let's first look at what the sources of fuel are and which ones are alternative sources.
###Code
df_08.fuel.value_counts()
df_18.fuel.value_counts()
###Output
_____no_output_____
###Markdown
Looks like the alternative sources of fuel available in 2008 are CNG and ethanol, and those in 2018 are ethanol and electricity. (You can use Google if you weren't sure which ones are alternative sources of fuel!)
###Code
# how many unique models used alternative sources of fuel in 2008
alt_08 = df_08.query('fuel in ["CNG", "ethanol"]').model.nunique()
alt_08
# how many unique models used alternative sources of fuel in 2018
alt_18 = df_18.query('fuel in ["Ethanol", "Electricity"]').model.nunique()
alt_18
plt.bar(["2008", "2018"], [alt_08, alt_18])
plt.title("Number of Unique Models Using Alternative Fuels")
plt.xlabel("Year")
plt.ylabel("Number of Unique Models");
###Output
_____no_output_____
###Markdown
Since 2008, the number of unique models using alternative sources of fuel increased by 24. We can also look at proportions.
###Code
# total unique models each year
total_08 = df_08.model.nunique()
total_18 = df_18.model.nunique()
total_08, total_18
prop_08 = alt_08/total_08
prop_18 = alt_18/total_18
prop_08, prop_18
plt.bar(["2008", "2018"], [prop_08, prop_18])
plt.title("Proportion of Unique Models Using Alternative Fuels")
plt.xlabel("Year")
plt.ylabel("Proportion of Unique Models");
###Output
_____no_output_____
###Markdown
Q2: How much have vehicle classes improved in fuel economy? Let's look at the average fuel economy for each vehicle class for both years.
###Code
veh_08 = df_08.groupby('veh_class').cmb_mpg.mean()
veh_08
veh_18 = df_18.groupby('veh_class').cmb_mpg.mean()
veh_18
# how much they've increased by for each vehicle class
inc = veh_18 - veh_08
inc
# only plot the classes that exist in both years
inc.dropna(inplace=True)
plt.subplots(figsize=(8, 5))
plt.bar(inc.index, inc)
plt.title('Improvements in Fuel Economy from 2008 to 2018 by Vehicle Class')
plt.xlabel('Vehicle Class')
plt.ylabel('Increase in Average Combined MPG');
###Output
_____no_output_____
###Markdown
Q3: What are the characteristics of SmartWay vehicles? Have they changed over time? We can analyze this by filtering each dataframe by SmartWay classification and exploring these datasets.
###Code
# smartway labels for 2008
df_08.smartway.unique()
# get all smartway vehicles in 2008
smart_08 = df_08.query('smartway == "yes"')
# explore smartway vehicles in 2008
smart_08.describe()
###Output
_____no_output_____
###Markdown
Use what you've learned so far to further explore this dataset on 2008 smartway vehicles.
###Code
# smartway labels for 2018
df_18.smartway.unique()
# get all smartway vehicles in 2018
smart_18 = df_18.query('smartway in ["Yes", "Elite"]')
smart_18.describe()
###Output
_____no_output_____
###Markdown
Use what you've learned so far to further explore this dataset on 2018 smartway vehicles. Q4: What features are associated with better fuel economy? You can explore trends between cmb_mpg and the other features in this dataset, or filter this dataset like in the previous question and explore the properties of that dataset. For example, you can select all vehicles that have the top 50% fuel economy ratings like this.
###Code
top_08 = df_08.query('cmb_mpg > cmb_mpg.mean()')
top_08.describe()
top_18 = df_18.query('cmb_mpg > cmb_mpg.mean()')
top_18.describe()
###Output
_____no_output_____ |
Coursera/IBM Python 01/Course04/1.2derivativesandGraphsinPytorch_v2.ipynb | ###Markdown
Differentiation in PyTorch Table of ContentsIn this lab, you will learn the basics of differentiation. Derivatives Partial DerivativesEstimated Time Needed: 25 min Preparation The following are the libraries we are going to use for this lab.
###Code
# These are the libraries we will be using for this lab.
import torch
import matplotlib.pylab as plt
###Output
_____no_output_____
###Markdown
Derivatives Let us create the tensor x and set the parameter requires_grad to True because you are going to take the derivative of the tensor.
###Code
# Create a tensor x
x = torch.tensor(2.0, requires_grad = True)
print("The tensor x: ", x)
###Output
The tensor x: tensor(2., requires_grad=True)
###Markdown
Then let us create a tensor according to the equation $ y=x^2 $.
###Code
# Create a tensor y according to y = x^2
y = x ** 2
print("The result of y = x^2: ", y)
###Output
The result of y = x^2: tensor(4., grad_fn=<PowBackward0>)
###Markdown
Then let us take the derivative with respect to x at x = 2
###Code
# Take the derivative. Try to print out the derivative at the value x = 2
y.backward()
print("The derivative at x = 2: ", x.grad)
###Output
The derivative at x = 2: tensor(4.)
###Markdown
The preceding lines perform the following operation: $\frac{\mathrm{dy(x)}}{\mathrm{dx}}=2x$ $\frac{\mathrm{dy(x=2)}}{\mathrm{dx}}=2(2)=4$
###Code
print('data:',x.data)
print('grad_fn:',x.grad_fn)
print('grad:',x.grad)
print("is_leaf:",x.is_leaf)
print("requires_grad:",x.requires_grad)
print('data:',y.data)
print('grad_fn:',y.grad_fn)
print('grad:',y.grad)
print("is_leaf:",y.is_leaf)
print("requires_grad:",y.requires_grad)
###Output
data: tensor(4.)
grad_fn: <PowBackward0 object at 0x7ff474ddbbe0>
grad: None
is_leaf: False
requires_grad: True
###Markdown
Let us try to calculate the derivative for a more complicated function.
###Code
# Calculate the y = x^2 + 2x + 1, then find the derivative
x = torch.tensor(2.0, requires_grad = True)
y = x ** 2 + 2 * x + 1
print("The result of y = x^2 + 2x + 1: ", y)
y.backward()
print("The derivative at x = 2: ", x.grad)
###Output
The result of y = x^2 + 2x + 1: tensor(9., grad_fn=<AddBackward0>)
The derivative at x = 2: tensor(6.)
###Markdown
The function is in the following form:$y=x^{2}+2x+1$ The derivative is given by: $\frac{\mathrm{dy(x)}}{\mathrm{dx}}=2x+2$$\frac{\mathrm{dy(x=2)}}{\mathrm{dx}}=2(2)+2=6$ Practice Determine the derivative of $ y = 2x^3+x $ at $x=1$
###Code
# Practice: Calculate the derivative of y = 2x^3 + x at x = 1
# Type your code here
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- x = torch.tensor(1.0, requires_grad=True)y = 2 * x ** 3 + xy.backward()print("The derivative result: ", x.grad) --> We can implement our own custom autograd Functions by subclassing torch.autograd.Function and implementing the forward and backward passes which operate on Tensors
###Code
class SQ(torch.autograd.Function):
@staticmethod
def forward(ctx,i):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
result=i**2
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
i, = ctx.saved_tensors
grad_output = grad_output * 2 * i # chain rule: scale the upstream gradient by dy/dx = 2x
return grad_output
###Output
_____no_output_____
###Markdown
We can apply it to the function
###Code
x=torch.tensor(2.0,requires_grad=True )
sq=SQ.apply
y=sq(x)
y
print(y.grad_fn)
y.backward()
x.grad
###Output
<torch.autograd.function.SQBackward object at 0x7ff4fd2434a8>
###Markdown
Partial Derivatives We can also calculate Partial Derivatives. Consider the function: $f(u,v)=vu+u^{2}$ Let us create u tensor, v tensor and f tensor
###Code
# Calculate f(u, v) = v * u + u^2 at u = 1, v = 2
u = torch.tensor(1.0,requires_grad=True)
v = torch.tensor(2.0,requires_grad=True)
f = u * v + u ** 2
print("The result of v * u + u^2: ", f)
###Output
The result of v * u + u^2: tensor(3., grad_fn=<AddBackward0>)
###Markdown
This is equivalent to the following: $f(u=1,v=2)=(2)(1)+1^{2}=3$ Now let us take the derivative with respect to u:
###Code
# Calculate the derivative with respect to u
f.backward()
print("The partial derivative with respect to u: ", u.grad)
###Output
The partial derivative with respect to u: tensor(4.)
###Markdown
the expression is given by: $\frac{\mathrm{\partial f(u,v)}}{\partial {u}}=v+2u$$\frac{\mathrm{\partial f(u=1,v=2)}}{\partial {u}}=2+2(1)=4$ Now, take the derivative with respect to v:
###Code
# Calculate the derivative with respect to v
print("The partial derivative with respect to v: ", v.grad)
###Output
The partial derivative with respect to v: tensor(1.)
###Markdown
The equation is given by: $\frac{\mathrm{\partial f(u,v)}}{\partial {v}}=u$$\frac{\mathrm{\partial f(u=1,v=2)}}{\partial {v}}=1$ Calculate the derivative of a function with multiple values as follows. Use the sum trick to produce a scalar-valued function and then take the gradient: since here $y=\sum_i x_i^2$, each component of the gradient is simply $\frac{\partial y}{\partial x_i}=2x_i$.
###Code
# Calculate the derivative with multiple values
x = torch.linspace(-10, 10, 10, requires_grad = True)
Y = x ** 2
y = torch.sum(x ** 2)
###Output
_____no_output_____
###Markdown
We can plot the function and its derivative
###Code
# Take the derivative with respect to multiple value. Plot out the function and its derivative
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.detach().numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The orange line is the slope of the blue line at the intersection point, which is the derivative of the blue line. The method detach() excludes further tracking of operations in the graph, and therefore the subgraph will not record operations. This allows us to then convert the tensor to a numpy array. The relu activation function is an essential function in neural networks. We can take the derivative as follows:
###Code
# Take the derivative of Relu with respect to multiple value. Plot out the function and its derivative
x = torch.linspace(-10, 10, 1000, requires_grad = True)
Y = torch.relu(x)
y = Y.sum()
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.detach().numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
###Code
y.grad_fn
###Output
_____no_output_____
###Markdown
Practice Try to determine the partial derivative with respect to $u$ of the following function, where $u=2$ and $v=1$: $ f=uv+(uv)^2$
###Code
# Practice: Calculate the derivative of f = u * v + (u * v) ** 2 at u = 2, v = 1
# Type the code here
u = torch.tensor(2.0,requires_grad=True)
v = torch.tensor(1.0,requires_grad=True)
f = u * v + (u * v) ** 2
print("The result of u * v + (u * v) ** 2: ", f)
f.backward()
print("The partial derivative with respect to u: ", u.grad)
print("The partial derivative with respect to v: ", v.grad)
###Output
The result of u * v + (u * v) ** 2: tensor(6., grad_fn=<AddBackward0>)
The partial derivative with respect to u: tensor(5.)
The partial derivative with respect to v: tensor(10.)
|
notebooks/curate_bouts-z_r12r13_21-plotly.ipynb | ###Markdown
Searching for bouts for a day of alsa recording
###Code
import os
import glob
import socket
import logging
import pickle
import numpy as np
import pandas as pd
from scipy.io import wavfile
from scipy import signal
### Fuck matplotlib, I'm using plotly now
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from importlib import reload
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('Running on {}'.format(socket.gethostname()))
from ceciestunepipe.file import bcistructure as et
###Output
_____no_output_____
###Markdown
Get the file locations for a session (day) of recordings
###Code
reload(et)
sess_par = {'bird': 'z_b15k6_21',
'sess': '2021-07-28'}
exp_struct = et.get_exp_struct(sess_par['bird'], sess_par['sess'], ephys_software='alsa')
raw_folder = exp_struct['folders']['alsa']
bouts_folder = os.path.join(exp_struct['folders']['derived'], 'bouts_ceciestunepipe')
exp_struct['folders']
###Output
_____no_output_____
###Markdown
read a session's bouts pandas: get the location of the file, load the pandas pickle, and load the search/spectrogram parameters
###Code
from ceciestunepipe.util.sound import boutsearch as bs
### load the search/spectrogram parameters
### these functions are necessary to load the pickle files, which is bad.
### need to not write modules/functions in the pickle
# function for getting one channel out of a wave file
def read_wav_chan(wav_path: str, chan_id: int=0) -> tuple:
s_f, x = wavfile.read(wav_path, mmap=True)
return s_f, x[:, chan_id]
def sess_file_id(f_path):
n = int(os.path.split(f_path)[1].split('-')[-1].split('.wav')[0])
return n
hparams_file_path = os.path.join(bouts_folder, 'bout_search_params.pickle')
with open(hparams_file_path, 'rb') as fh:
hparams = pickle.load(fh)
#bpd, x, p = bs.get_bouts_in_file(one_wav_path, hparams)
bouts_auto_file_path = os.path.join(bouts_folder, hparams['bout_auto_file'])
#load. It is important to reset index because the manual curation requires unique indexing
bpd = pd.read_pickle(bouts_auto_file_path).reset_index(drop=True)
bpd.head(1)
###Output
_____no_output_____
###Markdown
if it wasn't saved (which is a bad mistake), read the sampling rate from the first file in the session
###Code
def sample_rate_from_wav(wav_path):
sample_rate, x = wavfile.read(wav_path) # wavfile.read returns (rate, data) in that order
return sample_rate
if hparams['sample_rate'] is None:
one_wav_path = bpd.loc[0, 'file']
logger.info('Sample rate not saved in parameters dict, searching it in ' + one_wav_path)
hparams['sample_rate'] = sample_rate_from_wav(one_wav_path)
def cleanup(bout_pd: pd.DataFrame):
## check for empty waveforms (how would THAT happen???)
bout_pd['valid_waveform'] = bout_pd['waveform'].apply(lambda x: (False if x.size==0 else True))
# valid is & of all the validated criteria
bout_pd['valid'] = bout_pd['valid_waveform']
# drop not valid and reset index
bout_pd.drop(bout_pd[bout_pd['valid']==False].index, inplace=True)
bout_pd.reset_index(drop=True, inplace=True)
cleanup(bpd)
###Output
_____no_output_____
###Markdown
compute the spectrograms
###Code
hparams
hparams['frame_shift_ms'] = 1
hparams['frame_lenght_ms'] = 5
hparams['fmax'] = 10000
hparams['num_freq'] = 2048
bpd['spectrogram'] = bpd['waveform'].apply(lambda x: bs.gimmepower(x, hparams)[2])
logger.info('saving bout pandas with spectrogram to ' + bouts_auto_file_path)
bpd.to_pickle(bouts_auto_file_path)
bpd
###Output
_____no_output_____
###Markdown
inspect the bouts and curate them: visualize one bout
###Code
bpd.iloc[0]
import plotly.express as px
import plotly.graph_objects as go
from ipywidgets import widgets
def viz_one_bout(df: pd.Series, sub_sample=10):
# get the power and the spectrogram
sxx = df['spectrogram'][:, ::sub_sample]
x = df['waveform'][::sub_sample]
# the trace
tr_waveform = go.Scatter(y=x)
figwidg_waveform = go.FigureWidget(data=[tr_waveform],
layout= {'height': 300,'width':1000})
# the spectrogram
fig_spectrogram = px.imshow(sxx,
labels={},
color_continuous_scale='Inferno',
aspect='auto')
fig_spectrogram.update_layout(width=1000, height=300, coloraxis_showscale=False)
fig_spectrogram.update_xaxes(showticklabels=False)
fig_spectrogram.update_yaxes(showticklabels=False)
figwidg_spectrogram = go.FigureWidget(fig_spectrogram)
display(widgets.VBox([figwidg_waveform,
figwidg_spectrogram]))
viz_one_bout(bpd.iloc[0])
bpd.head(2)
###Output
_____no_output_____
###Markdown
use it in a widget: add a 'confusing' label for not-sure/mixed cases. We want to avoid having things we are not sure of in the training dataset
###Code
bpd.reset_index(drop=True, inplace=True)
## Set confusing by default, will only be False once asserted bout/or not
bpd['confusing'] = True
bpd['bout_check'] = False
### Create a counter object (count goes 1:1 to DataFrame index)
from traitlets import CInt, link
class Counter(widgets.DOMWidget):
value = CInt(0)
value.tag(sync=True)
class VizBout():
def __init__(self, hparams, bouts_pd):
self.bout = None
self.bouts_pd = bouts_pd
self.bout_series = None
self.is_bout = None
self.is_confusing = None
self.bout_counter = None
self.bout_id = None
self.buttons = {}
self.m_pick = None
self.fig_waveform = None
self.fig_spectrogram = None
self.figwidg_waveform = None
self.figwidg_spectrogram = None
self.fig_width = 2
self.sub_sample = 10
self.x = None
self.sxx = None
self.tr_waveform = None
self.s_f = hparams['sample_rate']
self.init_fig()
self.init_widget()
self.show()
def init_fig(self):
# the trace
self.tr_waveform = go.Scatter(y=np.zeros(500))
self.figwidg_waveform = go.FigureWidget(data=[self.tr_waveform],
layout={'width': 1000, 'height':300})
# the spectrogram
self.fig_spectrogram = px.imshow(np.random.rand(500, 500),
labels={},
color_continuous_scale='Inferno',
aspect='auto')
self.fig_spectrogram.update_layout(width=1000, height=300, coloraxis_showscale=False)
self.fig_spectrogram.update_xaxes(showticklabels=False)
self.fig_spectrogram.update_yaxes(showticklabels=False)
self.figwidg_spectrogram = go.FigureWidget(self.fig_spectrogram)
def init_widget(self):
# declare elements
# lay them out
#
self.bout_counter = Counter()
self.is_bout = widgets.Checkbox(description='is bout')
self.is_confusing = widgets.Checkbox(description='Not sure or mixed')
self.buttons['Next'] = widgets.Button(description="Next", button_style='info',
icon='plus')
self.buttons['Prev'] = widgets.Button(description="Prev", button_style='warning',
icon='minus')
self.buttons['Check'] = widgets.Button(description="Check", button_style='success',
icon='check')
self.buttons['Uncheck'] = widgets.Button(description="Uncheck", button_style='danger',
icon='wrong')
[b.on_click(self.button_click) for b in self.buttons.values()]
left_box = widgets.VBox([self.buttons['Prev'], self.buttons['Uncheck']])
right_box = widgets.VBox([self.buttons['Next'], self.buttons['Check']])
button_box = widgets.HBox([left_box, right_box])
self.m_pick = widgets.IntSlider(value=0, min=0, max=self.bouts_pd.index.size-1,step=1,
description="Bout candidate index")
control_box = widgets.HBox([button_box,
widgets.VBox([self.is_bout, self.is_confusing]),
self.m_pick])
link((self.m_pick, 'value'), (self.bout_counter, 'value'))
self.update_bout()
self.is_bout.observe(self.bout_checked, names='value')
self.is_confusing.observe(self.confusing_checked, names='value')
self.m_pick.observe(self.slider_change, names='value')
all_containers = widgets.VBox([control_box,
self.figwidg_waveform, self.figwidg_spectrogram])
display(all_containers)
# display(button_box)
# display(self.m_pick)
# display(self.is_bout)
# display(self.fig)
def button_click(self, button):
self.bout_id = self.bout_counter.value
curr_bout = self.bout_counter
if button.description == 'Next':
curr_bout.value += 1
elif button.description == 'Prev':
curr_bout.value -= 1
elif button.description == 'Check':
self.bouts_pd.loc[self.bout_id, 'bout_check'] = True
self.bouts_pd.loc[self.bout_id, 'confusing'] = False
curr_bout.value += 1
elif button.description == 'Uncheck':
self.bouts_pd.loc[self.bout_id, 'bout_check'] = False
self.bouts_pd.loc[self.bout_id, 'confusing'] = False
curr_bout.value += 1
# handle the edges of the counter
if curr_bout.value > self.m_pick.max:
curr_bout.value = 0
if curr_bout.value < self.m_pick.min:
curr_bout.value = self.m_pick.max
def slider_change(self, change):
#logger.info('slider changed')
#self.bout_counter = change.new
#clear_output(True)
self.update_bout()
self.show()
def bout_checked(self, bc):
# print "bout checked"
# print bc['new']
# print self.motiff
self.bouts_pd.loc[self.bout_id, 'bout_check'] = bc['new']
def confusing_checked(self, bc):
# print "bout checked"
# print bc['new']
# print self.motiff
self.bouts_pd.loc[self.bout_id, 'confusing'] = bc['new']
def update_bout(self):
self.bout_id = self.bout_counter.value
self.bout_series = self.bouts_pd.iloc[self.bout_id]
self.is_bout.value = bool(self.bout_series['bout_check'])
self.is_confusing.value = bool(self.bout_series['confusing'])
self.x = self.bout_series['waveform'][::self.sub_sample]
self.sxx = self.bout_series['spectrogram'][:, ::self.sub_sample]  # subsample along the time axis, as in viz_one_bout
def show(self):
#self.fig.clf()
#self.init_fig()
# update
# self.update_bout()
#plot
#logger.info('showing')
# Show the figures
with self.figwidg_waveform.batch_update():
self.figwidg_waveform.data[0].y = self.x
self.figwidg_waveform.data[0].x = np.arange(self.x.size) * self.sub_sample / self.s_f
with self.figwidg_spectrogram.batch_update():
self.figwidg_spectrogram.data[0].z = np.sqrt(self.sxx[::-1])
viz_bout = VizBout(hparams, bpd)
np.where(viz_bout.bouts_pd['bout_check']==True)[0].size
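# Companion count (a quick sketch, not in the original): bouts still flagged as confusing
np.where(viz_bout.bouts_pd['confusing']==True)[0].size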
###Output
_____no_output_____
###Markdown
save it
###Code
hparams
### get the curated file path
##save to the curated file path
bouts_curated_file_path = os.path.join(bouts_folder, hparams['bout_curated_file'])
viz_bout.bouts_pd.to_pickle(bouts_curated_file_path)
logger.info('saved curated bout pandas to pickle {}'.format(bouts_curated_file_path))
viz_bout.bouts_pd.head(5)
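# A minimal follow-up sketch ('curated' is an illustrative name, not part of the
# original notebook): keep only confirmed, non-confusing bouts for the training dataset
curated = viz_bout.bouts_pd[(viz_bout.bouts_pd['bout_check']==True) & (viz_bout.bouts_pd['confusing']==False)]
curated.shape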
###Output
_____no_output_____ |
OrderMailWithSpacy.ipynb | ###Markdown
Smarter conversations using spaCy. Created on March 21st 2020 and updated in August 2021 by Patrick Rotzetter, https://www.linkedin.com/in/rotzetter/ **Small experiment for automatic order mail processing** Pre-requisites: let us import the required libraries and check that some pre-requisites are met
###Code
# Import libraries
import spacy
from spacy.pipeline import EntityRuler
from spacy.matcher import Matcher,PhraseMatcher
from spacy.symbols import nsubj, VERB, dobj, NOUN, root, xcomp
from spacy import displacy
from spacy.matcher import Matcher
from pathlib import Path
import random
# Let us confirm we have the right Python and spacy version, we never know !
from platform import python_version
print(python_version())
!pip show spacy
# validate libraries and models are well installed
!python -m spacy validate
# load spacy model, in this case we will use the transformer model
import en_core_web_trf
nlp = en_core_web_trf.load()
#read mail file which contains the possible dialog content
text = open('ordermail.txt').read().replace('\n', ' ')
print(text)
###Output
Hello, I would like to order a notebook with 16GB and 256 GB disk, I would like to spend less than 1000 Francs, what would be the options Thanks a lot Patrick
###Markdown
Processing the text content with spaCy. Standard Named Entity Recognition: let us start with named entity recognition and see what spaCy can detect.
###Code
#process the mail trough standard spacy pipeline
docMail=nlp(text)
# print text entities detected
for ent in docMail.ents :
print(ent.text, ent.label_,)
###Output
16GB QUANTITY
256 GB QUANTITY
less than 1000 Francs MONEY
Patrick PERSON
###Markdown
We can also visualize the result directly in the text with highlighted entities.
###Code
#Let us visualize the result directly in the text
displacy.render(docMail, style='ent', minify=True)
###Output
_____no_output_____
###Markdown
Domain-specific entities. The default model does not seem to detect notebook and disk as entities, but it identifies the sender as a person and the RAM and disk sizes as quantities. This is a good start, but still far away from a practical solution. So, let us add some domain-specific entities that will help us later on.
###Code
# add domain specific entities and add to the pipeline
patterns = [{"label": "CURRENCY", "pattern": [{"lower": "francs"}]},
{"label": "PART", "pattern": [{"lower": "disk"}]}]
config = {
"phrase_matcher_attr": None,
"validate": True,
"overwrite_ents": True,
"ent_id_sep": "||",
}
ruler=nlp.add_pipe('entity_ruler',config=config)
ruler.add_patterns(patterns)
#process the mail again with added entities
docMail=nlp(text)
for ents in docMail.ents:
# Print the entity text and its label
print(ents.text, ents.label_,)
###Output
16GB QUANTITY
256 GB QUANTITY
disk PART
Francs CURRENCY
Patrick PERSON
###Markdown
Now the results look a bit better, the model could detect 'disk' and 'francs' for example.
###Code
displacy.render(docMail, style='ent', minify=True)
###Output
_____no_output_____
###Markdown
Matching some specific patterns Sometimes it is not enough to match only entities, for example we have defined the RAM size as 16 GB. So let us see how to detect the memory size automatically
###Code
matcher = PhraseMatcher(nlp.vocab)
terms = ["16 GB","256 GB"]
# Only run nlp.make_doc to speed things up
patterns = [nlp.make_doc(t) for t in terms]
matcher.add("MEMORY", patterns)  # spaCy v3 signature: patterns are passed as a list
doc = nlp(text)
matches = matcher(doc)
for match_id, start, end in matches:
span = doc[start:end]
print(span.text)
###Output
16GB
256 GB
###Markdown
Quite cool, it detected the patterns and matched the text related to memory size. Unfortunately, we do not know what each match refers to, so we need to start a different kind of analysis. Dependency Parsing: identify verbs, modifiers and objects. One of the key features of spaCy is its linguistic and predictive capability. Indeed, spaCy is able to predict which tag or label most likely applies in a specific context. Let us start with displaying the result of part-of-speech tagging and dependency analysis. As we can see below, the code is pretty simple
###Code
for token in docMail:
print(token.text, token.dep_, token.head.text, token.head.pos_,
[child for child in token.children])
displacy.render(docMail, style="dep", minify=True, jupyter=True)
###Output
_____no_output_____
###Markdown
The result is quite impressive, it shows all predicted tags for each word and the dependency tree with the associated dependency labels. For example ‘I’ is a pronoun and is subject to the verb ‘like’. Let us detect the numerical modifiers, as we will need them to identify the memory size required
###Code
for token in docMail:
if token.dep_ == 'nummod':
print(f"Numerical modifier: {token.text} --> object: {token.head}")
###Output
Numerical modifier: 16 --> object: GB
Numerical modifier: 256 --> object: GB
Numerical modifier: 1000 --> object: Francs
###Markdown
This is again quite cool: we can associate quantities with different words in the text. Identifying the action verbs: spaCy provides all the required tagging to find the action verbs; we want to know if the customer wants to order something or is just interested in some information, for example. Let us iterate through all tokens in the text and search for an open clausal complement (refer to https://spacy.io/api/annotation#pos-tagging for all possible dependency tags)
###Code
verbs = set()
for possible_verbs in docMail:
if possible_verbs.dep == xcomp and possible_verbs.head.pos == VERB :
verbs.add(possible_verbs)
print(verbs)
###Output
{spend, order}
###Markdown
We have now identified ‘spend’ and ‘order’ as possible actions in the text. We can do the same to find the objects or items in the text that the client refers to. Identifying items: let us find possible items in the text using the dependency tag ‘dobj’ for direct objects of a verb.
###Code
items = set()
for possible_item in docMail:
if possible_item.dep == dobj and possible_item.head.pos == VERB:
items.add(possible_item)
print(items)
###Output
{Francs, notebook}
###Markdown
‘Francs’ and ‘notebook’ have been found. Now we can use word similarities to find what kind of item the client is referring to. We could also use other techniques, but let us try a simple way for now. We will compare similarities between the identified objects and the word ‘laptop’. The word ‘notebook’ is much closer to ‘laptop’ than ‘Francs’ is.
###Code
import en_core_web_lg
nlpwithvectors = en_core_web_lg.load() # we are using the large language model as we need vectors to compute word similarities
orderobject=nlpwithvectors("laptop")
for sub in items:
print(f"Similarity of {sub.text} to laptop is {nlpwithvectors(sub.text).similarity(orderobject):.4f}")
###Output
Similarity of Francs to laptop is 0.0016
Similarity of notebook to laptop is 0.8022
###Markdown
Finally putting it together, we can think of automatically detecting the required action verb using a heuristic. Let us assume that if the similarity is more than 80%, then we have found the right verb. We then search for the direct object of the similar verb. That could look like this
###Code
orderword=nlpwithvectors("order")
for verb in verbs:
if (nlpwithvectors(verb.text).similarity(orderword)) >=0.8:
for v in verb.children:
if v.dep==dobj:
print(v.text)
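# Putting the pieces together (a hedged sketch; 'order_request' is an
# illustrative name, not part of the original notebook)
order_request = {
    "actions": [v.text for v in verbs],
    "items": [i.text for i in items if nlpwithvectors(i.text).similarity(orderobject) >= 0.8],
    "quantities": [ent.text for ent in docMail.ents if ent.label_ == "QUANTITY"],
}
print(order_request)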
###Output
notebook
|
red-wine-quality-eda-and-ml.ipynb | ###Markdown
***Overview*** This project mainly explores the relationship between red wine quality and the wine’s physicochemical and sensory variables (1 - fixed acidity; 2 - volatile acidity; 3 - citric acid; 4 - residual sugar; 5 – chlorides; 6 - free sulfur dioxide; 7 - total sulfur dioxide; 8 – density; 9 – pH; 10 – sulphates; 11 - alcohol; output variable: quality). In addition, EDA (seaborn and ggplot) and multiple machine learning algorithms are used to determine which physicochemical properties have an impact on a wine’s quality.
###Code
!pip install -q plotnine
from plotnine import *
%matplotlib inline
import pandas as pd
import numpy as np
df_wine = pd.read_csv('../input/winequality-red.csv')
df_wine.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis Seaborn: Correlation Heatmap
###Code
import seaborn as sns
color = sns.color_palette()
import matplotlib.pyplot as plt
sns.set(style="white")
# Calculate the correlation
corr= df_wine.corr()
corr
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in newer NumPy releases
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
###Output
_____no_output_____
###Markdown
Based on the heatmap above, all variables except "residual sugar", "free sulfur dioxide" and "pH" seem to have some relationship with “quality”.
###Code
df_wine.drop(["residual sugar",'free sulfur dioxide','pH'],axis = 1,inplace = True)
df_wine.head()
###Output
_____no_output_____
###Markdown
Seaborn: pairplot. Arbitrary cutoffs are set for the dependent variable (wine quality) and the independent variable (alcohol) based on their distributions, in order to facilitate further analysis.
###Code
sns.distplot(df_wine['quality'])
plt.show()
# Bin "quality" variable into three levels: poor, normal and excellent
bins = [0, 4, 6, 10]
labels = ["poor","normal","excellent"]
df_wine['binned_quality'] = pd.cut(df_wine['quality'], bins=bins, labels=labels)
df_wine.head()
df_wine.drop('quality',axis =1, inplace = True)
sns.distplot(df_wine['alcohol'])
plt.show()
# Bin "alcohol" variable into three levels: low, median and high
bins = [0, 10, 12, 15]
labels = ["low alcohol","median alcohol","high alcohol"]
df_wine['binned_alcohol'] = pd.cut(df_wine['alcohol'], bins=bins, labels=labels)
df_wine.drop('alcohol',axis =1, inplace = True)
df_wine.head()
sns_plot = sns.pairplot(df_wine, hue="binned_quality", palette="husl",
diag_kind="kde")
sns_plot.savefig("pairplot.png")
###Output
_____no_output_____
###Markdown
According to the pairplot above, "volatile acidity" and "citric acid" are the two variables whose distributions are most distinguishable across the three quality levels.
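A quick group-by (a sketch, not in the original notebook) confirms this numerically:
###Code
# mean of the two distinguishing variables per quality level
df_wine.groupby('binned_quality')[['volatile acidity', 'citric acid']].mean()
###Output
_____no_output_____
###Markdown
ggplot: Faceted plot, Violin boxplot and Generic boxplot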
###Code
(ggplot(df_wine, aes('citric acid', 'volatile acidity', color = 'binned_alcohol',
size = 'binned_alcohol',
shape = 'binned_alcohol'))
+ geom_point(alpha=0.3)
+ facet_wrap("binned_quality",ncol =1)
+ theme_xkcd())
(
ggplot(df_wine) +
geom_violin(
aes(x = 'binned_quality',
y = 'volatile acidity')) +
labs(
title ='Distribution of volatile acidity by quality',
x = 'wine quality',
y = 'volatile acidity',
))
(
ggplot(df_wine) +
geom_boxplot(
aes(x = 'binned_quality',
y = 'citric acid')
) +
labs(
title ='Distribution of citric acid by quality',
x = 'wine quality',
y = 'citric acid',
)
)
###Output
_____no_output_____
###Markdown
Based on the three plots above, we can conclude that, compared with the poor quality level, the excellent quality level has a higher proportion of high-alcohol wine; and that, on average, the higher the wine quality, the lower the volatile acidity and the higher the citric acid. ML algorithms and model comparison
###Code
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
df_wine_ml = df_wine.copy()
df_wine_ml.info()
#get dummies
df_wine_ml = pd.get_dummies(df_wine_ml, columns=["binned_alcohol"], drop_first=True)
df_wine_ml.head()
###Output
_____no_output_____
###Markdown
sklearn StandardScaler
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(df_wine_ml.drop('binned_quality',axis=1))
scaled_features = scaler.transform(df_wine_ml.drop('binned_quality',axis=1))
df_wine_ml_sc = pd.DataFrame(scaled_features, columns=df_wine_ml.drop('binned_quality', axis=1).columns)  # preserve the original column order; columns.difference() would sort names alphabetically and mislabel the scaled columns
###Output
_____no_output_____
###Markdown
train_test_split
###Code
# use 70% of the data for training and 30% for testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_wine_ml.drop( "binned_quality",axis=1), df_wine_ml["binned_quality"], test_size=0.30, random_state=101)
X_train_sc, X_test_sc, y_train_sc, y_test_sc = train_test_split(df_wine_ml_sc, df_wine_ml["binned_quality"], test_size=0.30, random_state=101)
# unscaled
X_train_all = df_wine_ml.drop("binned_quality",axis=1)
y_train_all = df_wine_ml["binned_quality"]
# scaled
X_train_all_sc = df_wine_ml_sc
y_train_all_sc = df_wine_ml["binned_quality"]
###Output
_____no_output_____
###Markdown
1. Logistic Regression
###Code
logreg = LogisticRegression()
logreg.fit(X_train,y_train)
pred_logreg = logreg.predict(X_test)
print(accuracy_score(y_test, pred_logreg))
logreg.coef_
###Output
_____no_output_____
###Markdown
2. Gaussian Naive Bayes¶
###Code
gnb=GaussianNB()
gnb.fit(X_train,y_train)
pred_gnb = gnb.predict(X_test)
print(accuracy_score(y_test, pred_gnb))
###Output
_____no_output_____
###Markdown
3. kNN
###Code
knn = KNeighborsClassifier(n_neighbors=20)
knn.fit(X_train_sc, y_train_sc)
pred_knn = knn.predict(X_test_sc)  # predict on the scaled test set, consistent with the scaled training data
print(accuracy_score(y_test_sc, pred_knn))
###Output
_____no_output_____
###Markdown
4. Decision Tree
###Code
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train)
pred_dtree = dtree.predict(X_test)
print(accuracy_score(y_test, pred_dtree))
dtree_2 = DecisionTreeClassifier(max_features=7 , max_depth=6, min_samples_split=8)
dtree_2.fit(X_train,y_train)
pred_dtree_2 = dtree_2.predict(X_test)
print(accuracy_score(y_test, pred_dtree_2))
###Output
_____no_output_____
###Markdown
5. Random Forest
###Code
rfc = RandomForestClassifier(max_depth=6, max_features=7)
rfc.fit(X_train, y_train)
pred_rfc = rfc.predict(X_test)
print(accuracy_score(y_test, pred_rfc))
# feature importance
importances = pd.DataFrame({'feature':X_train.columns,
'importance':np.round(rfc.feature_importances_,3)})
importances = importances.sort_values('importance',ascending=False).set_index('feature')
importances.head(15)
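# Optional visual check (a sketch, not in the original): bar plot of the importances above
importances.plot(kind='bar', legend=False, title='Random forest feature importances')
plt.tight_layout()
plt.show()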
###Output
_____no_output_____
###Markdown
6. SVM
###Code
svc = SVC(gamma = 0.01, C = 100, probability=True)
svc.fit(X_train_sc, y_train_sc)
pred_svc = svc.predict(X_test_sc)
print(accuracy_score(y_test_sc, pred_svc))
###Output
_____no_output_____
###Markdown
K fold cross-validation. Among the 6 algorithms above, logistic regression, kNN, SVM and random forest have the highest accuracy rates. Thus, K fold cross-validation is used here to further estimate model accuracy. **For logistic regression:**
###Code
scores_logreg = cross_val_score(logreg, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_logreg)
print(scores_logreg.mean())
###Output
_____no_output_____
###Markdown
**For knn:**
###Code
scores_knn = cross_val_score(knn, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_knn)
print(scores_knn.mean())
###Output
_____no_output_____
###Markdown
**For SVM:**
###Code
scores_svc = cross_val_score(svc, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_svc)
print(scores_svc.mean())
###Output
_____no_output_____
###Markdown
**For rfc:**
###Code
scores_rfc = cross_val_score(rfc, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_rfc)
print(scores_rfc.mean())
###Output
_____no_output_____
###Markdown
Based on k fold cross-validation, SVM (support vector machine) has the best performance. Confusion matrix, without normalization, for SVM
###Code
df= pd.DataFrame(y_test_sc)
df['binned_quality'].value_counts()
from sklearn.metrics import confusion_matrix
# creating a confusion matrix
cm = confusion_matrix(y_test_sc, pred_svc)
cm
names = ["excellent","normal","poor"]
df = pd.DataFrame(cm, index=names, columns=names)
df
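# A per-class view (a short sketch, not in the original): precision/recall/F1 for the SVM predictions
from sklearn.metrics import classification_report
print(classification_report(y_test_sc, pred_svc))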
###Output
_____no_output_____ |
Chapter11/model_performance_drifts/monitoring_models_drifts.ipynb | ###Markdown
Import relevant libraries
###Code
import os
import xgboost as xgb
import mlflow
import pandas as pd
from evidently.tabs import ClassificationPerformanceTab
from sklearn.model_selection import train_test_split
from evidently.dashboard import Dashboard
###Output
_____no_output_____
###Markdown
Get reference dataset
###Code
reference_data = pd.read_csv("training_data.csv",
header=None,
names=[ "day{}".format(i) for i in range(0,14) ]+["target"] )
X=reference_data.iloc[:,:-1]
Y=reference_data.iloc[:,-1]
reference, production, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=4284, stratify=Y)
reference_train = xgb.DMatrix(reference,label=y_train)
dproduction= xgb.DMatrix(production)
dreference=xgb.DMatrix(reference)
###Output
_____no_output_____
###Markdown
Train your model
###Code
mlflow.xgboost.autolog()
EXPERIMENT_NAME="reports_model_performance"
mlflow.set_experiment(EXPERIMENT_NAME)
threshold=0.5
with mlflow.start_run() as run:
model=xgb.train(dtrain=reference_train,params={})
train_proba_predict = model.predict(dreference)
test_proba_predict = model.predict(dproduction)
test_predictions = [1. if y_cont > threshold else 0. for y_cont in test_proba_predict]
train_predictions = [1. if y_cont > threshold else 0. for y_cont in train_proba_predict]
reference['target'] = y_train
reference['prediction'] = train_predictions
production['target'] = y_test
production['prediction'] = test_predictions
classification_performance = Dashboard(
tabs=[ClassificationPerformanceTab])
classification_performance.calculate(reference, production)
os.makedirs('.reports', exist_ok=True)  # make sure the report folder exists before saving
classification_performance.save('.reports/'+EXPERIMENT_NAME+'.html')
mlflow.log_artifact('.reports/'+EXPERIMENT_NAME+'.html')
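# Optional (a sketch, not in the original; assumes we are still inside the
# mlflow run context above): log a headline metric next to the report
from sklearn.metrics import accuracy_score
mlflow.log_metric("production_accuracy", accuracy_score(y_test, test_predictions))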
###Output
2021/07/05 19:12:22 WARNING mlflow.xgboost: Failed to infer model signature: could not sample data to infer model signature: please ensure that autologging is enabled before constructing the dataset.
|
Inference-FaceMesh.ipynb | ###Markdown
Making predictions with FaceMesh. This notebook shows how to use the model for face landmark detection.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
print("PyTorch version:", torch.__version__)
print("CUDA version:", torch.version.cuda)
print("cuDNN version:", torch.backends.cudnn.version())
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu
###Output
_____no_output_____
###Markdown
Load the model
###Code
from facemesh import FaceMesh
net = FaceMesh().to(gpu)
net.load_weights("facemesh.pth")
###Output
_____no_output_____
###Markdown
Make a prediction. The input image should be 192x192 (see the resize below). FaceMesh will not automatically resize the image; you have to do this yourself!
###Code
img = cv2.imread("test.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (192, 192))
detections = net.predict_on_image(img).numpy()
detections.shape
plt.imshow(img, zorder=1)
x, y = detections[:, 0], detections[:, 1]
plt.scatter(x, y, zorder=2, s=1.0)
plt.show()
###Output
_____no_output_____
###Markdown
Let's convert model to ONNX format
###Code
torch.onnx.export(
net,
(torch.randn(1,3,192,192, device='cpu'), ),
"facemesh.onnx",
input_names=("image", ),
output_names=("preds", "conf"),
opset_version=9
)
###Output
/Users/george/Library/Mobile Documents/com~apple~CloudDocs/Projects/matt-research/snapchat_like/MediaPipe-PyTorch/facemesh.py:113: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
print(x.shape)
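###Markdown
As a quick sanity check (a sketch that assumes the optional `onnxruntime` package is installed), we can run the exported model once and inspect the output shapes:
###Code
import onnxruntime as ort
sess = ort.InferenceSession("facemesh.onnx")
dummy = np.random.randn(1, 3, 192, 192).astype(np.float32)
preds, conf = sess.run(None, {"image": dummy})  # output names were set during export
print(preds.shape, conf.shape)
###Output
_____no_output_____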
|
stamp_prices.ipynb | ###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set plot title
plt.title('Stamp Prices over time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'],stamp_prices['Price'])
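# Optional (a small addition, not in the original): label the axes for readability
plt.xlabel('Year')
plt.ylabel('Price')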
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import needed libraries
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
#First set a plot title
plt.title('Stamp Prices over time in the US')
#Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US stamp prices over time
###Code
# Import needed libraries
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This data set is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv into a pandas dataframe
stamp_prices=pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
#type will return the type of the object
type(stamp_prices)
# head returns the column names and the first few rows from the dataset
stamp_prices.head()
# plot year versus price of US stamps
#set a plot title
plt.title('Stamp Prices Over Time')
# set columns to plot
plt.scatter(stamp_prices['Year'],stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv ('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type () will return the type of the object
type (stamp_prices)
# head () will return the column names and the first few rows from the dataframe
stamp_prices.head ()
# Plot the year vs the prices
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need.
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from: https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# "pd.read_csv" will turn a csv file into a pandas dataframe.
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# "type()" will return the type of an object
type(stamp_prices)
# "head()" will return the column names, and the first few rows from the dataframe
stamp_prices.head()
# Plot the year vs. the price
# First, set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US stamp prices over time
###Code
# Import of needed libraries
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This data set is from https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the dataframe
stamp_prices.head()
# plot the year vs. the price
# first set the plot title
plt.title('Stamp Prices over Time in the US')
# set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head will return the column names and the first few rows from the data frame
stamp_prices.head()
# plot the year vs the prices
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
#Set the column to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This data is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the colums to plot
plt.scatter(stamp_prices['Year'],stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('http://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type () will return the type of an object
type (stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices=pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the dataframe
stamp_prices.head()
# Plot the year versus the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter (stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data Sourced from: (https://vincentarelbundock.github.io/Rdatasets/datasets.html)
###Code
# pd.read_csv converts csv file into pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names, as well as the first few rows of our dataframe
stamp_prices.head()
# plot the year vs price
plt.title('Stamp Prices Over Time in the US')
# set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# import libraries that we need.
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type will return the type of an object
type(stamp_prices)
#head will return the column names and the first few rows from the dataframe
stamp_prices.head()
# Plot the year vs. the price
# First set the plot title
plt.title('Stamp Price Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot Stamp Prices Over Time
###Code
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
###Output
_____no_output_____
###Markdown
###Code
stamp_prices.head()
# Plot the year vs Price
plt.title('Stamp Prices Over Time in the US')
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'],stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type () will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# Import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of an object
type(stamp_prices)
# head() will return the column name and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# import the libraries that we need
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset is from https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type () will return the type of the object
type (stamp_prices)
# head () will return column names and the first few rows from the data frame
stamp_prices.head()
# Plot the year vs the price
# First set a plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____
###Markdown
Plot US Stamp Prices Over Time
###Code
# import the libraries we need for analysis
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
This dataset can be found at https://vincentarelbundock.github.io/Rdatasets/datasets.html
###Code
# pd.read_csv will turn a csv file into a pandas dataframe
stamp_prices = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Stat2Data/USstamps.csv')
# type() will return the type of data you are pulling
type(stamp_prices)
# head() will return the column names and first few rows from the dataframe
stamp_prices.head()
# Plot the Year vs Price
# Start by setting the plot title
plt.title('Stamp Prices Over Time in the US')
# Set the columns to plot
plt.scatter(stamp_prices['Year'], stamp_prices['Price'])
###Output
_____no_output_____ |
samples/core/tutorials/keras/basic_text_classification.ipynb | ###Markdown
[View in Colaboratory](https://colab.research.google.com/github/chuckwoody/models/blob/master/samples/core/tutorials/keras/basic_text_classification.ipynb) Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Classify movie reviews: binary classification Run in Google Colab View source on GitHub This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of binary—or two-class—classification, an important and widely applicable kind of machine learning problem. We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
###Code
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Download the IMDB datasetThe IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.The following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):
###Code
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
###Output
_____no_output_____
###Markdown
The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable. Explore the data Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review and 1 is a positive review.
###Code
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
###Output
_____no_output_____
###Markdown
The text of reviews have been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Movie reviews may be different lengths. The below code shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
Convert the integers back to wordsIt may be useful to know how to convert integers back to text. Here, we'll create a helper function to query a dictionary object that contains the integer to string mapping:
###Code
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
###Output
_____no_output_____
###Markdown
Now we can use the `decode_review` function to display the text for the first review:
###Code
decode_review(train_data[0])
###Output
_____no_output_____
###Markdown
Prepare the data. The reviews—the arrays of integers—must be converted to tensors before being fed into the neural network. This conversion can be done a couple of ways:* One-hot-encode the arrays to convert them into vectors of 0s and 1s. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then, make this the first layer in our network—a Dense layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.* Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `num_examples * max_length`. We can use an embedding layer capable of handling this shape as the first layer in our network.
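For comparison, here is a minimal sketch of the first (one-hot) approach — `multi_hot_encode` is an illustrative helper name, not part of this tutorial:
###Code
# Hypothetical helper: multi-hot encode each review into a 10,000-dim 0/1 vector
def multi_hot_encode(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.0  # set the indices present in the review to 1
    return results
multi_hot_encode(train_data[:2]).shape  # -> (2, 10000)
###Output
_____no_output_____
###Markdown
In this tutorial, we will use the second approach. Since the movie reviews must be the same length, we will use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths: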
###Code
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
###Output
_____no_output_____
###Markdown
Let's look at the length of the examples now:
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
And inspect the (now padded) first review:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Build the modelThe neural network is created by stacking layers—this requires two main architectural decisions:* How many layers to use in the model?* How many *hidden units* to use for each layer?In this example, the input data consists of array of word-indices. The labels to predict are either 0 or 1. Let's build a model for this problem:
###Code
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
###Output
_____no_output_____
###Markdown
The layers are stacked sequentially to build the classifier:1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level. Hidden unitsThe above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later. Loss function and optimizerA model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function. This isn't the only choice of loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.Now, configure the model to use an optimizer and a loss function:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Create a validation setWhen training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).
###Code
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
###Output
_____no_output_____
###Markdown
Train the model. Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:
###Code
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
_____no_output_____
###Markdown
Evaluate the modelAnd let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.
###Code
results = model.evaluate(test_data, test_labels)
print(results)
###Output
_____no_output_____
###Markdown
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%. Create a graph of accuracy and loss over time. `model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
###Code
history_dict = history.history
history_dict.keys()
###Output
_____no_output_____
###Markdown
There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Classify movie reviews: binary classification View on TensorFlow.org Run in Google Colab View source on GitHub This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of binary—or two-class—classification, an important and widely applicable kind of machine learning problem. We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
###Code
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Download the IMDB datasetThe IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.The following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):
###Code
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
###Output
_____no_output_____
###Markdown
The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable. Explore the data Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.
###Code
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
###Output
_____no_output_____
###Markdown
The text of the reviews has been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Movie reviews may be different lengths. The code below shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
Convert the integers back to wordsIt may be useful to know how to convert integers back to text. Here, we'll create a helper function to query a dictionary object that contains the integer to string mapping:
###Code
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
###Output
_____no_output_____
###Markdown
Now we can use the `decode_review` function to display the text for the first review:
###Code
decode_review(train_data[0])
###Output
_____no_output_____
###Markdown
Prepare the dataThe reviews—the arrays of integers—must be converted to tensors before being fed into the neural network. This conversion can be done a couple of ways:* One-hot-encode the arrays to convert them into vectors of 0s and 1s. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then, make this the first layer in our network—a Dense layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix (a sketch of this encoding appears after the padding code below).* Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `num_examples * max_length`. We can use an embedding layer capable of handling this shape as the first layer in our network.In this tutorial, we will use the second approach. Since the movie reviews must be the same length, we will use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths:
###Code
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
###Output
_____no_output_____
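###Markdown
For completeness, here is a minimal sketch of the one-hot (multi-hot) encoding mentioned in the first bullet above. This is an illustrative addition, not part of the original tutorial: `multi_hot_sequences` is a hypothetical helper, and it assumes the raw, un-padded integer sequences, so it would run *before* the `pad_sequences` call above.
###Code
import numpy as np
def multi_hot_sequences(sequences, dimension=10000):
    # one row per review, one column per vocabulary word
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0  # mark every word that appears in the review
    return results
# example (on the raw sequences): multi_hot_sequences(train_data[:2]) has shape (2, 10000)
###Output
_____no_output_____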
###Markdown
Let's look at the length of the examples now:
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
And inspect the (now padded) first review:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Build the modelThe neural network is created by stacking layers—this requires two main architectural decisions:* How many layers to use in the model?* How many *hidden units* to use for each layer?In this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1. Let's build a model for this problem:
###Code
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))           # integer indices -> 16-dim vectors
model.add(keras.layers.GlobalAveragePooling1D())            # average over the sequence dimension
model.add(keras.layers.Dense(16, activation=tf.nn.relu))    # hidden layer
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))  # probability the review is positive
model.summary()
###Output
_____no_output_____
###Markdown
The layers are stacked sequentially to build the classifier:1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level. Hidden unitsThe above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later. Loss function and optimizerA model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function. This isn't the only choice for a loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.Now, configure the model to use an optimizer and a loss function:
###Code
# keras.optimizers.Adam replaces tf.train.AdamOptimizer, which was removed in TF 2.x
model.compile(optimizer=keras.optimizers.Adam(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
###Output
_____no_output_____
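###Markdown
To make the "distance" intuition above concrete, here is a small hand computation of binary cross-entropy for one example (an illustrative addition, not part of the tutorial). For a true label y and a predicted probability p, the per-example loss is -(y*log(p) + (1-y)*log(1-p)):
###Code
import numpy as np
y, p = 1.0, 0.9  # a positive review, predicted positive with 90% confidence
bce = -(y * np.log(p) + (1 - y) * np.log(1 - p))
print(bce)  # about 0.105; the loss grows sharply as p moves away from the true label
###Output
_____no_output_____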
###Markdown
Create a validation setWhen training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).
###Code
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
###Output
_____no_output_____
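###Markdown
As a quick sanity check (our addition, assuming the 25,000-review training set loaded above), the split should leave 15,000 padded reviews for training and 10,000 for validation:
###Code
print(partial_x_train.shape, x_val.shape)  # expected: (15000, 256) (10000, 256)
###Output
_____no_output_____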
###Markdown
Train the modelTrain the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:
###Code
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
_____no_output_____
###Markdown
Evaluate the modelNow let's see how the model performs on the test set. Two values are returned: loss (a number representing our error; lower values are better) and accuracy.
###Code
results = model.evaluate(test_data, test_labels)
print(results)
###Output
_____no_output_____
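###Markdown
As a final sanity check (an addition, not part of the original tutorial), we can ask the trained model for the probability that a single test review is positive and compare it against the ground-truth label:
###Code
prediction = model.predict(test_data[:1])  # batch of one padded review
print(prediction[0][0], test_labels[0])  # a value near 1.0 means "positive"
###Output
_____no_output_____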
###Markdown
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%. Create a graph of accuracy and loss over time`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
###Code
history_dict = history.history
history_dict.keys()
###Output
_____no_output_____
###Markdown
There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc_values, 'bo', label='Training acc')
plt.plot(epochs, val_acc_values, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Classify movie reviews: binary classification View on TensorFlow.org Run in Google Colab View source on GitHub This notebook classifies movie reviews as *positive* or *negative* using the text of the review. This is an example of binary—or two-class—classification, an important and widely applicable kind of machine learning problem. We'll use the [IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
###Code
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Download the IMDB datasetThe IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.The following code downloads the IMDB dataset to your machine (or uses a cached copy if you've already downloaded it):
###Code
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
###Output
_____no_output_____
###Markdown
The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable. Explore the data Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.
###Code
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
###Output
_____no_output_____
###Markdown
The text of reviews have been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Movie reviews may be different lengths. The code below shows the number of words in the first and second reviews. Since inputs to a neural network must be the same length, we'll need to resolve this later.
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
Convert the integers back to wordsIt may be useful to know how to convert integers back to text. Here, we'll create a helper function to query a dictionary object that contains the integer to string mapping:
###Code
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
###Output
_____no_output_____
###Markdown
Now we can use the `decode_review` function to display the text for the first review:
###Code
decode_review(train_data[0])
###Output
_____no_output_____
###Markdown
Prepare the dataThe reviews—the arrays of integers—must be converted to tensors before being fed into the neural network. This conversion can be done a couple of ways:* One-hot-encode the arrays to convert them into vectors of 0s and 1s. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then, make this the first layer in our network—a Dense layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.* Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `num_examples * max_length`. We can use an embedding layer capable of handling this shape as the first layer in our network.In this tutorial, we will use the second approach. Since the movie reviews must be the same length, we will use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths:
###Code
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
###Output
_____no_output_____
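###Markdown
For comparison, here is a minimal sketch of the first (multi-hot) approach described above. It is for illustration only and is not used in the rest of this notebook:
###Code
def multi_hot(sequences, dimension=10000):
    # one row per review; position j is set to 1.0 if word j occurs
    results = np.zeros((len(sequences), dimension))
    for i, seq in enumerate(sequences):
        results[i, seq] = 1.0
    return results
multi_hot(train_data[:2]).shape  # (2, 10000)
###Output
_____no_output_____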
###Markdown
Let's look at the length of the examples now:
###Code
len(train_data[0]), len(train_data[1])
###Output
_____no_output_____
###Markdown
And inspect the (now padded) first review:
###Code
print(train_data[0])
###Output
_____no_output_____
###Markdown
Build the modelThe neural network is created by stacking layers—this requires two main architectural decisions:* How many layers to use in the model?* How many *hidden units* to use for each layer?In this example, the input data consists of an array of word-indices. The labels to predict are either 0 or 1. Let's build a model for this problem:
###Code
# input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
###Output
_____no_output_____
###Markdown
The layers are stacked sequentially to build the classifier:1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible.3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units.4. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level. Hidden unitsThe above model has two intermediate or "hidden" layers, between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.If a model has more hidden units (a higher-dimensional representation space), and/or more layers, then the network can learn more complex representations. However, it makes the network more computationally expensive and may lead to learning unwanted patterns—patterns that improve performance on training data but not on the test data. This is called *overfitting*, and we'll explore it later. Loss function and optimizerA model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function. This isn't the only choice for a loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities—it measures the "distance" between probability distributions, or in our case, between the ground-truth distribution and the predictions.Later, when we are exploring regression problems (say, to predict the price of a house), we will see how to use another loss function called mean squared error.Now, configure the model to use an optimizer and a loss function:
###Code
# note: tf.train.AdamOptimizer is the TF 1.x API; in TF 2.x the equivalent
# is tf.keras.optimizers.Adam()
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
###Output
_____no_output_____
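###Markdown
To build some intuition for what `binary_crossentropy` measures, here is a small hand computation (a sketch for illustration only):
###Code
# loss = mean of -[y*log(p) + (1-y)*log(1-p)] over the examples
def manual_binary_crossentropy(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return -np.mean(y_true * np.log(y_pred)
                    + (1 - y_true) * np.log(1 - y_pred))
# confident correct predictions give a small loss ...
print(manual_binary_crossentropy([1, 0], [0.9, 0.1]))  # ~0.105
# ... confident wrong predictions give a large one
print(manual_binary_crossentropy([1, 0], [0.1, 0.9]))  # ~2.303
###Output
_____no_output_____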
###Markdown
Create a validation setWhen training, we want to check the accuracy of the model on data it hasn't seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate our accuracy).
###Code
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
###Output
_____no_output_____
###Markdown
Train the modelTrain the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `partial_x_train` and `partial_y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 samples from the validation set:
###Code
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
_____no_output_____
###Markdown
Evaluate the modelLet's see how the model performs. Two values are returned: loss (a number representing our error; lower values are better) and accuracy.
###Code
results = model.evaluate(test_data, test_labels)
print(results)
###Output
_____no_output_____
###Markdown
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%. Create a graph of accuracy and loss over time`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
###Code
history_dict = history.history
history_dict.keys()
###Output
_____no_output_____
###Markdown
There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____ |
Other/Coursetalk_data.ipynb | ###Markdown
This notebook scrapes reviews from coursetalk.com
###Code
import requests
from bs4 import BeautifulSoup
import pandas as pd
url = "https://www.coursetalk.com/providers/coursera/courses/machine-learning"
res = requests.get(url)
res.status_code  # 200 means the page was fetched successfully
soup = BeautifulSoup(res.content, 'lxml')
# the reviews live inside a div with class "reviews-list"
review_list = soup.find('div', {'class': 'reviews-list'})
# each review carries its star rating in a <meta itemprop="ratingValue"> tag
rating_list = review_list.find_all('meta', {'itemprop': 'ratingValue'})
ratings = []
for rating in rating_list:
    ratings.append(rating['content'])
ratings
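# pandas was imported above but not yet used; a minimal (hypothetical)
# next step is to collect the scraped ratings into a DataFrame:
ratings_df = pd.DataFrame({'rating': pd.to_numeric(ratings)})
ratings_df.describe()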
###Output
_____no_output_____ |
matplotlib/gallery_jupyter/subplots_axes_and_figures/demo_constrained_layout.ipynb | ###Markdown
Resizing axes with constrained layoutConstrained layout attempts to resize subplots in a figure so that there are no overlaps between axes objects and labels on the axes.See :doc:`/tutorials/intermediate/constrainedlayout_guide` for more details and :doc:`/tutorials/intermediate/tight_layout_guide` for an alternative.
###Code
import matplotlib.pyplot as plt
def example_plot(ax):
ax.plot([1, 2])
ax.set_xlabel('x-label', fontsize=12)
ax.set_ylabel('y-label', fontsize=12)
ax.set_title('Title', fontsize=14)
###Output
_____no_output_____
###Markdown
If we don't use constrained_layout, then labels overlap the axes
###Code
fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=False)
for ax in axs.flat:
example_plot(ax)
###Output
_____no_output_____
###Markdown
adding ``constrained_layout=True`` automatically adjusts.
###Code
fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=True)
for ax in axs.flat:
example_plot(ax)
###Output
_____no_output_____
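###Markdown
As an optional alternative (not part of the original example), recent Matplotlib versions also let you switch constrained layout on globally through rcParams, so you don't need to pass the keyword to every figure:
###Code
import matplotlib as mpl
mpl.rcParams['figure.constrained_layout.use'] = True  # affects all later figures
fig, axs = plt.subplots(nrows=2, ncols=2)
for ax in axs.flat:
    example_plot(ax)
mpl.rcParams['figure.constrained_layout.use'] = False  # restore the default
###Output
_____no_output_____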
###Markdown
Below is a more complicated example using nested gridspecs.
###Code
fig = plt.figure(constrained_layout=True)
import matplotlib.gridspec as gridspec
gs0 = gridspec.GridSpec(1, 2, figure=fig)
gs1 = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=gs0[0])
for n in range(3):
ax = fig.add_subplot(gs1[n])
example_plot(ax)
gs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs0[1])
for n in range(2):
ax = fig.add_subplot(gs2[n])
example_plot(ax)
plt.show()
###Output
_____no_output_____
###Markdown
References: The use of the following functions and methods is shown in this example:
###Code
import matplotlib
matplotlib.gridspec.GridSpec
matplotlib.gridspec.GridSpecFromSubplotSpec
###Output
_____no_output_____ |
knowknow/index.ipynb | ###Markdown
KnowKnowThis Python package, `knowknow`, is an attempt to make powerful, modern tools for analyzing the structure of knowledge open to anyone.Although I hope we can continue to improve the methods and documentation written here, and I intend that this grow larger than myself, this package acts as a stabilizing force for the field, giving us all access to the common methods and data for analyzing these structures.I have included every inch of code here, leaving no stone unturned. With every `pip install knowknow-amcgail`, you download the following:+ `creating variables`, a collection of pre-processing algorithms for cleaning and summarizing Web of Science search results, or JSTOR Data for Research data dumps.+ `analyses`, a set of descriptive notebooks which illustrate these datasets+ A connector to pre-computed cooccurrence sets, hosted on [OSF](https://osf.io/9vx4y/)For more details and instructions on how to reproduce these analyses, see [GitHub](https://github.com/amcgail/knowknow). Projects built on `knowknow`+ [amcgail/citation-death](citation-death) applies the concept of 'death' to attributes of citations, and analyzes the lifecourse of cited works, cited authors, and the authors writing the citations, using the `sociology-wos` dataset. Datasets built with `knowknow`+ Sociology ([hosted on OSF](https://osf.io/9vx4y/)) + `sociology-wos` every paper in WoS in early 2020 whose journal is in the 'Sociology' category, and which have full data. See [the Web of Science counter for more information](/creating%20variables%5Ccounter%20-%20web%20of%20science%20(cnt).html) + `sociology-jstor` in-text citations and their contexts were extracted from >90k full-text Sociology articles indexed in JSTOR. See [the JSTOR counter for more information](creating%20variables%5Ccounter%20-%20jstor%20(cnt).html) + `sociology-wos-all` Under the hood Creating Variables
###Code
list_dir("creating variables")
###Output
_____no_output_____
###Markdown
Analyses
###Code
list_dir("analyses")
comments()
###Output
_____no_output_____ |
Energy and Momentum/EnergyBarGraph.ipynb | ###Markdown
Energy Bar Charts: PH 211 Why?Energy bar charts are a core tool for visualizing work-energy problems. The current version of the tool is interactive and includes kinetic energy (initial and final), gravitational potential energy (initial and final), spring potential energy (initial and final), and an unnamed potential energy (initial and final), as well as three possible non-conservative forces doing work through the process. There is a final net energy bar that seeks to indicate the balance of all the energy 'flows'. LibrariesThere are a number of different widget libraries. In the end ipywidgets was the most adaptable to my purposes. I suspect this would change if I were seeking to build this tool as a webpage. References that I used in sorting this all out are given in my [InteractiveStudy notebook](https://github.com/smithrockmaker/ENGR212/blob/main/InteractiveStudy.ipynb). At the moment (2/21) this is miserably documented, but the references contained therein are much better if they are still live.
###Code
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import interact, interactive, fixed, interact_manual, Layout
###Output
_____no_output_____
###Markdown
Setting Up the Bar GraphThis is where the decisions about how many bars there are and how they will be labelled get made. In the end I opted to create one long text string (barLabels) to label the bars. The locate and locateShift lists articulate x values (locations) for each of the bars. This involves skipping values to leave space for the vertical dividers that help this all make sense to me conceptually.
###Code
# set up locations for bars and other objects
# start with how many of each bar
initialBars = 2
transBars = 3
finalBars = initialBars
# total number of bars that are interactive. Gaps and other spacing issues handled at end of cell
Nbase = initialBars + transBars + finalBars + 4
locate = np.arange(Nbase+1)
# shifted locations for labels
locateShift = locate - 0.4
# the x locations for the groups
# Having them in separate lists allows me to choose different colors for each section
# of the bar graph more easily (without creating a color list that I need to edit)
initialLoc = locate[0:initialBars]
transLoc = locate[initialBars+1: initialBars + transBars +1]
finalLoc = locate[Nbase-(finalBars + 2):Nbase-2]
vlineLoc = [initialBars, (initialBars + transBars +1)]
vline2Loc = [Nbase - 2]
netLoc = [Nbase-1]
# check alignments -- I had a lot of trouble making sure that everything lined up
# appropriately. These are diagnostic print statements to be sure I'm visualizing
# the bar and divider locations correctly.
#print("Initial Bars:",initialLoc)
#print("Add/Remove Bars:",transLoc)
#print("Final Bars Bars:",finalLoc)
#print("dividers:",vlineLoc)
#print("Net Separator:",vline2Loc)
#print("Net Bar:",netLoc)
#print("locate:",locate)
# Structure bar width - this is a proportional value apparently
# it scales with plot figure size.
width = 0.4
# bar labels
labelKEi = 'KE' # initial
labelPEgi = 'PEg' # initial
labelPEsi = 'PEs' # initial
labelUKi = 'U' # unknown source of energy initial
labelPM1 = 'W1' # unknown source of energy added or lost during process
labelPM2 = 'W2' # unknown source of energy added or lost during process
labelPM3 = 'W3' # unknown source of energy added or lost during process
labelPM4 = 'W4' # unknown source of energy added or lost during process
labelKEf = 'KE' # final
labelPEgf = 'PEg' # final
labelPEsf = 'PEs' # final
labelUKf = 'U' # unknown source of energy final
labelNet = 'net = ' # does everything add up?
vertBar = ''
lSpace = ' '
lScale = 7
# assemble labels for each section. Spacing is most easily adjusted using the lScale variable above
initialLabels = labelKEi + (lScale)*lSpace + labelPEgi + (lScale)*lSpace
transLabels = labelPM1 + lScale*lSpace + labelPM2 + (lScale)*lSpace + labelPM3 + (lScale +1)*lSpace
finalLabels = labelKEf + lScale*lSpace + labelPEgf + (lScale)*lSpace
netLabels = labelNet
vertLabel = vertBar
# put it all together for labels
barLabels = initialLabels + lScale*lSpace + transLabels + lScale*lSpace + finalLabels + lScale*lSpace + netLabels + lScale*lSpace
# check the label string if needed.
#print("barlabels:", barLabels)
###Output
_____no_output_____
###Markdown
Energy Bar Graph FunctionThis may not be the only or best way to do this but eventually it seemed easiest given my experience or lack of it. I tested everything using fixed values for the bars (you can see this in early version of this notebook). Because I decided I wanted to update the values of each bar on the plot I also needed to generate a dynamic text string that depended on the bar values passed to the plotting function. barValues represents this aspect of the plot.The plot scales vertically relatively smoothly. It will **NOT** scale horizontally since the text strings probably won't follow the bars properly. I can imagine how to sort that out but it's not important enough to take that time at this point. Very basic intro to bar plots is linked below.[pyplot.bar documentation](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.bar.html)[
###Code
def energyBar(KE0, KEf, PEg0, PEgf, WF1, WF2, WF3):
# create array of bar heights (energy)
initialHeights = [KE0, PEg0]
transHeights = [WF1, WF2, WF3]
finalHeights = [KEf, PEgf]
netEnergy = KE0 + PEg0 +WF1 + WF2 + WF3 - (KEf + PEgf)
netHeights = [netEnergy]
# truncate current bar values and create value array to display current value under each bar
# for creating text string for labels
sLabel = ' '
sScale = 7
# initial values
KE0Val = str(np.trunc(KE0))
PEg0Val = str(np.trunc(PEg0))
initialValues =KE0Val + (sScale)*sLabel + PEg0Val + (sScale+1)*sLabel
# add/remove values
WF1Val = str(np.trunc(WF1))
WF2Val = str(np.trunc(WF2))
WF3Val = str(np.trunc(WF3))
# WF4Val = str(np.trunc(WF4))
transValues = WF1Val + sScale*sLabel + WF2Val + sScale*sLabel + WF3Val + (sScale+2)*sLabel
# final values
KEfVal = str(np.trunc(KEf))
PEgfVal = str(np.trunc(PEgf))
finalValues =KEfVal + (sScale)*sLabel + PEg0Val + (sScale+1)*sLabel
# net value
netValue = str(np.trunc(netEnergy))
# current value string
barValues = initialValues + (sScale-1)*sLabel + transValues + (sScale-1)*sLabel + finalValues + (sScale-1)*sLabel + netValue
# determine plot max/min
initMax = np.max(initialHeights)
transMax = np.max(transHeights)
finalMax = np.max(finalHeights)
# include 10 as a lower limit on the top of plot
collectMax = [initMax,transMax,finalMax, 10]
globalMax = 1.1*np.max(collectMax)
initMin = np.min(initialHeights)
transMin= np.min(transHeights)
finalMin = np.min(finalHeights)
collectMin = [initMin,transMin,finalMin, -5.]
globalMin = 1.1*np.min(collectMin)
if np.abs(globalMin) < globalMax:
yLim = globalMax
else:
yLim = np.abs(globalMin)
# create the plot
fig1, ax1 = plt.subplots()
# bar graph sections
ax1.bar(initialLoc,
initialHeights,
width,
color = 'red',
label= 'initial energy',
alpha = 0.4)
ax1.bar(transLoc,
transHeights,
width,
color = 'purple',
label= 'added/removed',
alpha = 0.4)
ax1.bar(finalLoc,
finalHeights,
width,
color = 'blue',
label= 'final energy',
alpha = 0.4)
ax1.bar(netLoc,
netHeights,
width,
color = 'green',
label= 'net energy',
alpha = 0.4)
# dividing lines
ax1.vlines(vlineLoc, -.95*yLim, .95*yLim, linestyles= 'dashed', color = 'navy')
ax1.vlines(vline2Loc, -.95*yLim, .95*yLim, linestyles= '-', color = 'red')
# limits of plot
plt.xlim(-1, Nbase)
plt.ylim(-yLim, yLim)
# turn on plot grid
ax1.grid()
# labeling stuff
#ax1.tick_params(axis="x",direction="in", pad=-200)
#plt.xticks(locateShift, barLabels, fontsize = 12)
plt.text(-.5, -.1*yLim, barLabels)
plt.text(-.5, -.2*yLim, barValues)
#ax1.tick_params(axis="x",direction="in", pad=-170)
#plt.xticks(locate, barLabels, fontsize = 12)
# axis labels
# currently forcing plt.legend to put legend top right for consistency
plt.xlabel('energy type', fontsize = 20)
plt.ylabel('energy', fontsize = 20)
plt.title('Energy Bar Chart', fontsize = 20)
plt.legend(loc = 1)
# Set the size of my plot for better visibility
fig1.set_size_inches(12, 6)
fig1.savefig("myplot.png")
plt.show()
###Output
_____no_output_____
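###Markdown
Before wiring up the widgets, we can sanity-check the plotting function with a direct call (the values here are arbitrary):
###Code
energyBar(KE0=10, KEf=5, PEg0=0, PEgf=5, WF1=0, WF2=0, WF3=0)
###Output
_____no_output_____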
###Markdown
Setting up widgets and interactivityOnce the plotting function is defined, we set up the interactive widgets (numeric input fields) that drive the bar graph. In hindsight vertical sliders, which would move in the same direction as the bars, might have been a better visual fit, but hey .... got to save something for a rainy day. The cap variables are strings for labeling the different sections of the input panel. HBox and VBox are used to lay out the panel. The last two lines set up the interactivity.
###Code
# Set up widgetsm - captions
cap1 = widgets.Label(value='.....Initial Energy')
cap2 = widgets.Label(value=' Add/Removed')
cap3 = widgets.Label(value='.....Final Energy')
cap4 = widgets.Label(value='Object 1:')
cap5 = widgets.Label(value='Force 1:')
cap6 = widgets.Label(value='Force 2:')
cap7 = widgets.Label(value='Force 3:')
cap8 = widgets.Label(value='Force 4:')
cap9 = widgets.Label(value='Net Energy:')
# kinetic energy inputs (BoundedFloatText honors the min/max bounds;
# plain FloatText would ignore them)
KE0=widgets.BoundedFloatText(min=0, max=100, value=.1, description = 'Initial KE',continuous_update=False,
                      layout=Layout(width='60%'))
KEf=widgets.BoundedFloatText(min=0, max=100, value=.1, description = 'Final KE',continuous_update=False,
                      layout=Layout(width='60%'))
# gravitational energy inputs
PEg0=widgets.BoundedFloatText(min=-100, max=100, value=.1, description = 'Initial PE_g',continuous_update=False,
                      layout=Layout(width='60%'))
PEgf=widgets.BoundedFloatText(min=-100, max=100, value=.1, description = 'Final PE_g',continuous_update=False,
                      layout=Layout(width='60%'))
# nonconservative force - energy inputs
WF1=widgets.BoundedFloatText(min=-100, max=100, value=.1, description = 'Work F1',continuous_update=False,
                      layout=Layout(width='60%'))
WF2=widgets.BoundedFloatText(min=-100, max=100, value=.1, description = 'Work F2',continuous_update=False,
                      layout=Layout(width='60%'))
WF3=widgets.BoundedFloatText(min=-100, max=100, value=.1, description = 'Work F3',continuous_update=False,
                      layout=Layout(width='60%'))
# An HBox lays out its children horizontally, VBox lays them out vertically
col1 = widgets.VBox([cap1, cap4, KE0, PEg0])
col2 = widgets.VBox([cap2, cap5, WF1, cap6, WF2, cap7, WF3])
col3 = widgets.VBox([cap3, cap4, KEf, PEgf])
panel = widgets.HBox([col1, col2, col3])
out = widgets.interactive_output(energyBar, {'KE0': KE0, 'KEf': KEf,
'PEg0': PEg0, 'PEgf': PEgf,
'WF1': WF1,'WF2': WF2,
'WF3': WF3})
display(out, panel)
###Output
_____no_output_____ |
ds-newtextbook-python/notebooks/4-2-4-pandas-timedata.ipynb | ###Markdown
4.2.4 Time Series Data
###Code
import pandas as pd
import numpy as np
dates = pd.date_range(start="2017-04-01", end="2017-04-30")
dates
np.random.seed(123)
df = pd.DataFrame(np.random.randint(1, 31, 30), index=dates, columns=["乱数"])
df
dates = pd.date_range(start="2017-01-01", periods=365)
dates
np.random.seed(123)
df = pd.DataFrame(np.random.randint(1, 31, 365), index=dates, columns=["乱数"])
df
df.groupby(pd.Grouper(freq='M')).mean()
df.loc[:, "乱数"].resample('M').mean()
pd.date_range(start="2017-01-01", end="2017-12-31", freq="W-SAT")
df_year = pd.DataFrame(df.groupby(pd.Grouper(freq='W-SAT')).sum(), columns=['乱数'])
df_year
###Output
_____no_output_____ |
02-Alignment.ipynb | ###Markdown
Alignment & Operatrions This notebook is more about *understanding* pandas, "going with the flow", than any particular method or operation.Alignment is a key part of many parts of pandas, including- binary operations (`+, -, *, /, **, ==, |, &`) between pandas objects- merges / joins / concats- constructors (`pd.DataFrame`, `pd.Series`)- reindexingThat said, it's not really something you'll be doing explicitly.It happens in the background, as part of all those tasks.As far as I know, it's unique to pandas, so it may not click immediately.It's all about pandas using *labels* (`Seies/DataFrame.index` and `DataFrame.columns`) to do the tricky work of making sure the operation goes through correctly.
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython import display
%matplotlib inline
pd.options.display.max_rows = 10
sns.set(context='talk')
plt.style.use('default')
###Output
_____no_output_____
###Markdown
Alignment without row labels (bad)- separate datasets on GDP and CPI- Goal: compute real GDP- Problem: Different frequencies I grabbed some data from [FRED](https://fred.stlouisfed.org/) on nominal US GDP (total output each quarter) and CPI (a measure of inflation).Each CSV has a column of dates, and a column for the measurement:
```
DATE        CPIAUCSL
1947-01-01  21.48
1947-02-01  21.62
1947-03-01  22.00
1947-04-01  22.00
1947-05-01  21.95
```
Typically, we would use `DATE` as the index (`index_col='DATE'` in `read_csv`).But to appreciate the value of labels, we'll take them away for now.This will result in the default `range(n)` index.
###Code
# The "wrong" way
# Read in CPI & GDP, parsing the dates
gdp_bad = pd.read_csv("data/gdp.csv", parse_dates=['DATE'])
cpi_bad = pd.read_csv("data/cpi.csv", parse_dates=['DATE'])
gdp_bad.head()
cpi_bad.head()
###Output
_____no_output_____
###Markdown
Goal: Compute Real GDP Our task is to calculate *real* GDP.The data in the CSV is nominal GDP; it hasn't been adjusted for inflation.To compute real GDP, you take nominal GDP (`gdp_bad`) and divide by a measure of inflation (`cpi_bad`).- nominal GDP: Total output in dollars- real GDP: Total output in constant dollars- $\mathrm{real\ gdp} = \frac{\mathrm{nominal\ gdp}}{\mathrm{inflation}}$Ideally, this would be as simple as `gdp_bad / cpi_bad`, but we have a slight issue: `gdp_bad` is measured quarterly, while `cpi_bad` is monthly.The two need to be *aligned* before we can do the conversion from nominal to real GDP.Normally, pandas would do this for us, but since we don't have meaningful row labels we have to do it manually.We'll find the dates in common between the two series, manually filter to those, and then do the division.You could do this a few ways; we'll go with a sql-style merge, roughly:
```sql
select "DATE", GDP / CPIAUCSL as real_gdp
from gdp_data
join cpi_data using ("DATE")
```
###Code
# merge on DATE, divide
m = pd.merge(gdp_bad, cpi_bad, on='DATE', how='inner')
m.head()
m['GDP'] / m['CPIAUCSL']
###Output
_____no_output_____
###Markdown
Problems1. The output has lost the `DATE` fields, we would need to manually bring those along after doing the division2. We had to worry about doing the merge, which is incidental to the problem of calculating real gdp The Better Way- Use row labels- Specify `index_col='DATE'` in `read_csv`- Just do the operation: `gdp / cpi` When we have meaningful row labels shared across pandas objects, pandas will handle all the fiddly details for alignment for us.Let's do things the proper way now, using `DATE` as our row labels.We could use `gdp = gdp_bad.set_index("DATE")` to move a column into the index, but we'll just re-read the data from disk using the `index_col` method.
###Code
# use .squeeze to convert a 1 column df to a Series
gdp = pd.read_csv('data/gdp.csv', index_col='DATE',
parse_dates=['DATE']).squeeze()
gdp.head()
cpi = pd.read_csv('data/cpi.csv', index_col='DATE',
parse_dates=['DATE']).squeeze()
cpi.head()
###Output
_____no_output_____
###Markdown
Now when you do the division, pandas will handle the alignemnt.
###Code
rgdp = gdp / cpi
rgdp
###Output
_____no_output_____
###Markdown
You'll notice that a bunch of the values are `NaN`, short for ["Not A Number"](https://en.wikipedia.org/wiki/NaN).This is the missing value indicator pandas uses for numeric data.The `NaN`s are there because alignment produces the *union* of the two Indexes. Explicit AlignmentRoughly speaking, alignment composes two operations:1. union the labels2. reindex the data to conform to the unioned labels, inserting `NaN`s where necessary
###Code
# step 1: union indexes
full_idx = gdp.index.union(cpi.index)
full_idx
# step 2: reindex
gdp.reindex(full_idx)
###Output
_____no_output_____
###Markdown
Once the data have been reindexed, the operation (like `/` in our case) proceeds.
###Code
gdp.reindex(full_idx) / cpi.reindex(full_idx)
###Output
_____no_output_____
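###Markdown
pandas also exposes this union-and-reindex step directly through the `.align` method, which returns both objects reindexed to the shared labels in one call:
###Code
gdp_aligned, cpi_aligned = gdp.align(cpi, join='outer')
gdp_aligned.head()
###Output
_____no_output_____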
###Markdown
Occasionally, you will do a manual `reindex`, but most of the time it's done in the background when you do an operation. Exercise: Compute Real GDPCompute real GDP in 2009 dollars
###Code
# Your solution
cpi09 = cpi / ... * 100
...
%load solutions/alignment_real_gdp09.py
###Output
_____no_output_____
###Markdown
To the extent possible, you should use *meaningful labels*, rather than the default `range(n)` index.This will put the burden of aligning things on pandas, rather than your memory.Additionally, labels like the date are often "nuisance" columns, that would have to be dropped and recombined when doing arithmetic calculations.When they're in the `.index`, they come along with the calculation but don't get in the way. Alignment on *both* axisThis may surpise you at some point down the road. Above, we used the `.squeeze()` method to turn the 1-D `DataFrame` down to a `Series`.We did this, because pandas will align on both the index *and* columns.Can you guess what would happen if we divided two DataFrames, with different column names?
###Code
gdp_ = pd.read_csv('data/gdp.csv', index_col='DATE',
parse_dates=['DATE'])
gdp_.head()
cpi_ = pd.read_csv('data/cpi.csv', index_col='DATE',
parse_dates=['DATE'])
cpi_.head()
gdp_ / cpi_
###Output
_____no_output_____
###Markdown
So pandas aligned by the columns, in addition to the index.Recall that alignment does the set *union*, so the output DataFrame has both CPI and GDP, which probably isn't what we wanted here. Aside: Handling Missing DataPandas, recognizing that missing data is a fact of life, has a bunch of methods for detecting and handling missing data.1. detecting missing data2. dropping missing data3. filling missing data Detecting Missing Data1. `pd.isna(), df.isna()`2. `pd.notna(), df.notna()`
###Code
# detect with `isna` and `notna`
rgdp.isna().head()
rgdp.notna().head()
###Output
_____no_output_____
###Markdown
These are often useful as masks for boolean indexing:
###Code
rgdp[rgdp.isna()].head()
###Output
_____no_output_____
###Markdown
Or for counting (True counts as 1, and False as 0 for numeric operations):
###Code
rgdp.isna().sum()
###Output
_____no_output_____
###Markdown
Dropping Missing DataYou can drop missing values with `.dropna`:
```
DataFrame.dropna
Return object with labels on given axis omitted where
alternately any or all of the data are missing

Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
    Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
    * any : if any NA values are present, drop that label
    * all : if all values are NA, drop that label
```
###Code
rgdp.dropna()
###Output
_____no_output_____
###Markdown
Almost all pandas methods return a new Series or DataFrame, and do not mutate data inplace.`rgdp` still has the missing values, even though we called `.dropna`
###Code
rgdp.head()
###Output
_____no_output_____
###Markdown
To make the change stick, you can assign the output to a new variable (or re-assign it to `rgdp`) like `rgdp = rgdp.dropna()`. Dropna for DataFramesSince `DataFrame` is a 2-d container, there are additional complexities with dropping missing data.Do you drop the row or column? Does just one value in the row or column have to be missing, or all of them?
###Code
# We'll see concat later
df = pd.concat([gdp, cpi], axis='columns')
df.head()
###Output
_____no_output_____
###Markdown
The defaults, shown next, are to drop *rows* (`axis='index'`) thathave at any missing values (`how='any'`):
###Code
df.dropna(axis='index', how='any')
###Output
_____no_output_____
###Markdown
You can drop a row only if all of it's values are missing:
###Code
df.dropna(axis='index', how='all')
###Output
_____no_output_____
###Markdown
Exercise: Dropping ColumnsDrop any `columns` in `df` that have at least one missing value
###Code
%load solutions/dropna_columns.py
###Output
_____no_output_____
###Markdown
Filling Missing ValuesUse `.fillna` to fill missing values, either with a value (a scalar, or a mapping of `label: value`) or with a method like `ffill` to fill-forward the last-observed value.
###Code
rgdp.fillna(method='ffill').plot()
sns.despine()
###Output
_____no_output_____
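###Markdown
Besides methods like `ffill`, `.fillna` accepts a scalar, or a dict mapping column names to fill values, one value per column:
###Code
df.fillna({'GDP': 0, 'CPIAUCSL': cpi.mean()}).head()
###Output
_____no_output_____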
###Markdown
Missing data will come up throughout. Joining Pandas ObjectsYou have some options:1. `pd.merge`: SQL-style joins2. `pd.concat`: array-style joins You'll run into problems where you have multiple `Series` or `DataFrame`s, that you want to join into a single `DataFrame`.We saw an example of this earlier, but let's follow it up as a pair of exercises.There are two main ways to do this, `pd.merge` and `pd.concat`.When to use `merge` vs. `concat`?My general rule is to use `concat` for one-to-one joins of two or more Series/DataFrames, where your joining on the index.I use `pd.merge` when doing database style joins that are one-to-many, or many-to-many, or whenever you're joining on a column. Exercise: Merge DatasetsUse [`pd.merge`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.merge.html) to join the two DataFrames `gdp_bad` and `cpi_bad`, using an *outer* join (earlier we used an *inner* join).- Hint: You may want to sort by date afterward (see [`DataFrame.sort_values`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html))
###Code
# Your solution
%load solutions/aligment_merge.py
###Output
_____no_output_____
###Markdown
Exercise: Concatenate DatasetsUse [`pd.concat`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html) to stick together `gdp` and `cpi` into a DataFrame- Hint: what should the argument to `axis` be?
###Code
# Your solution
%load solutions/aligment_concat.py
###Output
_____no_output_____
###Markdown
ufuncs And ReductionsThese next couple of topics aren't really related to alignment, but I didn't have anywhere else to put them.NumPy has the concept of [universal functions](https://docs.scipy.org/doc/numpy/reference/ufuncs.html) (ufuncs) that operate on any sized array.
###Code
np.log(df)
###Output
_____no_output_____
###Markdown
`ufuncs` work elementwise, which means they don't care about the dimensions, just the data types.Even something like adding a scalar is a ufunc.
###Code
df + 100
###Output
_____no_output_____
###Markdown
Reductions`DataFrame` has many methods that *reduce* a DataFrame to a Series by aggregating over a dimension.Likewise, `Series` has many methods that collapse down to a scalar.Some examples are `.mean`, `.std`, `.max`, `.any`, `.all`. Let's get a DataFrame with two columns on a similar scale.The `pct_change` method returns the `(current - previous) / previous` for each row (with `NaN` for the first since there isn't a previous.
###Code
pct_change = df.dropna().pct_change()
pct_change.head()
pct_change.plot();
###Output
_____no_output_____
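###Markdown
As a quick check of the `(current - previous) / previous` formula, we can compute the first percent change of the GDP column by hand and compare it to the `pct_change` output:
###Code
gdp_col = df.dropna()['GDP']
manual = (gdp_col.iloc[1] - gdp_col.iloc[0]) / gdp_col.iloc[0]
manual, pct_change['GDP'].iloc[1]
###Output
_____no_output_____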
###Markdown
By default, the index (0) axis is reduced for `DataFrames`.
###Code
pct_change.mean()
###Output
_____no_output_____
###Markdown
To collapse the columns (leaving the same row labels), use the `axis` argument.Specifying `axis='columns'` or `axis=1` will aggregate over the columns
###Code
pct_change.max(axis='columns')
###Output
_____no_output_____
###Markdown
If you have trouble remembering, the `axis` argument specifies the axis you want to *remove*.
###Code
# Which column had the larger percent change?
pct_change.idxmax(axis="columns")
###Output
_____no_output_____
###Markdown
Exercise: Percent PositiveWhat percent of the periods had a positive percent change for each column?
###Code
%load solutions/alignment_positive.py
###Output
_____no_output_____ |
HeroesOfPymoli/HeroesOfPymoli_starterU.ipynb | ###Markdown
Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
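# Preview the first few rows to confirm the file loaded as expected
purchase_data.head()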
###Output
_____no_output_____ |
notebooks/classification_examples/pytorch.ipynb | ###Markdown
Classification with Delira and PyTorch - A very short introduction*Author: Justus Schock* *Date: 31.07.2019*This example shows how to set up a basic classification model and experiment using PyTorch.Let's first set up the essential hyperparameters. We will use `delira`'s `Parameters`-class for this:
###Code
logger = None
import torch
from delira.training import Parameters
params = Parameters(fixed_params={
"model": {
"in_channels": 1,
"n_outputs": 10
},
"training": {
"batch_size": 64, # batchsize to use
"num_epochs": 10, # number of epochs to train
"optimizer_cls": torch.optim.Adam, # optimization algorithm to use
"optimizer_params": {'lr': 1e-3}, # initialization parameters for this algorithm
"losses": {"CE": torch.nn.CrossEntropyLoss()}, # the loss function
"lr_sched_cls": None, # the learning rate scheduling algorithm to use
"lr_sched_params": {}, # the corresponding initialization parameters
"metrics": {} # and some evaluation metrics
}
})
###Output
_____no_output_____
###Markdown
Since we did not specify any metric, only the `CrossEntropyLoss` will be calculated for each batch. Since we have a classification task, this should be sufficient. We will train our network with a batch size of 64, using `Adam` as the optimizer of choice. Logging and VisualizationTo get a visualization of our results, we should monitor them somehow. For logging we will use `Tensorboard`. By default the logging directory will be the same as our experiment directory. Data Preparation LoadingNext we will create some fake data. For this we use the `ClassificationFakeData`-Dataset, which is already implemented in `deliravision`. To avoid getting the exact same data from both datasets, we use a random offset.
###Code
from deliravision.data.fakedata import ClassificationFakeData
dataset_train = ClassificationFakeData(num_samples=10000,
img_size=(3, 32, 32),
num_classes=10)
dataset_val = ClassificationFakeData(num_samples=1000,
img_size=(3, 32, 32),
num_classes=10,
rng_offset=10001
)
###Output
_____no_output_____
###Markdown
AugmentationFor Data-Augmentation we will apply a few transformations:
###Code
from batchgenerators.transforms import RandomCropTransform, \
ContrastAugmentationTransform, Compose
from batchgenerators.transforms.spatial_transforms import ResizeTransform
from batchgenerators.transforms.sample_normalization_transforms import MeanStdNormalizationTransform
transforms = Compose([
RandomCropTransform(24), # Perform Random Crops of Size 24 x 24 pixels
ResizeTransform(32), # Resample these crops back to 32 x 32 pixels
ContrastAugmentationTransform(), # randomly adjust contrast
MeanStdNormalizationTransform(mean=[0.5], std=[0.5])])
###Output
_____no_output_____
###Markdown
With these transformations we can now wrap our datasets into datamanagers:
###Code
from delira.data_loading import DataManager, SequentialSampler, RandomSampler
manager_train = DataManager(dataset_train, params.nested_get("batch_size"),
transforms=transforms,
sampler_cls=RandomSampler,
n_process_augmentation=4)
manager_val = DataManager(dataset_val, params.nested_get("batch_size"),
transforms=transforms,
sampler_cls=SequentialSampler,
n_process_augmentation=4)
###Output
_____no_output_____
###Markdown
ModelAfter we have done that, we can specify our model: We will use a smaller version of a [VGG-Network](https://arxiv.org/pdf/1409.1556.pdf) in this case. We will use more convolutions to reduce the feature dimensionality and reduce the number of units in the linear layers to save memory (and we only have to deal with 10 classes, not the 1000 ImageNet classes).
###Code
from delira.models import AbstractPyTorchNetwork
import torch
class Flatten(torch.nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class SmallVGGPyTorch(AbstractPyTorchNetwork):
    def __init__(self, in_channels, n_outputs):  # argument names match the keys in params["model"]
super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels, 64, 3, padding=1), # 32 x 32
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 16 x 16
torch.nn.Conv2d(64, 128, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 8 x 8
torch.nn.Conv2d(128, 256, 3, padding=1), # 4 x 4
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 4 x 4
torch.nn.Conv2d(256, 512, 3, padding=1),
torch.nn.ReLU(),
            torch.nn.MaxPool2d(2), # 2 x 2 (MaxPool2d requires a kernel size)
torch.nn.Conv2d(512, 512, 3, padding=1),
torch.nn.ReLU(),
            torch.nn.MaxPool2d(2), # 1 x 1
Flatten(),
            torch.nn.Linear(1*1*512, n_outputs),
)
def forward(self, x: torch.Tensor):
return {"pred": self.model(x)}
@staticmethod
def prepare_batch(data_dict, input_device, output_device):
return_dict = {"data": torch.from_numpy(batch["data"]).to(
input_device).to(torch.float)}
for key, vals in batch.items():
if key == "data":
continue
return_dict[key] = torch.from_numpy(vals).to(output_device).to(
torch.float)
return return_dict
@staticmethod
def closure(model, data_dict: dict, optimizers: dict, losses: dict,
fold=0, **kwargs):
loss_vals = {}
total_loss = 0
# predict
inputs = data_dict.pop("data")
preds = model(inputs)
# calculate losses
for key, crit_fn in losses.items():
_loss_val = crit_fn(preds["pred"], data_dict["label"])
loss_vals[key] = _loss_val.item()
total_loss += _loss_val
optimizers['default'].zero_grad()
        # plain backprop; apex loss scaling would go here if half precision
        # were enabled (the original called scale_loss without importing it)
        total_loss.backward()
optimizers['default'].step()
return loss_vals, {k: v.detach()
for k, v in preds.items()}
###Output
_____no_output_____
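###Markdown
As a quick sanity check (a minimal sketch, not part of the original training workflow), we can instantiate the network directly and push a random batch through it to confirm that the output shape matches our 10 classes:
###Code
net = SmallVGGPyTorch(in_channels=3, n_outputs=10)
with torch.no_grad():
    out = net(torch.randn(2, 3, 32, 32))  # a random batch of 2 "images"
print(out["pred"].shape)  # expected: torch.Size([2, 10])
###Output
_____no_output_____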
###Markdown
So let's revisit what we have just done.In `delira` all networks must be derived from `delira.models.AbstractNetwork`. For each backend there is a class derived from this class, handling some backend-specific function calls and registrations. For the `PyTorch` backend this class is `AbstractPyTorchNetwork` and all PyTorch networks should be derived from it.First we defined the network itself (this is the part simply concatenating the layers into a sequential model). Next, we defined the logic to apply, when we want to predict from the model (this is the `forward` method).So far this was plain `PyTorch`. The `prepare_batch` function is not plain PyTorch anymore, but allows us to ensure the data is in the correct shape, has the correct data-type and lies on the correct device. The function above is the standard `prepare_batch` function, which is also implemented in the `AbstractPyTorchNetwork` and just re-implemented here for the sake of completeness.Same goes for the `closure` function. This function defines the update rule for our parameters (and how to calculate the losses). These functions are good to go for many simple networks but can be overwritten for customization when training more complex networks.
###Code
import warnings
warnings.simplefilter("ignore", UserWarning) # ignore UserWarnings raised by dependency code
warnings.simplefilter("ignore", FutureWarning) # ignore FutureWarnings raised by dependency code
from delira.training import PyTorchExperiment
from delira.training.train_utils import create_optims_default_pytorch
if logger is not None:
logger.info("Init Experiment")
experiment = PyTorchExperiment(params, SmallVGGPyTorch,
name="ClassificationExample",
save_path="./tmp/delira_Experiments",
optim_builder=create_optims_default_pytorch,
key_mapping={"x": "data"}
gpu_ids=[0])
experiment.save()
model = experiment.run(manager_train, manager_val)
###Output
_____no_output_____
###Markdown
Congratulations, you have now trained your first Classification Model using `delira`, we will now predict a few samples from the testset to show, that the networks predictions are valid (for now, this is done manually, but we also have a `Predictor` class to automate stuff like this):
###Code
import numpy as np
from tqdm.auto import tqdm # utility for progress bars
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # set device (use GPU if available)
model = model.to(device) # push model to device
preds, labels = [], []
with torch.no_grad():
for i in tqdm(range(len(dataset_val))):
img = dataset_val[i]["data"] # get image from current batch
img_tensor = torch.from_numpy(img).unsqueeze(0).to(device).to(torch.float) # create a tensor from image, push it to device and add batch dimension
pred_tensor = model(img_tensor) # feed it through the network
pred = pred_tensor.argmax(1).item() # get index with maximum class confidence
label = np.asscalar(dataset_val[i]["label"]) # get label from batch
if i % 1000 == 0:
print("Prediction: %d \t label: %d" % (pred, label)) # print result
preds.append(pred)
labels.append(label)
# calculate accuracy
accuracy = (np.asarray(preds) == np.asarray(labels)).sum() / len(preds)
print("Accuracy: %.3f" % accuracy)
###Output
_____no_output_____
###Markdown
Classification with Delira and PyTorch - A very short introduction*Author: Justus Schock* *Date: 31.07.2019*This example shows how to set up a basic classification model and experiment using PyTorch.Let's first set up the essential hyperparameters. We will use `delira`'s `Parameters`-class for this:
###Code
logger = None
import torch
from delira.training import Parameters
params = Parameters(fixed_params={
"model": {
"in_channels": 1,
"n_outputs": 10
},
"training": {
"batch_size": 64, # batchsize to use
"num_epochs": 10, # number of epochs to train
"optimizer_cls": torch.optim.Adam, # optimization algorithm to use
"optimizer_params": {'lr': 1e-3}, # initialization parameters for this algorithm
"losses": {"CE": torch.nn.CrossEntropyLoss()}, # the loss function
"lr_sched_cls": None, # the learning rate scheduling algorithm to use
"lr_sched_params": {}, # the corresponding initialization parameters
"metrics": {} # and some evaluation metrics
}
})
###Output
_____no_output_____
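###Markdown
The `Parameters` object supports nested lookups via `nested_get`, which is also how the data managers below retrieve the batch size. A quick sketch:
###Code
# Sketch: nested_get searches the nested parameter dict for the given key
print(params.nested_get("batch_size"))   # -> 64
print(params.nested_get("num_epochs"))   # -> 10
###Output
_____no_output_____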
###Markdown
Since we did not specify any metric, only the `CrossEntropyLoss` will be calculated for each batch. Since we have a classification task, this should be sufficient. We will train our network with a batch size of 64, using `Adam` as our optimizer of choice. Logging and VisualizationTo get a visualization of our results, we should monitor them somehow. For logging we will use `Tensorboard`. By default the logging directory will be the same as our experiment directory. Data Preparation LoadingNext we will create some fake data. For this we use the `ClassificationFakeData`-Dataset, which is already implemented in `deliravision`. To avoid getting the exact same data from both datasets, we use a random offset.
###Code
from deliravision.data.fakedata import ClassificationFakeData
dataset_train = ClassificationFakeData(num_samples=10000,
img_size=(3, 32, 32),
num_classes=10)
dataset_val = ClassificationFakeData(num_samples=1000,
img_size=(3, 32, 32),
num_classes=10,
rng_offset=10001
)
###Output
_____no_output_____
###Markdown
AugmentationFor Data-Augmentation we will apply a few transformations:
###Code
from batchgenerators.transforms import RandomCropTransform, \
ContrastAugmentationTransform, Compose
from batchgenerators.transforms.spatial_transforms import ResizeTransform
from batchgenerators.transforms.sample_normalization_transforms import MeanStdNormalizationTransform
transforms = Compose([
RandomCropTransform(24), # Perform Random Crops of Size 24 x 24 pixels
ResizeTransform(32), # Resample these crops back to 32 x 32 pixels
ContrastAugmentationTransform(), # randomly adjust contrast
MeanStdNormalizationTransform(mean=[0.5], std=[0.5])])
###Output
_____no_output_____
###Markdown
With these transformations we can now wrap our datasets into datamanagers:
###Code
from delira.data_loading import BaseDataManager, SequentialSampler, RandomSampler
manager_train = BaseDataManager(dataset_train, params.nested_get("batch_size"),
transforms=transforms,
sampler_cls=RandomSampler,
n_process_augmentation=4)
manager_val = BaseDataManager(dataset_val, params.nested_get("batch_size"),
transforms=transforms,
sampler_cls=SequentialSampler,
n_process_augmentation=4)
###Output
_____no_output_____
###Markdown
ModelAfter we have done that, we can specify our model: we will use a smaller version of a [VGG-Network](https://arxiv.org/pdf/1409.1556.pdf) in this case. We use more convolutions to reduce the feature dimensionality and fewer units in the linear layers to save memory (we only have to deal with 10 classes, not the 1000 ImageNet classes).
###Code
from delira.models import AbstractPyTorchNetwork
import torch
class Flatten(torch.nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class SmallVGGPyTorch(AbstractPyTorchNetwork):
def __init__(self, in_channels, num_classes):
super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels, 64, 3, padding=1), # 32 x 32
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 16 x 16
torch.nn.Conv2d(64, 128, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 8 x 8
            torch.nn.Conv2d(128, 256, 3, padding=1), # 8 x 8
torch.nn.ReLU(),
torch.nn.MaxPool2d(2), # 4 x 4
torch.nn.Conv2d(256, 512, 3, padding=1),
torch.nn.ReLU(),
            torch.nn.MaxPool2d(2), # 2 x 2
torch.nn.Conv2d(512, 512, 3, padding=1),
torch.nn.ReLU(),
            torch.nn.MaxPool2d(2), # 1 x 1
Flatten(),
torch.nn.Linear(1*1*512, num_classes),
)
def forward(self, x: torch.Tensor):
return {"pred": self.model(x)}
@staticmethod
def prepare_batch(data_dict, input_device, output_device):
return_dict = {"data": torch.from_numpy(batch["data"]).to(
input_device).to(torch.float)}
for key, vals in batch.items():
if key == "data":
continue
return_dict[key] = torch.from_numpy(vals).to(output_device).to(
torch.float)
return return_dict
@staticmethod
def closure(model, data_dict: dict, optimizers: dict, losses: dict,
fold=0, **kwargs):
loss_vals = {}
total_loss = 0
# predict
inputs = data_dict.pop("data")
preds = model(inputs)
# calculate losses
for key, crit_fn in losses.items():
            _loss_val = crit_fn(preds["pred"], data_dict["label"].long())  # CrossEntropyLoss expects integer class targets
loss_vals[key] = _loss_val.item()
total_loss += _loss_val
optimizers['default'].zero_grad()
# perform loss scaling via apex if half precision is enabled
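        # (scale_loss is assumed to be provided by delira's training utilities / apex here)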
with scale_loss(total_loss, optimizers["default"]) as scaled_loss:
scaled_loss.backward()
optimizers['default'].step()
return loss_vals, {k: v.detach()
for k, v in preds.items()}
###Output
_____no_output_____
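###Markdown
Before wiring the network into an experiment, it can help to sanity-check the forward pass. The following is a small sketch (not part of the original example) that feeds a random batch through the network and inspects the output dictionary:
###Code
# Sketch: instantiate the network and run a dummy batch through it
net = SmallVGGPyTorch(in_channels=3, num_classes=10)
dummy = torch.randn(4, 3, 32, 32)   # a batch of 4 fake 3-channel 32 x 32 images
out = net(dummy)
print(out["pred"].shape)            # expected: torch.Size([4, 10])
###Output
_____no_output_____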
###Markdown
So let's revisit what we have just done. In `delira` all networks must be derived from `delira.models.AbstractNetwork`. For each backend there is a class derived from this class, handling some backend-specific function calls and registrations. For the `PyTorch` backend this class is `AbstractPyTorchNetwork`, and all PyTorch networks should be derived from it. First we defined the network itself (this is the part simply concatenating the layers into a sequential model). Next, we defined the logic to apply when we want to predict from the model (this is the `forward` method). So far this was plain `PyTorch`. The `prepare_batch` function is not plain PyTorch anymore, but it allows us to ensure the data is in the correct shape, has the correct data type and lies on the correct device. The function above is the standard `prepare_batch` function, which is also implemented in the `AbstractPyTorchNetwork` and re-implemented here only for the sake of completeness. The same goes for the `closure` function. This function defines the update rule for our parameters (and how to calculate the losses). These functions are good to go for many simple networks but can be overwritten for customization when training more complex networks. TrainingNow that we have defined our network, we can finally specify our experiment and run it.
###Code
import warnings
warnings.simplefilter("ignore", UserWarning) # ignore UserWarnings raised by dependency code
warnings.simplefilter("ignore", FutureWarning) # ignore FutureWarnings raised by dependency code
from delira.training import PyTorchExperiment
from delira.training.train_utils import create_optims_default_pytorch
if logger is not None:
logger.info("Init Experiment")
experiment = PyTorchExperiment(params, SmallVGGPyTorch,
name="ClassificationExample",
save_path="./tmp/delira_Experiments",
optim_builder=create_optims_default_pytorch,
                               key_mapping={"x": "data"},
                               gpu_ids=[0])
experiment.save()
model = experiment.run(manager_train, manager_val)
###Output
_____no_output_____
###Markdown
Congratulations, you have now trained your first classification model using `delira`. We will now predict a few samples from the test set to show that the network's predictions are valid (for now this is done manually, but there is also a `Predictor` class to automate tasks like this):
###Code
import numpy as np
from tqdm.auto import tqdm # utility for progress bars
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # set device (use GPU if available)
model = model.to(device) # push model to device
preds, labels = [], []
with torch.no_grad():
for i in tqdm(range(len(dataset_val))):
img = dataset_val[i]["data"] # get image from current batch
img_tensor = torch.from_numpy(img).unsqueeze(0).to(device).to(torch.float) # create a tensor from image, push it to device and add batch dimension
        pred_tensor = model(img_tensor)["pred"] # feed it through the network; forward returns a dict
pred = pred_tensor.argmax(1).item() # get index with maximum class confidence
        label = int(dataset_val[i]["label"]) # get label from batch (np.asscalar is deprecated)
if i % 1000 == 0:
print("Prediction: %d \t label: %d" % (pred, label)) # print result
preds.append(pred)
labels.append(label)
# calculate accuracy
accuracy = (np.asarray(preds) == np.asarray(labels)).sum() / len(preds)
print("Accuracy: %.3f" % accuracy)
###Output
_____no_output_____ |
games/morpion/Morpion.ipynb | ###Markdown
Projet Morpionby *Hélène and Victoria*; Bugnon Ours, oc.info 2018/2019. Morpion (French for tic-tac-toe) is a simple game played on a 3x3 grid. The goal is to line up 3 tokens in a column, a row or a diagonal. The game is played on the SenseHAT platform for the Raspberry Pi. In this notebook, code fragments are explained; sometimes the result appears on the SenseHAT and sometimes in the Out cell. To use the SenseHAT we must import the SenseHat module to get access to its functions, and create a SenseHat() instance to access its methods.
###Code
from sense_hat import SenseHat
from time import sleep, time
sense = SenseHat()
###Output
_____no_output_____
###Markdown
Next we define the variables: for example, the colors that represent player 1.
###Code
X = (255, 255, 255)
O = (0, 0, 0)
P1 = (0, 0, 255)
P2 = (255, 255, 0)
colors = (O, P1, P2)
score1 = 0
score2 = 0
print(colors)
###Output
((0, 0, 0), (0, 0, 255), (255, 255, 0))
###Markdown
Definition init() * We need an initialization function in which we define the initial state of the game, which will then be modified by the players. * For the variables named *state*, *board* and *state_to_board* to be usable everywhere in the code, we have to use the **global** keyword. We then create *board*, the grid on which the game takes place. Finally we create a mapping in which 4 pixels represent one element of the 3x3 *state* matrix, taking care to skip the pixels that correspond to the grid lines.
###Code
def init():
global state
global board
global state_to_board
board = [
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
]
state_to_board = [[(0, 1, 8, 9), (3, 4, 11, 12), (6, 7, 14, 15)],
[(24, 25, 32, 33), (27, 28, 35, 36), (30, 31, 38, 39)],
[(48, 49, 56, 57), (51, 52, 59, 60), (54, 55, 62, 63)]]
state = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
board = [
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
]
board[:3]
###Output
_____no_output_____
###Markdown
We can see that it only prints the first three entries of *board*. This is what our *board* looks like in its initial state. Definition show_board(board, state) This function lets us display the different states of the game. We iterate over the *state* and *state_to_board* lists to obtain the four pixels that form each cell (i.e. their coordinates). We then map the cell state, which is 0, 1 or 2, to a position in the *colors* list, which is used to display the state of the game: for example, yellow indicates that player 2 has selected the cell.
###Code
def show_board(board, state):
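    # map each 3x3 state entry onto the four pixels of its cell on the 8x8 LED matrix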
for y in range(len(state)):
for x, s in enumerate(state[y]):
c = colors[s]
for index in state_to_board[y][x]:
board[index] = c
sense.set_pixels(board)
show_board(board, state)
board = [
1, 1, X, O, O, X, O, O,
1, 1, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
X, X, X, X, X, X, X, X,
O, O, X, O, O, X, O, O,
O, O, X, O, O, X, O, O,
]
state = [[1, 0, 0], [0, 0, 0], [0, 0, 0]]
def show_board(board, state):
for y in range(len(state)):
for x, s in enumerate(state[y]):
c = colors[s]
for index in state_to_board[y][x]:
board[index] = c
sense.set_pixels(board)
show_board(board, state)
###Output
_____no_output_____
###Markdown
Definition is_winning(p, state) We list all the cases in which a player wins. For this, three cells aligned according to the rules of tic-tac-toe must all have the same state. If that is the case the function returns **True**, otherwise **False**.
###Code
def is_winning(p, state):
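    # three rows, three columns, then the two diagonals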
return state[0][0] == state[0][1] == state[0][2] == p or \
state[1][0] == state[1][1] == state[1][2] == p or \
state[2][0] == state[2][1] == state[2][2] == p or \
state[0][0] == state[1][0] == state[2][0] == p or \
state[0][1] == state[1][1] == state[2][1] == p or \
state[0][2] == state[1][2] == state[2][2] == p or \
state[0][0] == state[1][1] == state[2][2] == p or \
state[0][2] == state[1][1] == state[2][0] == p
is_winning(1, state)
state = [[1, 1, 1], [0, 0, 0], [0, 0, 0]]
show_board(board, state)
is_winning(2, state)
###Output
_____no_output_____
###Markdown
Definition is_draw(state) If none of the is_winning(p, state) cases occur and all the cells are in state 1 or 2, the game is a draw. If any cell is still in state 0 the function returns **False**, otherwise it returns **True**.
###Code
def is_draw(state):
for i in state:
for s in i:
if s == 0:
return False
return True
state = [[1,2,1],[1,2,2],[2,1,1]]
is_draw(state)
###Output
_____no_output_____
###Markdown
Definition play(p, board, state) * This function handles a player's move. * First we place the cursor at its initial position ((x, y) = (1, 1)). At this point the player has not played yet, so the cursor color is halved: we walk through the *state_to_board* list to get the four pixels of the cell and fill them with the player's entry from the *colors* list divided by two. * Then we start a loop that waits for a joystick event and uses the *dirs* dictionary to modify the cursor coordinates. When a direction is pressed, its offset is added to the current coordinates; since the coordinates must not go beyond our matrix, we wrap them with a modulo (a small worked sketch of this wrap-around follows after the code cell below). The coordinates change, but the cell is not yet selected for good, so it is still shown at reduced color intensity. * One last rule remains: a cell state can only be changed while it is still equal to 0.
###Code
def play(p,board, state):
(x, y) = (1, 1)
dirs = {'up':(0, -1), 'down':(0, 1),
'right':(1, 0), 'left':(-1, 0)}
c = tuple(int(x/2) for x in colors[p])
for index in state_to_board[y][x]:
board[index] = c
sense.set_pixels(board)
while True :
event = sense.stick.wait_for_event()
if event.action == 'pressed':
if event.direction in dirs:
(dx, dy) = dirs[event.direction]
x = (x + dx) % len(state)
y = (y + dy) % len(state)
                show_board(board, state) # avoid re-coloring the cursor's previous path
c = tuple(int(x/2) for x in colors[p])
for index in state_to_board[y][x]:
board[index] = c
sense.set_pixels(board)
else:
if state[y][x] == 0:
state[y][x] = p
show_board(board, state)
return
play(1, board, state)  # let player 1 make a move
###Output
_____no_output_____
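###Markdown
A tiny standalone sketch of the wrap-around arithmetic used above to keep the cursor on the grid:
###Code
# Sketch: moving 'right' from the last column wraps back to column 0
dirs = {'up': (0, -1), 'down': (0, 1), 'right': (1, 0), 'left': (-1, 0)}
(x, y) = (2, 1)
(dx, dy) = dirs['right']
x = (x + dx) % 3   # the state matrix has 3 columns
y = (y + dy) % 3
print(x, y)        # -> 0 1
###Output
_____no_output_____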
###Markdown
This is what happens visually while a game is in progress. Here the *blue* player has finished playing, which is why the central cell appears in light yellow: the next player has not yet changed the coordinates or pressed the joystick. Definition show_score(p) If player 1 or player 2 has won, their score is incremented by 1 and displayed at the end of the game.
###Code
def show_score(p):
global score1, score2
if p == 1:
score1 += 1
elif p == 2:
score2 += 1
msg = 'player1=' + str(score1) + ' player2=' + str(score2)
sense.show_message(msg)
p = 1
score1 = 0
show_score(p)
###Output
_____no_output_____
###Markdown
Definition end_game(p) When no player wins, the message *draw* is displayed; otherwise the number of the winning player is shown, followed by the score. To restart the game, the message *?* then appears for 3 seconds, during which the joystick must be pressed to start a new game.
###Code
def end_game(p):
if p == 0:
sense.show_message("draw")
else:
sense.show_letter(str(p))
sleep(3)
show_score(p)
return continue_game()
p = 1
end_game(p)
###Output
_____no_output_____
###Markdown
This is what is displayed visually when the winner is shown at the end of the game: Definition continue_game() If no joystick action is performed within 3 seconds, the game shuts down (**False**); otherwise another game is played (**True**).
###Code
def continue_game():
    sense.show_letter('?')
    sense.stick.get_events()   # flush any pending joystick events
t0 = time()
while time() < t0 + 3:
for event in sense.stick.get_events():
init()
            show_board(board, state)
print('continue')
return True
print('timeout')
return False
###Output
_____no_output_____
###Markdown
This is what is displayed visually when the function asks whether to continue the game: Definition main() First it checks whether the game state corresponds to a winning state or a draw. Then it looks at whether continue_game returned True or False to decide whether to play another game. To alternate the players between 1 and 2 we use the trick `player = 3 - player`. (A short usage sketch follows after the code cell below.)
###Code
def main():
init()
show_board(board, state)
player = 1
playing = True
while playing:
play(player, board, state)
if is_winning(player, state):
playing = end_game(player)
elif is_draw(state):
playing = end_game(0)
player = 3 - player
###Output
_____no_output_____ |
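###Markdown
To actually start the game, call main(). This is just a usage sketch; it requires a physical (or emulated) SenseHAT:
###Code
# Start a game of Morpion on the SenseHAT
main()
###Output
_____no_output_____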
covid19_facility_location/covid19_facility_location.ipynb | ###Markdown
COVID-19: Healthcare Facility Capacity Optimization Objective and PrerequisitesThis COVID-19 Healthcare Facility Capacity Optimization problem shows you how to determine the optimal location and capacity of healthcare facilities in order to:* Satisfy demand from COVID-19 patients for treatment,* Minimize the cost of opening temporary facilities for healthcare providers, and* Predict the allocation of COVID-19 patients from a specific county to a specific healthcare facility.This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge of how to build mathematical optimization models.**Download the Repository** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). **Gurobi License** In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_Covid19_HC_Facility_Location_COM_EVAL_GitHub&utm_term=Covid-19%20Healthcare%20Facility%20Location&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-EDU-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_Covid19_HC_Facility_Location_COM_EVAL_GitHub&utm_term=Covid-19%20Healthcare%20Facility%20Location&utm_content=C_JPM) as an *academic user*. --- Problem DescriptionHospitals in various counties throughout the US are reaching full capacity due to a surge in COVID-19 patients. Many hospitals are considering creating temporary facilities to increase their capacity to handle COVID-19 patients.In this example, we focus on nine counties in the US. Each county has existing facilities to treat COVID-19 patients, and also has the option of building temporary facilities to increase the overall capacity to handle COVID-19 patients.The following table defines the coordinates of the centroid and the forecasted demand (i.e. the projected number of COVID-19 patients) of each county. To estimate this demand, we consider the population of nine fictional counties in California, the current number of COVID-19 cases per day in California, the average percentage of COVID-19 cases who require hospitalization, and the average number of days that a COVID-19 patient stays in the hospital.| Centroid | Coordinates | Demand || --- | --- | --- || county 1 | (1, 1.5) | 351 || county 2 | (3, 1) | 230 || county 3 | (5.5, 1.5) | 529 || county 4 | (1, 4.5 ) | 339 || county 5 | (3, 3.5) | 360 || county 6 | (5.5, 4.5) | 527 || county 7 | (1, 8) | 469 || county 8 | (3, 6) | 234 || county 9 | (4.5, 8) | 500 |The following table defines the coordinates and capacity of existing facilities. The capacity of existing facilities is calculated as 80% of the forecasted demand of the county in which the existing facilities are located. 
The exception to this is county 9, where we assume that we have an excess of existing capacity.| Existing | Coordinates | Capacity || --- | --- | --- || facility 1 | (1, 2) | 281 || facility 2 | (2.5, 1) | 187 || facility 3 | (5, 1) | 200 || facility 4 | (6.5, 3.5) | 223 || facility 5 | (1, 5) | 281 || facility 6 | (3, 4) | 281 || facility 7 | (5, 4) | 222 || facility 8 | (6.5, 5.5) | 200 || facility 9 | (1, 8.5) | 250 || facility 10 | (1.5, 9.5) | 125 || facility 11 | (8.5, 6) | 187 || facility 12 | (5, 8) | 300 || facility 13 | (3, 9) | 300 || facility 14 | (6, 9) | 243 |The following table defines the coordinates and capacity of new temporary facilities. The cost of building a temporary facilitywith a capacity of treating one hundred COVID-19 patients is $\$500,000$.| Temporary | Coordinates | Capacity || --- | --- | --- || facility 15 | (1.5, 1) | 100 || facility 16 | (3.5, 1.5) | 100 || facility 17 | (5.5, 2.5) | 100 || facility 18 | (1.5, 3.5) | 100 || facility 19 | (3.5, 2.5) | 100 || facility 20 | (4.5, 4.5) | 100 || facility 21 | (1.5, 6.5) | 100 || facility 22 | (3.5, 6.5) | 100 || facility 23 | (5.5, 6.5) | 100 |The coordinates of the three tables are in tens of miles. We assume that each increase of 10 miles in the distance to a COVID-19 facility results in a $\$5$ increase in driving costs for each COVID-19 patient.In this example, the goal is to identify which temporary facilities to build in order to be able to accommodate demand for treatment by COVID-19 patients while minimizing the total cost of COVID-19 patients driving to an existing or temporary COVID-19 facility and the total cost of building temporary facilities.This example shows how a Facility Location mixed-integer programming (MIP) model can help healthcare providers make decisions about:* How to best utilize their capacity, * Whether to build temporary facilities for COVID-19 patients, and* How COVID-19 patients from a county should be allocated to various healthcare facilities in order to ensure that the facilities have the capacity to provide treatment for the patients.This Jupyter Notebook is based on the paper written by Katherine Klise and Michael Bynum [1]. Model Formulation Sets and Indices$e \in E$: Index and set of existing healthcare facility locations.$t \in T$: Index and set of temporary healthcare facility locations.$f \in F = E \cup T$: Index and set of all healthcare facility locations.$c \in C$: Index and set of counties. Parameters$Dist_{c,f} \in \mathbb{R}^+$: Distance between county $c$ and facility location $f$.$Dem_{c} \in \mathbb{R}^+$: Expected number of people in county $c$ who will need a COVID-19 facility.$Cap_{f} \in \mathbb{R}^+$: Number of people that can be served by a facility at location $f$.$\text{dCost} = 5$: Cost of driving 10 miles.$\text{tFCost} = 500,000$: Cost of building a temporary COVID-19 facility with a capacity of treating 100 COVID-19 patients.$bigM$: Penalty of adding extra capacity at temporary facilities in order to satisfy treatment of COVID-19 patients demand. Decision Variables$y_{t} \in \{0, 1 \}$: This variable is equal to 1 if we build a temporary facility at location $t$; and 0 otherwise.$ x_{c,f} \in \mathbb{R}^+$: Number of people from county $c$ served by a facility at location $f$.$z_{t} \in \mathbb{R}^+$: Extra capacity added at temporary facility location $t$. Objective Function- **Cost**. We want to minimize the total cost of patients driving from a county to a healthcare facility and the total cost of building temporary COVID-19 treatment capacity. 
The last term with the big penalty coefficient ($bigM$), enables extra capacity to be added at a temporary facility to ensure that total demand is satisfied. \begin{equation}\text{Min} \quad Z = \sum_{c \in C} \sum_{f \in F} \text{dCost} *Dist_{c,f} * x_{c,f} + \text{tFCost}*\sum_{t \in T} y_{t} + bigM*\sum_{t \in T} z_{t}\tag{0}\end{equation} Constraints- **Demand**. Satisfy county demand of service from a COVID-19 facility.\begin{equation}\sum_{f \in F} x_{c,f} = Dem_{c} \quad \forall c \in C\tag{1}\end{equation}- **Existing facilities**. Capacity of an existing location of a facility cannot be exceeded.\begin{equation}\sum_{c \in C} x_{c,e} \leq Cap_{e} \quad \forall e \in E\tag{2}\end{equation}- **Temporary facilities**. Capacity of a temporary location of a facility cannot be exceeded. Please observe that extra capacity can be added.\begin{equation}\sum_{c \in C} x_{c,t} \leq Cap_{t}*y_{t} + z_{t} \quad \forall t \in T\tag{3}\end{equation} --- Python ImplementationWe now import the Gurobi Python Module and other Python libraries.
###Code
from itertools import product
from math import sqrt
import gurobipy as gp
from gurobipy import GRB
# tested with Gurobi v9.1.0 and Python 3.7.0
###Output
_____no_output_____
###Markdown
--- Helper Functions* `compute_distance` computes distance between a county centroid and the location of a facility* `solve_covid19_facility` builds, solves, and prints results of the COVID-19 healthcare facility capacity optimization model
###Code
def compute_distance(loc1, loc2):
# This function determines the Euclidean distance between a facility and a county centroid.
dx = loc1[0] - loc2[0]
dy = loc1[1] - loc2[1]
return sqrt(dx*dx + dy*dy)
def solve_covid19_facility(c_coordinates, demand):
#####################################################
# Data
#####################################################
# Indices for the counties
counties = [*range(1,10)]
# Indices for the facilities
facilities = [*range(1,24)]
# Create a dictionary to capture the coordinates of an existing facility and capacity of treating COVID-19 patients
existing, e_coordinates, e_capacity = gp.multidict({
1: [(1, 2), 281],
2: [(2.5, 1), 187],
3: [(5, 1), 200],
4: [(6.5, 3.5), 223],
5: [(1, 5), 281],
6: [(3, 4), 281],
7: [(5, 4), 222],
8: [(6.5, 5.5), 200],
9: [(1, 8.5), 250],
10: [(1.5, 9.5), 125],
11: [(8.5, 6), 187],
12: [(5, 8), 300],
13: [(3, 9), 300],
14: [(6, 9), 243]
})
# Create a dictionary to capture the coordinates of a temporary facility and capacity of treating COVID-19 patients
temporary, t_coordinates, t_capacity = gp.multidict({
15: [(1.5, 1), 100],
16: [(3.5, 1.5), 100],
17: [(5.5, 2.5), 100],
18: [(1.5, 3.5), 100],
19: [(3.5, 2.5), 100],
20: [(4.5, 4.5), 100],
21: [(1.5, 6.5), 100],
22: [(3.5, 6.5), 100],
23: [(5.5, 6.5), 100]
})
# Cost of driving 10 miles
dcost = 5
    # Cost of building a temporary facility able to treat one hundred COVID-19 patients
tfcost = 500000
# Compute key parameters of MIP model formulation
f_coordinates = {}
for e in existing:
f_coordinates[e] = e_coordinates[e]
for t in temporary:
f_coordinates[t] = t_coordinates[t]
# Cartesian product of counties and facilities
cf = []
for c in counties:
for f in facilities:
tp = c,f
cf.append(tp)
# Compute distances between counties centroids and facility locations
distance = {(c,f): compute_distance(c_coordinates[c], f_coordinates[f]) for c, f in cf}
#####################################################
# MIP Model Formulation
#####################################################
m = gp.Model('covid19_temporary_facility_location')
# Build temporary facility
y = m.addVars(temporary, vtype=GRB.BINARY, name='temporary')
# Assign COVID-19 patients of county to facility
x = m.addVars(cf, vtype=GRB.CONTINUOUS, name='Assign')
# Add capacity to temporary facilities
z = m.addVars(temporary, vtype=GRB.CONTINUOUS, name='addCap' )
# Objective function: Minimize total distance to drive to a COVID-19 facility
# Big penalty for adding capacity at a temporary facility
bigM = 1e9
m.setObjective(gp.quicksum(dcost*distance[c,f]*x[c,f] for c,f in cf)
+ tfcost*y.sum()
+ bigM*z.sum(), GRB.MINIMIZE)
# Counties demand constraints
demandConstrs = m.addConstrs((gp.quicksum(x[c,f] for f in facilities) == demand[c] for c in counties),
name='demandConstrs')
# Existing facilities capacity constraints
existingCapConstrs = m.addConstrs((gp.quicksum(x[c,e] for c in counties) <= e_capacity[e] for e in existing ),
name='existingCapConstrs')
# temporary facilities capacity constraints
temporaryCapConstrs = m.addConstrs((gp.quicksum(x[c,t] for c in counties) -z[t]
<= t_capacity[t]*y[t] for t in temporary ),
name='temporaryCapConstrs')
# Run optimization engine
m.optimize()
#####################################################
# Output Reports
#####################################################
# Total cost of building temporary facility locations
temporary_facility_cost = 0
print(f"\n\n_____________Optimal costs______________________")
for t in temporary:
if (y[t].x > 0.5):
temporary_facility_cost += tfcost*round(y[t].x)
patient_allocation_cost = 0
for c,f in cf:
if x[c,f].x > 1e-6:
patient_allocation_cost += dcost*round(distance[c,f]*x[c,f].x)
print(f"The total cost of building COVID-19 temporary healhtcare facilities is ${temporary_facility_cost:,}")
print(f"The total cost of allocating COVID-19 patients to healtcare facilities is ${patient_allocation_cost:,}")
# Build temporary facility at location
print(f"\n_____________Plan for temporary facilities______________________")
for t in temporary:
if (y[t].x > 0.5):
print(f"Build a temporary facility at location {t}")
# Extra capacity at temporary facilities
print(f"\n_____________Plan to increase Capacity at temporary Facilities______________________")
for t in temporary:
if (z[t].x > 1e-6):
print(f"Increase temporary facility capacity at location {t} by {round(z[t].x)} beds")
# Demand satisfied at each facility
f_demand = {}
print(f"\n_____________Allocation of county patients to COVID-19 healthcare facility______________________")
for f in facilities:
temp = 0
for c in counties:
allocation = round(x[c,f].x)
if allocation > 0:
print(f"{allocation} COVID-19 patients from county {c} are treated at facility {f} ")
temp += allocation
f_demand[f] = temp
print(f"{temp} is the total number of COVID-19 patients that are treated at facility {f}. ")
print(f"\n________________________________________________________________________________")
# Test total demand = total demand satisfied by facilities
total_demand = 0
for c in counties:
total_demand += demand[c]
demand_satisfied = 0
for f in facilities:
demand_satisfied += f_demand[f]
print(f"\n_____________Test demand = supply______________________")
print(f"Total demand is: {total_demand:,} patients")
print(f"Total demand satisfied is: {demand_satisfied:,} beds")
###Output
_____no_output_____
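###Markdown
As a quick sanity check of the distance helper (a sketch using the county 1 centroid and existing facility 1 from the tables above; coordinates are in tens of miles):
###Code
# County 1 centroid is (1, 1.5) and existing facility 1 sits at (1, 2)
print(compute_distance((1, 1.5), (1, 2)))   # -> 0.5, i.e. 5 miles
###Output
_____no_output_____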
###Markdown
Base ScenarioIn this scenario, we consider the data described for the instance of the COVID-19 Healthcare Facility Capacity Optimization problem. The forecasted demand is as defined in the first table of the problem description.
###Code
# Create a dictionary to capture the coordinates of a county and the demand of COVID-19 treatment
counties, coordinates, forecast = gp.multidict({
1: [(1, 1.5), 351],
2: [(3, 1), 230],
3: [(5.5, 1.5), 529],
4: [(1, 4.5 ), 339],
5: [(3, 3.5), 360],
6: [(5.5, 4.5), 527],
7: [(1, 8), 469],
8: [(3, 6), 234],
9: [(4.5, 8), 500]
})
# find the optimal solution of the base scenario
solve_covid19_facility(coordinates, forecast)
###Output
Using license file c:\gurobi\gurobi.lic
Gurobi Optimizer version 9.1.0 build v9.1.0rc0 (win64)
Thread count: 4 physical cores, 8 logical processors, using up to 8 threads
Optimize a model with 32 rows, 225 columns and 432 nonzeros
Model fingerprint: 0xbb38e066
Variable types: 216 continuous, 9 integer (9 binary)
Coefficient statistics:
Matrix range [1e+00, 1e+02]
Objective range [3e+00, 1e+09]
Bounds range [1e+00, 1e+00]
RHS range [1e+02, 5e+02]
Presolve time: 0.00s
Presolved: 32 rows, 225 columns, 432 nonzeros
Variable types: 216 continuous, 9 integer (9 binary)
Root relaxation: objective 1.317174e+06, 58 iterations, 0.00 seconds
Nodes | Current Node | Objective Bounds | Work
Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
0 0 1317174.01 0 3 - 1317174.01 - - 0s
H 0 0 2020218.8911 1317174.01 34.8% - 0s
H 0 0 1522423.0545 1317174.01 13.5% - 0s
H 0 0 1522032.8389 1317174.01 13.5% - 0s
0 0 1317555.15 0 4 1522032.84 1317555.15 13.4% - 0s
H 0 0 1521653.4243 1317555.15 13.4% - 0s
0 0 1317573.07 0 5 1521653.42 1317573.07 13.4% - 0s
0 0 1317780.47 0 3 1521653.42 1317780.47 13.4% - 0s
0 0 1318123.14 0 3 1521653.42 1318123.14 13.4% - 0s
0 2 1318123.14 0 3 1521653.42 1318123.14 13.4% - 0s
Cutting planes:
Gomory: 6
MIR: 7
Explored 134 nodes (729 simplex iterations) in 0.08 seconds
Thread count was 8 (of 8 available processors)
Solution count 4: 1.52165e+06 1.52203e+06 1.52242e+06 2.02022e+06
Optimal solution found (tolerance 1.00e-04)
Best objective 1.521653424311e+06, best bound 1.521653424311e+06, gap 0.0000%
_____________Optimal costs______________________
The total cost of building COVID-19 temporary healthcare facilities is $1,500,000
The total cost of allocating COVID-19 patients to healthcare facilities is $21,645
_____________Plan for temporary facilities______________________
Build a temporary facility at location 15
Build a temporary facility at location 17
Build a temporary facility at location 18
_____________Plan to increase Capacity at temporary Facilities______________________
_____________Allocation of county patients to COVID-19 healthcare facility______________________
281 COVID-19 patients from county 1 are treated at facility 1
281 is the total number of COVID-19 patients that are treated at facility 1.
________________________________________________________________________________
187 COVID-19 patients from county 2 are treated at facility 2
187 is the total number of COVID-19 patients that are treated at facility 2.
________________________________________________________________________________
200 COVID-19 patients from county 3 are treated at facility 3
200 is the total number of COVID-19 patients that are treated at facility 3.
________________________________________________________________________________
223 COVID-19 patients from county 3 are treated at facility 4
223 is the total number of COVID-19 patients that are treated at facility 4.
________________________________________________________________________________
281 COVID-19 patients from county 4 are treated at facility 5
281 is the total number of COVID-19 patients that are treated at facility 5.
________________________________________________________________________________
281 COVID-19 patients from county 5 are treated at facility 6
281 is the total number of COVID-19 patients that are treated at facility 6.
________________________________________________________________________________
6 COVID-19 patients from county 3 are treated at facility 7
50 COVID-19 patients from county 5 are treated at facility 7
166 COVID-19 patients from county 6 are treated at facility 7
222 is the total number of COVID-19 patients that are treated at facility 7.
________________________________________________________________________________
200 COVID-19 patients from county 6 are treated at facility 8
200 is the total number of COVID-19 patients that are treated at facility 8.
________________________________________________________________________________
250 COVID-19 patients from county 7 are treated at facility 9
250 is the total number of COVID-19 patients that are treated at facility 9.
________________________________________________________________________________
125 COVID-19 patients from county 7 are treated at facility 10
125 is the total number of COVID-19 patients that are treated at facility 10.
________________________________________________________________________________
161 COVID-19 patients from county 6 are treated at facility 11
161 is the total number of COVID-19 patients that are treated at facility 11.
________________________________________________________________________________
28 COVID-19 patients from county 8 are treated at facility 12
272 COVID-19 patients from county 9 are treated at facility 12
300 is the total number of COVID-19 patients that are treated at facility 12.
________________________________________________________________________________
94 COVID-19 patients from county 7 are treated at facility 13
206 COVID-19 patients from county 8 are treated at facility 13
300 is the total number of COVID-19 patients that are treated at facility 13.
________________________________________________________________________________
228 COVID-19 patients from county 9 are treated at facility 14
228 is the total number of COVID-19 patients that are treated at facility 14.
________________________________________________________________________________
57 COVID-19 patients from county 1 are treated at facility 15
43 COVID-19 patients from county 2 are treated at facility 15
100 is the total number of COVID-19 patients that are treated at facility 15.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 16.
________________________________________________________________________________
100 COVID-19 patients from county 3 are treated at facility 17
100 is the total number of COVID-19 patients that are treated at facility 17.
________________________________________________________________________________
13 COVID-19 patients from county 1 are treated at facility 18
58 COVID-19 patients from county 4 are treated at facility 18
29 COVID-19 patients from county 5 are treated at facility 18
100 is the total number of COVID-19 patients that are treated at facility 18.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 19.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 20.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 21.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 22.
________________________________________________________________________________
0 is the total number of COVID-19 patients that are treated at facility 23.
________________________________________________________________________________
_____________Test demand = supply______________________
Total demand is: 3,539 patients
Total demand satisfied is: 3,539 beds
###Markdown
Analysis for Base ScenarioThe optimal total cost of building COVID-19 temporary healthcare facilities is $\$1,500,000$, and three COVID-19 temporary healthcare facilities are built. The total cost of allocating COVID-19 patients to healthcare facilities is $\$21,645$, and no extra capacity needs to be added to accommodate the demand for treatment from COVID-19 patients.The MIP model also determines the expected number of COVID-19 patients of a county allocated to a healthcare facility. For example, 6 COVID-19 patients from county 3, 50 COVID-19 patients from county 5, and 166 COVID-19 patients from county 6 are expected to be treated at facility 7. The total number of COVID-19 patients expected to be treated at facility 7 is 222. --- Scenario 1 Assume that the Centers for Disease Control and Prevention (CDC) announced that the number of hospitalizations will increase by 20%. This percentage includes 5% of buffer capacity to account for the variability of the expected demand.
###Code
# Increase in demand by 20%.
for c in counties:
forecast[c] = round(1.2*forecast[c])
# find the optimal for scenario 1
solve_covid19_facility(coordinates, forecast)
###Output
Gurobi Optimizer version 9.1.0 build v9.1.0rc0 (win64)
Thread count: 4 physical cores, 8 logical processors, using up to 8 threads
Optimize a model with 32 rows, 225 columns and 432 nonzeros
Model fingerprint: 0x599a0475
Variable types: 216 continuous, 9 integer (9 binary)
Coefficient statistics:
Matrix range [1e+00, 1e+02]
Objective range [3e+00, 1e+09]
Bounds range [1e+00, 1e+00]
RHS range [1e+02, 6e+02]
Presolve time: 0.00s
Presolved: 32 rows, 225 columns, 432 nonzeros
Variable types: 216 continuous, 9 integer (9 binary)
Root relaxation: objective 6.700453e+10, 51 iterations, 0.00 seconds
Nodes | Current Node | Objective Bounds | Work
Expl Unexpl | Obj Depth IntInf | Incumbent BestBd Gap | It/Node Time
* 0 0 0 6.700453e+10 6.7005e+10 0.00% - 0s
Explored 0 nodes (51 simplex iterations) in 0.02 seconds
Thread count was 8 (of 8 available processors)
Solution count 1: 6.70045e+10
Optimal solution found (tolerance 1.00e-04)
Best objective 6.700452551331e+10, best bound 6.700452551331e+10, gap 0.0000%
_____________Optimal costs______________________
The total cost of building COVID-19 temporary healthcare facilities is $4,500,000
The total cost of allocating COVID-19 patients to healthcare facilities is $25,520
_____________Plan for temporary facilities______________________
Build a temporary facility at location 15
Build a temporary facility at location 16
Build a temporary facility at location 17
Build a temporary facility at location 18
Build a temporary facility at location 19
Build a temporary facility at location 20
Build a temporary facility at location 21
Build a temporary facility at location 22
Build a temporary facility at location 23
_____________Plan to increase Capacity at temporary Facilities______________________
Increase temporary facility capacity at location 15 by 40 beds
Increase temporary facility capacity at location 17 by 27 beds
_____________Allocation of county patients to COVID-19 healthcare facility______________________
281 COVID-19 patients from county 1 are treated at facility 1
281 is the total number of COVID-19 patients that are treated at facility 1.
________________________________________________________________________________
187 COVID-19 patients from county 2 are treated at facility 2
187 is the total number of COVID-19 patients that are treated at facility 2.
________________________________________________________________________________
200 COVID-19 patients from county 3 are treated at facility 3
200 is the total number of COVID-19 patients that are treated at facility 3.
________________________________________________________________________________
223 COVID-19 patients from county 3 are treated at facility 4
223 is the total number of COVID-19 patients that are treated at facility 4.
________________________________________________________________________________
281 COVID-19 patients from county 4 are treated at facility 5
281 is the total number of COVID-19 patients that are treated at facility 5.
________________________________________________________________________________
281 COVID-19 patients from county 5 are treated at facility 6
281 is the total number of COVID-19 patients that are treated at facility 6.
________________________________________________________________________________
74 COVID-19 patients from county 3 are treated at facility 7
148 COVID-19 patients from county 6 are treated at facility 7
222 is the total number of COVID-19 patients that are treated at facility 7.
________________________________________________________________________________
200 COVID-19 patients from county 6 are treated at facility 8
200 is the total number of COVID-19 patients that are treated at facility 8.
________________________________________________________________________________
250 COVID-19 patients from county 7 are treated at facility 9
250 is the total number of COVID-19 patients that are treated at facility 9.
________________________________________________________________________________
125 COVID-19 patients from county 7 are treated at facility 10
125 is the total number of COVID-19 patients that are treated at facility 10.
________________________________________________________________________________
187 COVID-19 patients from county 6 are treated at facility 11
187 is the total number of COVID-19 patients that are treated at facility 11.
________________________________________________________________________________
300 COVID-19 patients from county 9 are treated at facility 12
300 is the total number of COVID-19 patients that are treated at facility 12.
________________________________________________________________________________
188 COVID-19 patients from county 7 are treated at facility 13
55 COVID-19 patients from county 8 are treated at facility 13
57 COVID-19 patients from county 9 are treated at facility 13
300 is the total number of COVID-19 patients that are treated at facility 13.
________________________________________________________________________________
243 COVID-19 patients from county 9 are treated at facility 14
243 is the total number of COVID-19 patients that are treated at facility 14.
________________________________________________________________________________
140 COVID-19 patients from county 1 are treated at facility 15
140 is the total number of COVID-19 patients that are treated at facility 15.
________________________________________________________________________________
89 COVID-19 patients from county 2 are treated at facility 16
11 COVID-19 patients from county 3 are treated at facility 16
100 is the total number of COVID-19 patients that are treated at facility 16.
________________________________________________________________________________
127 COVID-19 patients from county 3 are treated at facility 17
127 is the total number of COVID-19 patients that are treated at facility 17.
________________________________________________________________________________
100 COVID-19 patients from county 4 are treated at facility 18
100 is the total number of COVID-19 patients that are treated at facility 18.
________________________________________________________________________________
100 COVID-19 patients from county 5 are treated at facility 19
100 is the total number of COVID-19 patients that are treated at facility 19.
________________________________________________________________________________
51 COVID-19 patients from county 5 are treated at facility 20
49 COVID-19 patients from county 6 are treated at facility 20
100 is the total number of COVID-19 patients that are treated at facility 20.
________________________________________________________________________________
26 COVID-19 patients from county 4 are treated at facility 21
74 COVID-19 patients from county 8 are treated at facility 21
100 is the total number of COVID-19 patients that are treated at facility 21.
________________________________________________________________________________
100 COVID-19 patients from county 8 are treated at facility 22
100 is the total number of COVID-19 patients that are treated at facility 22.
________________________________________________________________________________
48 COVID-19 patients from county 6 are treated at facility 23
52 COVID-19 patients from county 8 are treated at facility 23
100 is the total number of COVID-19 patients that are treated at facility 23.
________________________________________________________________________________
_____________Test demand = supply______________________
Total demand is: 4,247 patients
Total demand satisfied is: 4,247 beds
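###Markdown
Analysis for Scenario 1With a 20% increase in demand, all nine temporary facilities are built at a total cost of $\$4,500,000$, and even that is not enough: 40 extra beds are added at location 15 and 27 extra beds at location 17 in order to satisfy the total demand of 4,247 patients. The total cost of allocating COVID-19 patients to healthcare facilities rises to $\$25,520$.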
|
class_exercises/.ipynb_checkpoints/D05-Pandas_Part2_revisited-checkpoint.ipynb | ###Markdown
Lesson 4 Class Exercises: Pandas Part 2 With these class exercises we learn a few new things. When new knowledge is introduced you'll see the icon shown on the right:  Get StartedImport the Numpy and Pandas packages
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Exercise 1: Review of Pandas Part 1 Task 1: Explore the dataImport the data from the [Lectures in Quantitative Economics](https://github.com/QuantEcon/lecture-source-py) regarding minimum wages in countries around the world in US Dollars. You can view the data [here](https://github.com/QuantEcon/lecture-source-py/blob/master/source/_static/lecture_specific/pandas_panel/realwage.csv) and you can access the data file here: https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv. Then perform the followingImport the data into a variable named `minwages` and print the first 5 lines of data to explore what is there.
###Code
minwages = pd.read_csv('https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv',)
minwages.head(5)
###Output
_____no_output_____
###Markdown
Find the shape of the data.
###Code
minwages.shape
###Output
_____no_output_____
###Markdown
List the column names.
###Code
minwages.columns
###Output
_____no_output_____
###Markdown
Identify the data types. Do they match what you would expect?
###Code
minwages.dtypes
###Output
_____no_output_____
###Markdown
Identify columns with missing values.
###Code
minwages.isna().sum()
###Output
_____no_output_____
###Markdown
Identify whether there are duplicated entries.
###Code
minwages.duplicated().sum()
###Output
_____no_output_____
###Markdown
How many unique values per column are there? Do these look reasonable for the data type and what you know about what is stored in the column?
###Code
minwages.nunique()
###Output
_____no_output_____
###Markdown
Task 2: Explore MoreRetrieve descriptive statistics for the data.
###Code
minwages.describe()
###Output
_____no_output_____
###Markdown
Identify all of the countries listed in the data.
###Code
minwages['Country'].unique()
###Output
_____no_output_____
###Markdown
Convert the time column to a datetime object.
###Code
minwages['Time'] = pd.to_datetime(minwages['Time'])
minwages.dtypes
###Output
_____no_output_____
###Markdown
List the time points that were used for data collection. How many years of data collection were there? What time of year were the data collected?
###Code
minwages['Time'].unique()
###Output
_____no_output_____
###Markdown
Because we only have one data point collected per year per country, simplify this by adding a new column with just the year. Print the first 5 rows to confirm the column was added.
###Code
minwages['Year'] = minwages['Time'].dt.year
minwages.head()
minwages['Year'].unique()
###Output
_____no_output_____
###Markdown
There are two pay periods. Retrieve them in a list of just the two strings
###Code
minwages['Pay period'].unique()
minwages['Series'].unique()
###Output
_____no_output_____
###Markdown
Task 3: Clean the dataWe have no duplicates in this data so we do not need to consider removing those, but we do have missing values in the `value` column. Let's remove those. Check the dimensions afterwards to make sure the rows with missing values are gone.
###Code
minwages.dropna(inplace=True)
minwages.shape
###Output
_____no_output_____
###Markdown
If your dataframe has an "Unnamed: 0" column remove it, as it's not needed. Note: in the `pd.read_csv()` function you can use the `index_col` argument to set the column in the file that provides the index and that would prevent the "Unnamed: 0" column with this dataset.
###Code
#minwages.drop(['Unnamed: 0'], axis=1, inplace=True)
###Output
_____no_output_____
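###Markdown
A sketch of the `index_col` alternative mentioned above (shown without re-binding `minwages`, so the cleaning steps already applied are not lost):
###Code
# Sketch: index_col=0 makes pandas use the file's first column as the index,
# which avoids the "Unnamed: 0" column in the first place
url = 'https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv'
pd.read_csv(url, index_col=0).head()
###Output
_____no_output_____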
###Markdown
Task 4: IndexingUse boolean indexing to retrieve the rows of annual salary in United States
###Code
minwages[(minwages['Country'] == 'United States') &
(minwages['Pay period'] == 'Annual')]
###Output
_____no_output_____
###Markdown
Do we have enough data to calculate descriptive statistics for annual salary in the United States in 2016? From here on out, let's only explore the rows that have a "series" value of "In 2015 constant prices at 2015 USD exchange rates"
###Code
minwages2 = minwages[minwages['Series'] == 'In 2015 constant prices at 2015 USD exchange rates']
minwages2.shape
###Output
_____no_output_____
###Markdown
Use `loc` to calculate descriptive statistics for the hourly salary in the United States and then again separately for Ireland. Hint: you will have to set row indexes. Hint: you should reset the index before using `loc`. Now do the same for the Annual salary. (A sketch for these prompts follows after the next code cell.) Exercise 2: OccurrencesFirst, reset the indexes back to numeric values. Print the first 10 lines to confirm. Get the count of how many rows there are per year. Exercise 3: Grouping Task 1: AggregationCalculate the average salary for each country across all years.
###Code
groups = minwages2.groupby(['Country', 'Pay period'])
groups.mean().head()
###Output
_____no_output_____
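###Markdown
The `loc` and per-year count prompts above were left without an answer cell; one possible sketch, using the frames defined earlier:
###Code
# Sketch: index by (Country, Pay period), then slice out the requested rows with loc
mw_idx = minwages2.reset_index(drop=True).set_index(['Country', 'Pay period'])
print(mw_idx.loc[('United States', 'Hourly'), 'value'].describe())
print(mw_idx.loc[('Ireland', 'Hourly'), 'value'].describe())
print(mw_idx.loc[('United States', 'Annual'), 'value'].describe())
# Count of rows per year
print(minwages2['Year'].value_counts())
###Output
_____no_output_____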
###Markdown
Calculate the average salary and hourly wage for each country across all years. Save the resulting dataframe containing the means into a new variable named `mwmean`. Above we saw how to aggregate using built-in functions of the `DataFrameGroupBy` object. For example we called the `mean` function directly. These handy functions help with writing succinct code. However, you can also use the `aggregate` function to do more! You can learn more on the [aggregate description page](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.aggregate.html)With `aggregate` we can perform operations across rows and columns, and we can perform more than one operation at a time. Explore the online documentation for the function and see how you would calculate the mean, min, and max for each country and pay period type, as well as the total number of records per country and pay period:
###Code
groups.aggregate(['mean', 'min', 'max', 'count'])
###Output
_____no_output_____
###Markdown
Also you can use the aggregate on a single column of the grouped object. For example:```python mwgroup = minwages[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period']) mwgroup['value'].aggregate(['mean'])```Redo the aggregate function in the previous cell but this time apply it to a single column. Task 2: Slicing/IndexingIn the following code the resulting dataframe should contain only one data column: the mean values. It does, however, have two levels of indexes: Country and Pay period. For example:```pythonmwgroup = minwages[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period'])mwmean = mwgroup.mean()mwmean```Try it out:
###Code
mwgroup = minwages2[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period'])
mwmean = mwgroup.mean()
mwmean
###Output
_____no_output_____
###Markdown
Notice in the output above there are two levels of indexes. This is called MultiIndexing. In reality, there is only one data column and two index levels. So, you can do this:
```python
mwmean['value']
```
But you can't do this:
```python
mwmean['Pay period']
```
Why not? Try it:
###Code
mwmean['value']
mwmean['Pay period']
###Output
_____no_output_____
###Markdown
The reason we cannot execute `mwmean['Pay period']` is because `Pay period` is not a data column. It's an index. Let's learn how to use MultiIndexes to retrieve data. You can learn more about it on the [MultiIndex/advanced indexing page](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced-indexing-with-hierarchical-index).

First, let's take a look at the indexes using the `index` attribute:
```python
mwmean.index
```
Try it:
###Code
mwmean.index
###Output
_____no_output_____
###Markdown
Notice that each index is actually a tuple with two levels. The first is the country names and the second is the pay period. Remember, we can use the `loc` function to slice a dataframe using indexes. We can do so with a MultiIndexed dataframe as well. For example, to extract all elements with the index named 'Australia':
```python
mwmean.loc[('Australia')]
```
Try it yourself:

You can specify both indexes to pull out a single row. For example, to find the average hourly salary in Australia:
```python
mwmean.loc[('Australia','Hourly')]
```
Try it yourself:
###Code
mwmean.loc[('Australia','Hourly')]
###Output
_____no_output_____
###Markdown
Suppose you wanted to retrieve all of the mean "Hourly" wages. For MultiIndexes, there are multiple ways to slice it; some are not entirely intuitive or flexible enough. Perhaps the easiest is to use the `pd.IndexSlice` object. It allows you to specify an index format that is intuitive to the way you've already learned to slice. For example:
```python
idx = pd.IndexSlice
mwmean.loc[idx[:,'Hourly'],:]
```
In the code above the `idx[:, 'Hourly']` portion is used in the "row" indexer position of the `loc` function. It indicates that we want all possible first-level indexes (specified with the `:`) and we want second-level indexes to be restricted to "Hourly". Try it out yourself:
###Code
idx = pd.IndexSlice
rows = idx[:,'Hourly']
mwmean.loc[rows,:]
###Output
_____no_output_____
###Markdown
Using what you've learned above about slicing the MultiIndexed dataframe, find out which country has had the highest average annual salary. You can move the indexes into the dataframe and return to a traditional single-level numeric index by resetting the indexes:
```python
mwmean.reset_index()
```
Try it yourself:
###Code
mwmean2 = mwmean.reset_index()
mwmean2[mwmean2['Pay period'] == 'Hourly']
###Output
_____no_output_____
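###Markdown
Returning to the question above, one sketch for the country with the highest average annual salary (assuming the mean column is named `value`):
```python
idx = pd.IndexSlice
annual_means = mwmean.loc[idx[:, 'Annual'], :]
annual_means[annual_means['value'] == annual_means['value'].max()]
```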
###Markdown
Task 3: Filtering the original data
Another way we might want to filter is to find records in the dataset that, after grouping, meet some criteria. For example, what if we wanted to find the records for all countries whose average annual salary was greater than $22K? To do this, we can use the `filter` function of the `DataFrameGroupBy` object. The filter function must take a function as an argument (this is new and may seem weird).
```python
annualwages = minwages[minwages['Pay period'] == 'Annual']
annualwages.groupby(['Country']).filter(
    lambda x : x['value'].mean() > 22000
)
```
Try it:
###Code
annualwages = minwages2[minwages2['Pay period'] == 'Annual']
annualwages.groupby(['Country']).filter(
lambda x : x['value'].mean() > 22000
)
###Output
_____no_output_____
###Markdown
Task 4: Reset the index
If you do not want to use MultiIndexes and you prefer to return any MultiIndex dataset back to a traditional 1-level index dataframe, you can use the `reset_index` function. Try it out on the `mwmean` dataframe.

Exercise 4: Task 6d from the practice notebook
Load the iris dataset (a sketch appears just before the next code cell). In the iris dataset:
+ Create a new column with the label "region" in the iris data frame. This column indicates geographic regions of the US where measurements were taken. Values should include: 'Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest'. Assign these randomly.
+ Use `groupby` to get a new data frame of means for each species in each region.
+ Add a `dev_stage` column by randomly selecting from the values "early" and "late".
+ Use `groupby` to get a new data frame of means for each species, in each region and each development stage.
+ Use the `count` function (just like you used the `mean` function) to identify how many rows in the table belong to each combination of species + region + developmental stage.

Exercise 5: Kaggle Titanic Dataset
A dataset of Titanic passengers and their fates is provided by the online machine learning competition server [Kaggle](https://www.kaggle.com/). See the [Titanic project](https://www.kaggle.com/c/titanic) page for more details. Let's practice all we have learned thus far to explore and perhaps clean this dataset. You have been provided with the dataset named `Titanic_train.csv`.

Task 1: Explore the data
First import the data and print the first 10 lines.
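A possible sketch for the Exercise 4 iris tasks above (loading iris via seaborn and the random-assignment details are assumptions; newer pandas may need `numeric_only=True` in `mean()`):
```python
import numpy as np
import seaborn as sns

iris = sns.load_dataset('iris')
regions = ['Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest']
iris['region'] = np.random.choice(regions, size=len(iris))
iris['dev_stage'] = np.random.choice(['early', 'late'], size=len(iris))

# Means per species/region, then per species/region/dev_stage
iris.groupby(['species', 'region']).mean()
iris.groupby(['species', 'region', 'dev_stage']).mean()

# Row counts for each species + region + dev_stage combination
iris.groupby(['species', 'region', 'dev_stage']).count()
```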
###Code
titanic = pd.read_csv('../data/titanic_train.csv')
titanic.head(10)
###Output
_____no_output_____
###Markdown
Find the shape of the data.
###Code
titanic.shape
###Output
_____no_output_____
###Markdown
List the column names.
###Code
titanic.columns
###Output
_____no_output_____
###Markdown
Identify the data types. Do they match what you would expect?
###Code
titanic.dtypes
###Output
_____no_output_____
###Markdown
Identify columns with missing values.
###Code
titanic.isna().sum()
###Output
_____no_output_____
###Markdown
Identify if there are duplicated entries.
###Code
titanic.duplicated().sum()
###Output
_____no_output_____
###Markdown
How many unique values are there per column? Do these look reasonable for the data type and what you know about what is stored in the column?
###Code
titanic.nunique()
titanic['Sex'].unique()
titanic['Survived'].unique()
titanic['Pclass'].unique()
titanic['Cabin'].unique()
titanic['Embarked'].unique()
###Output
_____no_output_____
###Markdown
Task 2: Clean the data
Do missing values need to be removed? If so, remove them.
###Code
#titanic = titanic.dropna() <- Not done because that would remove 687 rows with missing cabin
# If we only wanted to drop missing age
#titanic = titanic.dropna(subset='Age') <- Also not done
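# A hedged alternative (also not executed here): impute instead of dropping, e.g.
# titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
# titanic['Embarked'] = titanic['Embarked'].fillna(titanic['Embarked'].mode()[0])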
###Output
_____no_output_____
###Markdown
Do duplicates need to be removed? If so remove them. Task 3: Find Interesting FactsCount the number of passengers that survied and died in each passenger class Were men or women more likely to survive?
###Code
titanic.groupby(by=['Sex', 'Survived']).size()
# Need to do a statistical test to see if differences are significant
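# The per-class survival counts asked about above could be found the same way (a sketch):
# titanic.groupby(['Pclass', 'Survived']).size()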
###Output
_____no_output_____ |
datos_modelar/.ipynb_checkpoints/modelos_gustavo-checkpoint.ipynb | ###Markdown
Parameters
###Code
# Number of trees in the random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 1100, num = 100)]
# Number of features to consider at each split
max_features = ['auto', 'sqrt']
# Maximum number of levels in the tree
max_depth = [None, 1, 2, 3]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf
min_samples_leaf = [1, 2, 4]
# Sampling method used when training each tree
bootstrap = [True, False]
param_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
# median_absolute_error is a loss (lower is better), so flag that for the scorer
scorer = make_scorer(median_absolute_error, greater_is_better=False)
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
###Output
_____no_output_____
###Markdown
RandomForest
###Code
# rf = GridSearchCV(RandomForestRegressor(criterion="mse"),
# param_grid,
# cv = 5,
# scoring = scorer,
# n_jobs = -1,
# verbose = 1)
n_iteration = 20
rf = RandomizedSearchCV(RandomForestRegressor(criterion="mse"), param_grid, cv = 5,n_iter=n_iteration,
scoring = scorer, n_jobs = -1, verbose = 1)
rf.fit(X_train, y_train)
pred_rf = np.exp(rf.predict(X_test)) - 1
pred_rf = pred_rf.reshape(-1, 1)
df = pd.DataFrame(np.exp(y_test)-1 , index = y_test.index)
df.columns = ['TARGET']
df['pred_rf'] = pred_rf
median_absolute_error(df['TARGET'],df['pred_rf'])
report(rf.cv_results_, n_top=20)
# filename = 'random_forest_gridsearchcv1.sav'
# pickle.dump(rf.best_estimator_, open(filename, 'wb'))
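# Reloading a saved model later would look like this (a sketch, path as above):
# loaded_rf = pickle.load(open('random_forest_gridsearchcv1.sav', 'rb'))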
###Output
_____no_output_____
###Markdown
ExtraTreesRegressor
###Code
etr = GridSearchCV(ExtraTreesRegressor(criterion="mse"),
param_grid,
cv = 5,
scoring = scorer,
n_jobs = -1,
verbose = 1)
etr.fit(X_train, y_train)
pred_etr = np.exp(etr.predict(X_test)) - 1
pred_etr = pred_etr.reshape(-1, 1)
df['pred_etr'] = pred_etr
median_absolute_error(df['TARGET'],df['pred_etr'])
# filename = 'extra_trees_gridsearchcv1.sav'
# pickle.dump(etr.best_estimator_, open(filename, 'wb'))
###Output
_____no_output_____ |
TSA/ARIMA_v2.ipynb | ###Markdown
The smaller the AIC/BIC/HQIC, the better, so we select the ARMA(7,0) model.

Model checking
Under the exponential smoothing model, check whether the residuals of the ARIMA model follow a normal distribution with zero mean and constant variance (i.e., obey a zero-mean, constant-variance normal distribution), and also check whether successive residuals are (auto)correlated.
###Code
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(arma_mod20.resid, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(arma_mod20.resid, lags=40, ax=ax2)
arma_mod20.resid.mean()/arma_mod20.resid.std()
arma_mod20.resid.plot()
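# A quick numerical normality check on the residuals (a sketch; scipy assumed available):
# from scipy import stats
# stats.normaltest(arma_mod20.resid)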
###Output
_____no_output_____
###Markdown
Perform the Durbin-Watson (D-W) test
The Durbin-Watson test, D-W test for short, is currently the most commonly used method for testing autocorrelation, but it only applies to first-order autocorrelation. Since the autocorrelation coefficient $\rho$ lies between -1 and 1, we have $0 \le DW \le 4$, with:
- DW = 0 → $\rho$ = 1: positive autocorrelation exists
- DW = 4 → $\rho$ = -1: negative autocorrelation exists
- DW = 2 → $\rho$ = 0: no (first-order) autocorrelation

Therefore, when the DW value is significantly close to 0 or 4, autocorrelation exists, while a value close to 2 indicates no (first-order) autocorrelation. Thus, once the probability distribution of the DW statistic is known, the null hypothesis $H_0$ can be tested at a given significance level according to the position of the critical values.
###Code
print(sm.stats.durbin_watson(arma_mod20.resid.values))
resid = arma_mod20.resid#残差
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)
###Output
_____no_output_____
###Markdown
Ljung-Box test
The Ljung-Box test is a test of randomness, i.e., a statistical test for whether lagged correlation exists in a time series. Lagged correlation is also commonly examined by computing the ACF and PACF and inspecting their plots, but both the ACF and PACF only consider correlation at one particular lag. The LB test, by contrast, is based on a whole range of lags and judges the overall correlation (or randomness) of the series.

One of the most basic models in time-series analysis is the Gaussian white-noise sequence. For an ARIMA model, the residuals are assumed to be Gaussian white noise, so after fitting an ARIMA model we apply the LB test to the estimated residual series; if it is not Gaussian white noise, the ARIMA model may not be a suitable model for the sample.
###Code
r,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True)
data = np.c_[range(1,41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
table.set_index('lag')
table[:20]
###Output
_____no_output_____
###Markdown
The result to check is the test probability in the last column for the first twelve rows (one generally inspects lags 1-12). If the test probability is less than the given significance level, e.g., 0.05 or 0.10, reject the null hypothesis that the correlation coefficients are zero. From these results, at a significance level of 0.05 the correlation coefficients are not significantly different from zero, i.e., the series is white noise.
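Equivalently, the Ljung-Box statistics can be computed in one call (a sketch; `acorr_ljungbox` lives in statsmodels and is exposed as `sm.stats.acorr_ljungbox`):
```python
# Q statistics and p-values for lags 1..12 of the residual series
sm.stats.acorr_ljungbox(resid.values, lags=12)
```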
###Code
predict_sunspots = arma_mod20.predict('2090', '2100', dynamic=True)
print(predict_sunspots)
fig, ax = plt.subplots(figsize=(12, 8))
ax = dta.loc['2001':].plot(ax=ax)
predict_sunspots.plot(ax=ax)
# pd.ewma is deprecated in newer pandas; the equivalent is Series.ewm(...).mean()
pd.ewma?
###Output
_____no_output_____ |
analysis/check_bonus_amount_by_worker.ipynb | ###Markdown
"answers": {"subject_information": {"score":67}},
###Code
## 11:01 AM on april 18 still have to bonus ATF7HBD1TFI2U
# len(coll.distinct('workerId'))
# coll.find({'workerId':'A3MXZMAX4XEZU4'}).count()
# b = coll.find({'$and': [{'gameid':g[-1]},{'eventType':'clickedObj'}, {'iterationName':'chairs2k_expansion_only'}, {'time': {'$gt': 1522802963803}}]}).sort('time')
wID = 'A1SJRFJWFXSDH4'
g = coll.find({'workerId':wID}).distinct('gameid')
b = coll.find({'$and': [{'gameid':g[-1]},{'eventType':'clickedObj'}, {'iterationName':'chairs2k_expansion_only'}, {'time': {'$gt': 1522802963803}}]}).sort('time')
correct = []
for rec in b:
correct.append(rec['correct'])
score = sum(correct)
# print(sum(correct), b.count())
total_amt = (b.count()/70)*2.5 + 0.02*sum(correct)
if wID not in already_comp:
    print('worker: {} | total_amt: {} | score: {}'.format(wID, total_amt, score))
## already compensated bonused workers
## worker: A3BFMY5OX2WMUW | aID: 3KOPY89HM9G9MG9QFTU88Z6SO6V3JR | total_amt: 3.7 | score: 60
## worker: A1TQ0BT81FJYNS | aID: 3LUY3GC630EN9ADBCI2IIC4K27N7P0 | total_amt: 3.84 | score: 67
## worker: A3EQR9C0L23O6E | aID: 3ZQIG0FLQFUF49B862JTGKSJINYVWF | total_amt: 0.0 | score: 0
## worker: A3ROADR7T6811 | aID: 3N2BF7Y2VR8EHWMAXBW2SLZJC69MHP | total_amt: 3.84 | score: 67
####### ONLY RUN IF YOU ARE SURE YOU WANT TO RENAME
########db.chairs_chatbox.update_many({'iterationName':'chairs2k_expansion_only______'},{'$set': {'iterationName': 'chairs1k_expansion_only'}})
###Output
_____no_output_____ |
chrisfarfan_json_exercise.ipynb | ###Markdown
imports for Python, Pandas
###Code
import json
import pandas as pd
from pandas.io.json import json_normalize
###Output
_____no_output_____
###Markdown
**** JSON exercise
Using data in file 'data/world_bank_projects.json' and the techniques demonstrated above,
1. Find the 10 countries with most projects
2. Find the top 10 major project themes (using column 'mjtheme_namecode')
3. In 2. above you will notice that some entries have only the code and the name is missing. Create a dataframe with the missing names filled in.
###Code
df = pd.read_json('data/world_bank_projects.json')
df.head(3)
df.countryshortname.value_counts().sort_values(ascending=False).head(10)
###Output
_____no_output_____
###Markdown
Thus we have the ten countries with the most projects.
###Code
# load json as string
json_string = json.load((open('data/world_bank_projects.json')))
unstacked_mjthemes = json_normalize(json_string, 'mjtheme_namecode','_id')
unstacked_mjthemes.head(15)
unstacked_mjthemes.code.value_counts().head(10)
###Output
_____no_output_____
###Markdown
Now we have the top 10 project themes (out of 11 total). However, these give us only the codes, so unless you have the corresponding string names memorized, this isn't too useful. Using `value_counts()` on the name column of our new dataframe won't quite do, since there will be missing entries counted as blanks. So let's fill these in using the map method.
###Code
# Keep only the rows where a name is present
themes = unstacked_mjthemes[unstacked_mjthemes.name != '']
# For each code, take the (unique) non-blank name, giving a code -> name mapping
paired_themes = themes.name.groupby(themes.code).max()
paired_themes = paired_themes.to_dict()
paired_themes
unstacked_mjthemes['name'] = unstacked_mjthemes.code.map(paired_themes)
unstacked_mjthemes.head(15)
any(unstacked_mjthemes.name == '')
###Output
_____no_output_____
###Markdown
So we see we have used the dict of paired codes and names to map the blank entries correctly, and that there are no empty strings left in the name column. We can thus now use value counts to correctly see the ten top themes by name and not just code.
###Code
unstacked_mjthemes.name.value_counts().head(10)
###Output
_____no_output_____
###Markdown
Reindex our new dataframe to use the project id as the index. We could then use this to reinsert the corrected major themes into the large dataframe if required.
###Code
unstacked = unstacked_mjthemes[['_id','code','name']]
unstacked.set_index('_id')
###Output
_____no_output_____ |
experiments/upstream/Embed joint LSTM.ipynb | ###Markdown
Embed data with joint LSTM
Embed downstream data using the joint LSTM trained on all signals simultaneously.
###Code
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from phase.embedding import *
PATH = os.path.expanduser("~/phase/upstream_embedding/")
lookback = 60
DEBUG = False
label_type_eta_currfeat_lst = [("desat_bool92_5_nodesat",0.02,"SAO2"),
("nibpm60",0.1,"NIBPM"),
("etco235",0.1,"ETCO2")]
for label_type, _, curr_feat in label_type_eta_currfeat_lst:
print("\n[Progress] label_type: {}, eta: {}, curr_feat {}".format(label_type, "NA", curr_feat))
for hosp_model in [0,1]:
# This is the order of features that the big LSTM uses
top15 = ["SAO2","FIO2","ECGRATE","ETCO2","RESPRATE","PEEP","TV","PEAK",
"PIP","ETSEVO","ETSEV","TEMP1","NIBPM","NIBPS","NIBPD"]
MODDIR = "/homes/gws/hughchen/phase/upstream_embedding/models/"
MODDIR += "multivariate_biglstmdropout_hd{}_nextfivetask_".format(hosp_model)
MODDIR += "200n_200n_200ep_rmspropopt_0.001lr_0.5drop_1000bs_epochsize"
min_mod = load_min_model_helper(MODDIR)
min_mod_weights = min_mod.get_weights()
for hosp_data in [0,1]:
print("\n[Progress] hosp_data {}".format(hosp_data))
data_type = "proc[top15]+nonsignal"
# Load train validation data and split it
(X_trval_lst,y_trval) = load_data(PATH,data_type,label_type,True,hosp_data,curr_feat,DEBUG=DEBUG)
X_trval_lst = [X[:,:,np.newaxis] if X.shape[1] == 60 else X for X in X_trval_lst]
# Load test data
(X_test1_lst,y_test1) = load_data(PATH,data_type,label_type,False,hosp_data,curr_feat,DEBUG=DEBUG)
X_test1_lst = [X[:,:,np.newaxis] if X.shape[1] == 60 else X for X in X_test1_lst]
# Create and save embeddings for top15 features
task = "nextfivemulti"
suffix = "embedding_data{}_model{}.npy".format(hosp_data,hosp_model)
final_task = label_type
SPATH = PATH+"data/{}/hospital_{}/".format(final_task,hosp_data)
SPATH += "hidden200/{}/model_{}/".format(task,hosp_model)
if not os.path.exists(SPATH): os.makedirs(SPATH)
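# Note on the weight slicing below: a (non-CuDNN) Keras LSTM layer carries 3
# weight arrays (kernel, recurrent kernel, bias), so feature i's first-layer
# weights occupy indices [i*3:(i+1)*3] of the joint model's weight list, and
# its second-layer weights start at offset 45 (= 15 features * 3 arrays).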
for i in range(0,len(top15)):
feat = top15[i] # Current feature being embedded
# Create model
h1 = 200; h2 = 200; lookback = 60
model = Sequential()
model.add(LSTM(h1, recurrent_dropout=0.5, return_sequences=True,
input_shape=(lookback,1),
weights=min_mod_weights[(i*3):((i+1)*3)]))
model.add(LSTM(h2, recurrent_dropout=0.5,dropout=0.5,
weights=min_mod_weights[(45+(i*3)):(45+((i+1)*3))]))
# Create embeddings and names
trval_pred = model.predict(X_trval_lst[i])
test1_pred = model.predict(X_test1_lst[i])
trval_pred_name = "task:{}_feat:{}_trval_{}".format(task,feat,suffix)
test1_pred_name = "task:{}_feat:{}_test1_{}".format(task,feat,suffix)
# Save embeddings
np.save(SPATH+trval_pred_name,trval_pred)
np.save(SPATH+test1_pred_name,test1_pred)
###Output
[Progress] label_type: etco235, eta: NA, curr_feat ETCO2
[PROGRESS] Starting load_min_model_helper()
[DEBUG] MPATH /homes/gws/hughchen/phase/upstream_embedding/models/multivariate_biglstmdropout_hd1_nextfivetask_200n_200n_200ep_rmspropopt_0.001lr_0.5drop_1000bs_epochsize
|
notebooks/07 bert 2.ipynb | ###Markdown
Inputs
Next, import the BERT tokenizer, used to convert our text into tokens that correspond to BERT's vocabulary.
###Code
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print ("Tokenize the first sentence:")
print (tokenized_texts[0])
# Set the maximum sequence length. The longest sequence in our training set is 47, but we'll leave room on the end anyway.
# In the original paper, the authors used a length of 512.
MAX_LEN = 128
# Use the BERT tokenizer to convert the tokens to their index numbers in the
# BERT vocabulary, then pad/truncate every sequence to MAX_LEN
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
###Output
_____no_output_____
###Markdown
Create the attention masks
###Code
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
# Use train_test_split to split our data into train and validation sets for training
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
random_state=2018, test_size=0.1)
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=2018, test_size=0.1)
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
# Select a batch size for training. For fine-tuning BERT on a specific task, the authors recommend a batch size of 16 or 32
batch_size = 16
# Create an iterator of our data with torch DataLoader. This helps save on memory during training because, unlike a for loop,
# with an iterator the entire dataset does not need to be loaded into memory
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# Load BertForSequenceClassification, the pretrained BERT model with a single linear classification layer on top.
model = BertForSequenceClassification.from_pretrained("bert-large-uncased", num_labels=2)
model.cuda()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
# This variable contains all of the hyperparemeter information our training loop needs
optimizer = BertAdam(optimizer_grouped_parameters,
lr=2e-5,
warmup=.1)
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def flat_precision(preds, labels):
    # Precision for the positive class: TP / (TP + FP)
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    predicted_pos = np.sum(pred_flat == 1)
    true_pos = np.sum((pred_flat == 1) & (labels_flat == 1))
    return true_pos / predicted_pos if predicted_pos > 0 else 0.0
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs (authors recommend between 2 and 4)
epochs = 4
# trange is a tqdm wrapper around the normal python range
for _ in trange(epochs, desc="Epoch"):
# Training
# Set our model to training mode (as opposed to evaluation mode)
model.train()
# Tracking variables
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
# Train the data for one epoch
for step, batch in enumerate(train_dataloader):
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Clear out the gradients (by default they accumulate)
optimizer.zero_grad()
# Forward pass
loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
train_loss_set.append(loss.item())
# Backward pass
loss.backward()
# Update parameters and take a step using the computed gradient
optimizer.step()
# Update tracking variables
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
print("Train loss: {}".format(tr_loss/nb_tr_steps))
# Validation
# Put model in evaluation mode to evaluate loss on the validation set
model.eval()
# Tracking variables
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions
logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
###Output
Epoch: 0%| | 0/4 [00:00<?, ?it/s]
###Markdown
Training Evaluation
Let's take a look at our training loss over all batches:
###Code
plt.figure(figsize=(15,8))
plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.plot(train_loss_set)
plt.show()
df = pd.read_csv("data/dataOut/schiz/annFinalSchiz_2.csv/annFinalSchiz_2.csv")
# Create sentence and label lists
sentences = df.Tweet.values
# We need to add special tokens at the beginning and end of each sentence for BERT to work properly
sentences = ["[CLS] " + sentence + " [SEP]" for sentence in sentences]
labels = df.Classification.values
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
MAX_LEN = 300
# Pad our input tokens
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
prediction_inputs = torch.tensor(input_ids)
prediction_masks = torch.tensor(attention_masks)
prediction_labels = torch.tensor(labels)
batch_size = 16
prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
# Prediction on test set
# Put model in evaluation mode
model.eval()
# Tracking variables
predictions , true_labels = [], []
# Predict
for batch in prediction_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and speeding up prediction
with torch.no_grad():
# Forward pass, calculate logit predictions
logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Store predictions and true labels
predictions.append(logits)
true_labels.append(label_ids)
# Per-batch accuracy. Batches can differ in size, so iterate over the lists
# directly rather than converting the ragged lists into a single numpy array.
a = []
for p, y in zip(predictions, true_labels):
    a.append(flat_accuracy(p, y))
np.array(a).mean()
# Import and evaluate each test batch using Matthew's correlation coefficient
from sklearn.metrics import matthews_corrcoef, accuracy_score
matthews_set = []
for i in range(len(true_labels)):
matthews = matthews_corrcoef(true_labels[i],
np.argmax(predictions[i], axis=1).flatten())
matthews_set.append(matthews)
# Accuracy is computed below on the flattened predictions; calling
# accuracy_score on the raw per-batch logits would fail.
###Output
_____no_output_____
###Markdown
The final score will be based on the entire test set, but let's take a look at the scores on the individual batches to get a sense of the variability in the metric between batches.
###Code
matthews_set
# Flatten the predictions and true values for aggregate Matthew's evaluation on the whole dataset
flat_predictions = [item for sublist in predictions for item in sublist]
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
flat_true_labels = [item for sublist in true_labels for item in sublist]
acc = accuracy_score(flat_true_labels, flat_predictions)
matthews_corrcoef(flat_true_labels, flat_predictions)
###Output
_____no_output_____ |
Tutorial-GRMHD_Equations-Cartesian.ipynb | ###Markdown
Equations of General Relativistic Magnetohydrodynamics (GRMHD)

Author: Zach Etienne

This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`.

**Notebook Status:** Self-Validated; induction equation not yet implemented

**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**

Introduction

We write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf)):
\begin{eqnarray}
\partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\
\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\
\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\
\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,
\end{eqnarray}
where
$$s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij} - \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].$$

We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):
$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$
where
* $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and
* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).

All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations. ***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py).*** Namely,
* The GRMHD conservative variables:
  * $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)`
  * $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)`
  * $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`
* The GRMHD fluxes:
  * $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)`
  * $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)`
  * $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`
* GRMHD source terms:
  * $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)`
  * $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`

In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables includes:
* Spacetime quantities:
  * ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$
* Hydrodynamical quantities:
  * Rest-mass density $\rho_0$
  * Pressure $P$
  * Internal energy $\epsilon$
  * 4-velocity $u^\mu$
* Electrodynamical quantities:
  * Magnetic field $B^i = \tilde{B}^i / \sqrt{\gamma}$

A Note on Notation

As is standard in NRPy+,
* Greek indices refer to four-dimensional quantities where the zeroth component indicates the temporal (time) component.
* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.

For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:
```python
T4EMUU = ixp.zerorank2(DIM=4)
for mu in range(4):
    for nu in range(4):
        # Term 1: b^2 u^{\mu} u^{\nu}
        T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]
```
When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:
```python
betaD = ixp.zerorank1()
for i in range(DIM):
    for j in range(DIM):
        betaD[i] += gammaDD[i][j] * betaU[j]
```
As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: a Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:
```python
# \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2
for i in range(DIM):
    for mu in range(4):
        for nu in range(4):
            S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2
```

Table of Contents
$$\label{toc}$$

Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows:
1. [Step 1](#importmodules): Import needed NRPy+ & Python modules
1. [Step 2](#stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$:
   * **compute_T4UU()**, **compute_T4UD()**
1. [Step 3](#declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.
1. [Step 4](#code_validation): Code Validation against `GRMHD.equations` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file

Step 1: Import needed NRPy+ & Python modules \[Back to [top](#toc)\]
$$\label{importmodules}$$
###Code
# Step 1: Import needed core NRPy+ modules
from outputC import * # NRPy+: Core C code output module
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
###Output
_____no_output_____
###Markdown
Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](#toc)\]
$$\label{stressenergy}$$

Recall from above that
$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$
where
* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and
* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.

Since a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,
$$T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},$$
where
* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and
* $T^\mu{}_{\nu}{}^{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
###Code
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
global GRHDT4UU
global GRFFET4UU
global T4UU
GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
GRHDT4UU = ixp.zerorank2(DIM=4)
GRFFET4UU = ixp.zerorank2(DIM=4)
T4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
global T4UD
GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
T4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
###Output
_____no_output_____
###Markdown
Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](#toc)\]
$$\label{declarevarsconstructgrhdeqs}$$
###Code
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
###Output
_____no_output_____
###Markdown
Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$

As a code validation check, we verify agreement in the SymPy expressions for the GRMHD equations generated in
1. this tutorial versus
2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
###Code
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
    # Flag failures in the module-level all_passed variable,
    # not a function-local one
    global all_passed
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None):
if idx2==None:
return basename+"["+str(idx1)+"]"
if idx3==None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]"
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
if all_passed:
print("ALL TESTS PASSED!")
###Output
ALL TESTS PASSED!
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$

The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf). (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-GRMHD_Equations-Cartesian.ipynb
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-GRMHD_Equations-Cartesian.ipynb to latex
[NbConvertApp] Writing 52982 bytes to Tutorial-GRMHD_Equations-Cartesian.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
GR HD Equations

Author: Zach Etienne

Formatting improvements courtesy Brandon Clark

$\newcommand{\be}{\begin{equation}}$
$\newcommand{\ee}{\end{equation}}$
$\newcommand{\grad}{{\boldsymbol{\nabla}}}$
$\newcommand{\vel}{{\boldsymbol{v}}}$
$\newcommand{\mom}{{\boldsymbol{p}}}$
$\newcommand{\ddt}[1]{{\frac{\partial #1}{\partial t}}}$
$\newcommand{\ddx}[1]{{\frac{\partial #1}{\partial x}}}$
$\newcommand{\state}{{\boldsymbol{\mathcal{U}}}}$
$\newcommand{\charge}{{\boldsymbol{U}}}$
$\newcommand{\psicharge}{{\boldsymbol{\psi}}}$
$\newcommand{\lapse}{\alpha}$
$\newcommand{\shift}{\boldsymbol{\beta}}$
$\newcommand{\rhostar}{{\rho_*}}$
$\newcommand{\tautilde}{{\tilde{\tau}}}$
$\newcommand{\Svectilde}{{\tilde{\boldsymbol{S}}}}$
$\newcommand{\rtgamma}{{\sqrt{\gamma}}}$
$\newcommand{\T}[2]{{T^{#1 #2}}}$
$\newcommand{\uvec}{{\boldsymbol{u}}}$
$\newcommand{\Vvec}{{\boldsymbol{\mathcal{V}}}}$
$\newcommand{\vfluid}{{\boldsymbol{v}_{\rm f}}}$
$\newcommand{\vVal}{{\tilde{\boldsymbol{v}}}}$
$\newcommand{\flux}{{\boldsymbol{\mathcal{F}}}}$
$\newcommand{\fluxV}{{\boldsymbol{F}}}$
$\newcommand{\source}{{\boldsymbol{\mathcal{S}}}}$
$\newcommand{\sourceV}{{\boldsymbol{S}}}$
$\newcommand{\area}{{\boldsymbol{A}}}$
$\newcommand{\normal}{{\hat{\boldsymbol{n}}}}$
$\newcommand{\pt}{{\boldsymbol{p}}}$
$\newcommand{\nb}{{\boldsymbol{n}}}$
$\newcommand{\meshv}{{\boldsymbol{w}}}$
$\newcommand{\facev}{{\boldsymbol{\tilde{w}}_{ij}}}$
$\newcommand{\facer}{{\boldsymbol{\tilde{r}}_{ij}}}$
$\newcommand{\meshr}{{\boldsymbol{r}}}$
$\newcommand{\cmr}{{\boldsymbol{c}}}$

Introduction:

We start out with the **GRHD** equations in conservative form with the state vector $\state=(\rhostar, \Svectilde, \tautilde)$:
\begin{equation}
\ddt{\state} + \grad\cdot\flux = \source,
\end{equation}
where $\rhostar = \lapse\rho\rtgamma u^0$, $\Svectilde = \rhostar h \uvec$, and $\tautilde = \lapse^2\rtgamma \T00 - \rhostar$. The associated set of primitive variables is $(\rho, \vel, \epsilon)$: the rest mass density, fluid 3-velocity, and internal energy (measured in the rest frame). The flux $\flux$ is given by
\begin{equation}
\flux=\left(\rhostar \vel,\; \lapse\rtgamma\T{j}{\beta}g_{\beta i},\; \lapse^2\rtgamma\T0j - \rhostar\vel\right),
\end{equation}
where $\vel$ is the 3-velocity, $\source = (0, \frac 1 2 \lapse\rtgamma \T{\alpha}{\beta}g_{\alpha\beta,i}, s)$ is the source function, and
\begin{equation}
s = \lapse\rtgamma\left[\left(\T00\beta^i\beta^j + 2\T0i\beta^j\right)K_{ij} - \left(\T00\beta^i + \T0i\right)\partial_i\lapse\right].
\end{equation}
The stress energy tensor for a perfect fluid is written as
\begin{equation}
\T{\mu}{\nu} = \rho h u^{\mu} u^{\nu} + P g^{\mu\nu},
\end{equation}
where $h = 1 + \epsilon + P/\rho$ is the specific enthalpy and $u^{\mu}$ are the respective components of the four velocity. Noting that the mass $\flux$ is defined in terms of $\rhostar$ and $\vel$, we need to first find a mapping between $\vel$ and $u$.

Alternative formulation

The Athena++ folks have an alternative formulation that might be superior. Begin with the continuity equation
\begin{equation}
\grad_{\mu}\rho u^{\mu} = 0,
\end{equation}
where $\grad$ is the covariant derivative.
This can be mapped directly to
\begin{equation}
\partial_{0} \sqrt{-g}\rho u^0 + \partial_i\sqrt{-g} \rho u^0 v^i = 0,
\end{equation}
which we can identify with $\rhostar = \alpha\rtgamma \rho u^0$ because $\sqrt{-g} = \alpha\rtgamma$.

Now the second equation is conservation of energy-momentum, which we write as
\begin{equation}
\grad_{\nu}T^{\nu}_{\mu} = 0.
\end{equation}
Writing this out we have
\begin{equation}
\partial_0 g_{\mu\alpha}T^{\alpha 0} + \partial_i g_{\mu\alpha}T^{\alpha i} - \Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = 0.
\end{equation}
Noting that
\begin{equation}
\Gamma^{\alpha}_{\beta\gamma} = \frac 1 2 g^{\alpha\delta}\left(\partial_{\gamma}g_{\beta\delta} + \partial_{\beta}g_{\gamma\delta} - \partial_{\delta}g_{\beta\gamma}\right),
\end{equation}
and writing this all out, we note the last term is
\begin{equation}
\Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = \frac 1 2 g^{\gamma\delta}\left(\partial_{\alpha}g_{\mu\delta} + \partial_{\mu}g_{\alpha \delta} - \partial_{\delta}g_{\mu \alpha}\right) T_{\gamma}^{\alpha} = \frac 1 2 \left(\partial_{\alpha}g_{\mu\delta} + \partial_{\mu}g_{\alpha \delta} - \partial_{\delta}g_{\mu \alpha}\right)T^{\alpha\delta}.
\end{equation}
We sum over $\alpha$ and $\delta$, noting that the first and last terms in the parentheses are antisymmetric in $\alpha$ and $\delta$ while $T^{\alpha\delta}$ is symmetric, so we have
\begin{equation}
\Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = \frac 1 2 \partial_{\mu}g_{\alpha \delta} T^{\alpha\delta}.
\end{equation}
Thus we have
\begin{equation}
\partial_0 T^{0}_{\mu} + \partial_i T^{i}_{\mu} = \frac 1 2 \partial_{\mu}g_{\alpha \delta} T^{\alpha\delta}.
\end{equation}
For $\mu = (1,2,3)$, we almost get back the equations in the standard formulation:
\begin{equation}
\partial_0 \rho h u^0 u_i + \partial_j T^j_i = \frac 1 2 \partial_{i}g_{\alpha \delta} T^{\alpha\delta},
\end{equation}
which, modulo factors of $\lapse\rtgamma$ in front, is the same as the "standard" equations.

The $T^0_0$ term is more interesting. Here we have
\begin{equation}
\partial_0 \left(\rho h u^0 u_0 + P\right) + \partial_j T^j_0 = \frac 1 2 \partial_{0}g_{\alpha \delta} T^{\alpha\delta}.
\end{equation}
However, the disadvantage is that we need the time derivative of the metric.

Table of Contents
$$\label{toc}$$

This module is organized as follows:
1. [Step 1](#mapping): Primitive to Conservative Mapping
1. [Step 2](#zach): Compute $u^0$ from the Valencia 3-velocity (Zach step)
1. [Step 3](#flux): Compute the flux
1. [Step 4](#source): Source Terms
1. [Step 5](#rotation): Rotation
1. [Step 6](#solver): Conservative to Primitive Solver
1. [Step 7](#lorentz): Lorentz Boosts
1. [Step 8](#latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file

Step 1: Primitive to Conservative Mapping
$$\label{mapping}$$

We want to make a mapping from the primitives to conserved variables:
\begin{equation}
(\rho, \vel, \epsilon) \rightarrow (\rhostar = \lapse\rho\rtgamma u^0,\; \Svectilde = \rhostar h \uvec,\; \tautilde = \lapse^2\rtgamma \T00 - \rhostar).
\end{equation}
To do so, we first need to determine $u^0$ and $\uvec$. Noting that $g_{\mu\nu} u^{\mu} u^{\nu} = -1$, we find
\begin{equation}
u^0 = \left(-g_{00} - 2g_{i0} v^i - g_{ij}v^iv^j\right)^{-1/2},
\end{equation}
where we have used $\vel = \uvec/u^0$. This gives us $\rhostar$ and $\uvec$.
We note that the metric is (B&S 2.122)
\begin{equation}
g_{\mu\nu} = \begin{pmatrix} -\lapse^2 + \shift\cdot\shift & \beta_i \\ \beta_j & \gamma_{ij}\end{pmatrix}.
\end{equation}
Let's write some code to define metric contraction on four vectors in this context:
###Code
import NRPy_param_funcs as par
import indexedexp as ixp
import sympy as sp
from outputC import *
DIM = 3
# Declare rank-2 covariant gmunu
gmunuDD = ixp.declarerank2("gmunuDD","sym01",DIM=4)
gammaDD = ixp.declarerank2("gammaDD","sym01")
components = ["xx", "xy", "xz", "yy", "yz", "zz"]
names = ""
for comp in components :
names = names + "mi.gamDD{0} ".format(comp)
gxx, gxy, gxz, gyy, gyz, gzz = sp.symbols( names)
gammaDD[0][0] = gxx
gammaDD[0][1] = gxy
gammaDD[0][2] = gxz
gammaDD[1][0] = gxy
gammaDD[1][1] = gyy
gammaDD[1][2] = gyz
gammaDD[2][0] = gxz
gammaDD[2][1] = gyz
gammaDD[2][2] = gzz
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi.alpha mi.rtDetGamma mi.betaX mi.betaY mi.betaZ")
u10, u1x, u1y, u1z = sp.symbols("u1[0] u1[1] u1[2] u1[3]")
u20, u2x, u2y, u2z = sp.symbols("u2[0] u2[1] u2[2] u2[3]")
u1U = ixp.declarerank1("u1Vector", DIM=4)
u2U = ixp.declarerank1("u2Vector", DIM=4)
u1U[0] = u10
u1U[1] = u1x
u1U[2] = u1y
u1U[3] = u1z
u2U[0] = u20
u2U[1] = u2x
u2U[2] = u2y
u2U[3] = u2z
shiftU = ixp.declarerank1("shiftU")
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
beta2 = 0
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDD[i][j] * shiftU[i]*shiftU[j]
gmunuDD[0][0] = -lapse*lapse + beta2
for i in range(DIM) :
gmunuDD[i+1][0] = shiftU[i]
gmunuDD[0][i+1] = shiftU[i]
for j in range(DIM) :
gmunuDD[i+1][j+1] = gammaDD[i][j]
dot4Product = 0
for i in range(4):
for j in range(4):
dot4Product += gmunuDD[i][j]*u1U[i]*u2U[j]
str = outputC( dot4Product, "dotProduct", filename="returnstring")
print(str)
###Output
/*
* Original SymPy expression:
* "dotProduct = mi.betaX*u1[0]*u2[1] + mi.betaX*u1[1]*u2[0] + mi.betaY*u1[0]*u2[2] + mi.betaY*u1[2]*u2[0] + mi.betaZ*u1[0]*u2[3] + mi.betaZ*u1[3]*u2[0] + mi.gamDDxx*u1[1]*u2[1] + mi.gamDDxy*u1[1]*u2[2] + mi.gamDDxy*u1[2]*u2[1] + mi.gamDDxz*u1[1]*u2[3] + mi.gamDDxz*u1[3]*u2[1] + mi.gamDDyy*u1[2]*u2[2] + mi.gamDDyz*u1[2]*u2[3] + mi.gamDDyz*u1[3]*u2[2] + mi.gamDDzz*u1[3]*u2[3] + u1[0]*u2[0]*(-mi.alpha**2 + mi.betaX**2*mi.gamDDxx + 2*mi.betaX*mi.betaY*mi.gamDDxy + 2*mi.betaX*mi.betaZ*mi.gamDDxz + mi.betaY**2*mi.gamDDyy + 2*mi.betaY*mi.betaZ*mi.gamDDyz + mi.betaZ**2*mi.gamDDzz)"
*/
{
const double tmp0 = 2*mi.betaX;
dotProduct = mi.betaX*u1[0]*u2[1] + mi.betaX*u1[1]*u2[0] + mi.betaY*u1[0]*u2[2] + mi.betaY*u1[2]*u2[0] + mi.betaZ*u1[0]*u2[3] + mi.betaZ*u1[3]*u2[0] + mi.gamDDxx*u1[1]*u2[1] + mi.gamDDxy*u1[1]*u2[2] + mi.gamDDxy*u1[2]*u2[1] + mi.gamDDxz*u1[1]*u2[3] + mi.gamDDxz*u1[3]*u2[1] + mi.gamDDyy*u1[2]*u2[2] + mi.gamDDyz*u1[2]*u2[3] + mi.gamDDyz*u1[3]*u2[2] + mi.gamDDzz*u1[3]*u2[3] + u1[0]*u2[0]*(-pow(mi.alpha, 2) + pow(mi.betaX, 2)*mi.gamDDxx + pow(mi.betaY, 2)*mi.gamDDyy + 2*mi.betaY*mi.betaZ*mi.gamDDyz + mi.betaY*mi.gamDDxy*tmp0 + pow(mi.betaZ, 2)*mi.gamDDzz + mi.betaZ*mi.gamDDxz*tmp0);
}
###Markdown
which then gives
\begin{equation}
u^0 = \left(\lapse^2 - \shift\cdot\shift - 2\shift\cdot\vel - \gamma_{ij}v^iv^j\right)^{-1/2}.
\end{equation}
The other piece is $\uvec = u^0\vel$. So we can proceed and output the conservative variables $\rhostar$ and $\Svectilde$.

To get $\tau$, we note that we have defined the metric above in its covariant (lower-index) form. The contravariant form of $g^{\mu\nu}$ is found in B&S 2.119 and is given by
\begin{equation}
g^{\mu\nu} = \begin{pmatrix}-\lapse^{-2} & \lapse^{-2}\beta^i \\ \lapse^{-2}\beta^j & \gamma^{ij} - \lapse^{-2} \beta^i\beta^j\end{pmatrix}.
\end{equation}
Let's get the form of this in code. The main challenge is the calculation of the inverse of the 3x3 matrix $\gamma_{ij}$. To do so we note:
###Code
import indexedexp as ixp
gammaUU, gammabarDet = ixp.symm_matrix_inverter3x3(gammaDD)
gUUxx = gammaUU[0][0]
gUUxy = gammaUU[0][1]
gUUxz = gammaUU[0][2]
gUUyy = gammaUU[1][1]
gUUyz = gammaUU[1][2]
gUUzz = gammaUU[2][2]
rtDetGamma = sp.sqrt(gammabarDet)
outputC( [gUUxx,gUUxy,gUUxz,gUUyy,gUUyz,gUUzz, rtDetGamma], ["mi.gamUUxx", "mi.gamUUxy","mi.gamUUxz","mi.gamUUyy","mi.gamUUyz","mi.gamUUzz","mi.rtDetGamma"], filename="NRPY+gmunuUU_and_det.h")
#print str
###Output
Wrote to file "NRPY+gmunuUU_and_det.h"
###Markdown
Step 2: Compute $u^0$ from the Valencia 3-velocity (Zach step)$$\label{zach}$$According to Eqs. 9-11 of [the IllinoisGRMHD paper](https://arxiv.org/pdf/1501.07276.pdf), the Valencia 3-velocity $v^i_{(n)}$ is related to the 4-velocity $u^\mu$ via\begin{align}\alpha v^i_{(n)} &= \frac{u^i}{u^0} + \beta^i \\\implies u^i &= u^0 \left(\alpha v^i_{(n)} - \beta^i\right)\end{align}Defining $v^i = \frac{u^i}{u^0}$, we get$$v^i = \alpha v^i_{(n)} - \beta^i,$$Or in other words in terms of the 3 velocity$$v^i_{(n)} = \alpha^{-1}\left(v^i + \beta^i\right)$$ and in terms of this variable we get\begin{align}g_{00} \left(u^0\right)^2 + 2 g_{0i} u^0 u^i + g_{ij} u^i u^j &= \left(u^0\right)^2 \left(g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j\right)\\\implies u^0 &= \pm \sqrt{\frac{-1}{g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j}} \\&= \pm \sqrt{\frac{-1}{(-\alpha^2 + \beta^2) + 2 \beta_i v^i + \gamma_{ij} v^i v^j}} \\&= \pm \sqrt{\frac{1}{\alpha^2 - \gamma_{ij}\left(\beta^i + v^i\right)\left(\beta^j + v^j\right)}}\\&= \pm \sqrt{\frac{1}{\alpha^2 - \alpha^2 \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\&= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\end{align}Generally speaking, numerical errors will occasionally drive expressions under the radical to either negative values or potentially enormous values (corresponding to enormous Lorentz factors). Thus a reliable approach for computing $u^0$ requires that we first rewrite the above expression in terms of the Lorentz factor squared: $\Gamma^2=\left(\alpha u^0\right)^2$:\begin{align}u^0 &= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\\implies \left(\alpha u^0\right)^2 &= \frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}} \\\implies \gamma_{ij}v^i_{(n)}v^j_{(n)} &= 1 - \frac{1}{\left(\alpha u^0\right)^2}\end{align}In order for the bottom expression to hold true, the left-hand side must be between 0 and 1. Again, this is not guaranteed due to the appearance of numerical errors. In fact, a robust algorithm will not allow $\Gamma^2$ to become too large (which might contribute greatly to the stress-energy of a given gridpoint), so let's define $\Gamma_{\rm max}$, the largest allowed Lorentz factor. Then our algorithm for computing $u^0$ is as follows:If$$R=\gamma_{ij}v^i_{(n)}v^j_{(n)}>1 - \frac{1}{\Gamma_{\rm max}},$$ then adjust the 3-velocity $v^i$ as follows:$$v^i_{(n)} = \sqrt{\frac{1 - \frac{1}{\Gamma_{\rm max}}}{R}}v^i_{(n)}.$$After this rescaling, we are then guaranteed that if $R$ is recomputed, it will be set to its ceiling value $R=1 - \frac{1}{\Gamma_{\rm max}}$.Then $u^0$ can be safely computed via$$u^0 = \frac{1}{\alpha \sqrt{1-R}}.$$
###Code
import sympy as sp
import NRPy_param_funcs as par
import grid as gri
import indexedexp as ixp
import reference_metric as rfm
from outputC import *
vx, vy, vz = sp.symbols( "vx vy vz")
vU = ixp.declarerank1("vU")
vU[0] = vx
vU[1] = vy
vU[2] = vz
ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU",DIM=3)
ValenciavU[0] = (vx + beta_x)/lapse
ValenciavU[1] = (vy + beta_y)/lapse
ValenciavU[2] = (vz + beta_z)/lapse
# Step 1: Compute R = 1 - 1/max(Gamma)
R = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
R += gammaDD[i][j]*ValenciavU[i]*ValenciavU[j]
GAMMA_SPEED_LIMIT = par.Cparameters("REAL","GRMHD_equations","GAMMA_SPEED_LIMIT", 10.0) # 10.0 is default for IllinoisGRMHD
Rmax = 1 - 1/GAMMA_SPEED_LIMIT**2 # ceiling on R implied by the largest allowed Lorentz factor
rescaledValenciavU = ixp.zerorank1()
for i in range(DIM):
rescaledValenciavU[i] = ValenciavU[i]*sp.sqrt(Rmax/R)
rescaledu0 = 1/(lapse*sp.sqrt(1-Rmax))
regularu0 = 1/(lapse*sp.sqrt(1-R))
computeu0_Cfunction = "/* Function for computing u^0 from Valencia 3-velocity. */\n"
computeu0_Cfunction += "/* Inputs: vx, vy, vz, lapse, MetricInformation, GAMMA_SPEED_LIMIT (C parameter) */\n"
computeu0_Cfunction += "/* Output: u0=u^0 */\n\n"
computeu0_Cfunction += outputC([R,Rmax],["const double R","const double Rmax"],"returnstring",
params="includebraces=False,CSE_varprefix=tmpR,outCverbose=False")
computeu0_Cfunction += "if(R <= Rmax) "
computeu0_Cfunction += outputC(regularu0,"u0","returnstring",
params="includebraces=True,CSE_varprefix=tmpnorescale,outCverbose=False")
computeu0_Cfunction += " else "
computeu0_Cfunction += outputC([rescaledu0],
["u0"],"returnstring",
params="includebraces=True,CSE_varprefix=tmprescale,outCverbose=False")
print(computeu0_Cfunction)
###Output
/* Function for computing u^0 from Valencia 3-velocity. */
/* Inputs: vx, vy, vz, lapse, MetricInformation, GAMMA_SPEED_LIMIT (C parameter) */
/* Output: u0=u^0 */
const double tmpR0 = pow(mi.alpha, -2);
const double tmpR1 = mi.betaX + vx;
const double tmpR2 = mi.betaY + vy;
const double tmpR3 = mi.betaZ + vz;
const double tmpR4 = 2*tmpR0*tmpR1;
const double R = mi.gamDDxx*tmpR0*pow(tmpR1, 2) + mi.gamDDxy*tmpR2*tmpR4 + mi.gamDDxz*tmpR3*tmpR4 + mi.gamDDyy*tmpR0*pow(tmpR2, 2) + 2*mi.gamDDyz*tmpR0*tmpR2*tmpR3 + mi.gamDDzz*tmpR0*pow(tmpR3, 2);
const double Rmax = 1 - 1/GAMMA_SPEED_LIMIT;
if(R <= Rmax) {
const double tmpnorescale0 = pow(mi.alpha, -2);
const double tmpnorescale1 = mi.betaX + vx;
const double tmpnorescale2 = mi.betaY + vy;
const double tmpnorescale3 = mi.betaZ + vz;
const double tmpnorescale4 = 2*tmpnorescale0*tmpnorescale1;
u0 = 1/(mi.alpha*sqrt(-mi.gamDDxx*tmpnorescale0*pow(tmpnorescale1, 2) - mi.gamDDxy*tmpnorescale2*tmpnorescale4 - mi.gamDDxz*tmpnorescale3*tmpnorescale4 - mi.gamDDyy*tmpnorescale0*pow(tmpnorescale2, 2) - 2*mi.gamDDyz*tmpnorescale0*tmpnorescale2*tmpnorescale3 - mi.gamDDzz*tmpnorescale0*pow(tmpnorescale3, 2) + 1));
}
else {
u0 = 1/(mi.alpha*sqrt(1.0/GAMMA_SPEED_LIMIT));
}
###Markdown
We now note that $\tautilde = \lapse^2\rtgamma T^{00} - \rhostar$. Using $T^{00} = \rho h \left(u^0\right)^2 - P/\lapse^2$ and $\rhostar = \lapse\rtgamma\rho u^0$, this gives\begin{equation}\tautilde = \lapse\rhostar h u^0 - P\rtgamma - \rhostar.\end{equation}The code for this is:
###Code
rho, epsilon, gamma1, p = sp.symbols("rho ie gamma p")
betaDotV = 0
for i in range(DIM) :
for j in range(DIM) :
betaDotV += gammaDD[i][j] * shiftU[i]*vU[j]
v2 = 0
for i in range(DIM) :
for j in range(DIM) :
v2 += gammaDD[i][j] * vU[i]*vU[j]
u0 = sp.symbols("u0")
uvec4U = ixp.zerorank1(DIM=4)
uvec4D = ixp.zerorank1(DIM=4)
#StildeU = ixp.declarerank1("StildeU")
StildeD = ixp.zerorank1()
rhostar = lapse*rtgamma*rho*u0
h = 1. + epsilon + p/rho
for i in range(1,4):
uvec4U[i] = vU[i-1]*u0
uvec4U[0] = u0
for mu in range(4) :
for nu in range(4) :
uvec4D[mu] += gmunuDD[mu][nu]*uvec4U[nu]
for i in range(DIM):
StildeD[i] = uvec4D[i+1]*rhostar*h
tau = lapse*rhostar*h*u0 - rtgamma*p - rhostar
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC([rhostar, StildeD[0], StildeD[1], StildeD[2], tau], ["con[iRhoStar]", "con[iSx]", "con[iSy]", "con[iSz]", "con[iTau]"], filename="returnstring")
print(str)
f = open("NRPY+prim2Con.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[con[iRhoStar] = mi.alpha*mi.rtDetGamma*rho*u0,
* con[iSx] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaX*u0 + mi.gamDDxx*u0*vx + mi.gamDDxy*u0*vy + mi.gamDDxz*u0*vz),
* con[iSy] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaY*u0 + mi.gamDDxy*u0*vx + mi.gamDDyy*u0*vy + mi.gamDDyz*u0*vz),
* con[iSz] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaZ*u0 + mi.gamDDxz*u0*vx + mi.gamDDyz*u0*vy + mi.gamDDzz*u0*vz),
* con[iTau] = mi.alpha**2*mi.rtDetGamma*rho*u0**2*(ie + p/rho + 1.0) - mi.alpha*mi.rtDetGamma*rho*u0 - mi.rtDetGamma*p]"
*/
{
const double tmp0 = mi.rtDetGamma*rho;
const double tmp1 = mi.alpha*tmp0*u0;
const double tmp2 = u0*vx;
const double tmp3 = u0*vy;
const double tmp4 = u0*vz;
const double tmp5 = ie + p/rho + 1.0;
const double tmp6 = tmp1*tmp5;
con[iRhoStar] = tmp1;
con[iSx] = tmp6*(mi.betaX*u0 + mi.gamDDxx*tmp2 + mi.gamDDxy*tmp3 + mi.gamDDxz*tmp4);
con[iSy] = tmp6*(mi.betaY*u0 + mi.gamDDxy*tmp2 + mi.gamDDyy*tmp3 + mi.gamDDyz*tmp4);
con[iSz] = tmp6*(mi.betaZ*u0 + mi.gamDDxz*tmp2 + mi.gamDDyz*tmp3 + mi.gamDDzz*tmp4);
con[iTau] = pow(mi.alpha, 2)*tmp0*tmp5*pow(u0, 2) - mi.rtDetGamma*p - tmp1;
}
###Markdown
Step 3: Compute the flux$$\label{flux}$$The evolution equations are\begin{equation}\frac{\partial}{\partial t} \begin{pmatrix}\rhostar\\\Svectilde\\\tautilde\end{pmatrix} + \frac{\partial}{\partial x^j}\begin{pmatrix} \rhostar v^j\\\lapse\rtgamma T^j_i\\ \lapse^2\rtgamma T^{0j} - \rhostar v^j\end{pmatrix} = \begin{pmatrix} 0 \\ \frac 1 2 \lapse\rtgamma T^{\alpha\beta}g_{\alpha\beta,i} \\ s \end{pmatrix},\end{equation}so the flux is \begin{equation}\mathcal{F} = \begin{pmatrix} \rhostar v^i \\ \lapse\rtgamma T^i_k \\ \lapse^2\rtgamma T^{0i} - \rhostar v^i\end{pmatrix}.\end{equation}In the moving-mesh formalism, the Riemann solve is performed along the x-direction only, so we have\begin{equation}\mathcal{F} = \begin{pmatrix} \rhostar v^1 \\ \lapse\rtgamma T^1_k \\ \lapse^2\rtgamma T^{01} - \rhostar v^1\end{pmatrix}.\end{equation}Note that we will need to rotate $T^{\mu\nu}$ and $g_{\mu\nu}$ to get the right orientation.In order to do this, we must first compute the stress-energy tensor\begin{equation}T^{\mu\nu} = \rho h u^{\mu}u^{\nu} + Pg^{\mu\nu},\end{equation}whose spatial components are $T^{ij} = \rho h \left(u^0\right)^2 v^i v^j + P g^{ij}$.
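Before assembling the flux, a quick symbolic sanity check of this stress-energy tensor may be helpful. The following sketch (flat spacetime, illustrative symbol names, not part of the notebook's generated code) verifies the standard invariant $T^{\mu\nu}u_{\mu}u_{\nu} = \rho h - P$, which the expressions built in the next cell should also satisfy:

```python
import sympy as sp

rho, h, P = sp.symbols("rho h P", positive=True)
ux, uy, uz = sp.symbols("ux uy uz", real=True)
eta = sp.diag(-1, 1, 1, 1)               # flat metric, signature (-,+,+,+)
u0 = sp.sqrt(1 + ux**2 + uy**2 + uz**2)  # enforces u^mu u_mu = -1
uU = sp.Matrix([u0, ux, uy, uz])         # contravariant four-velocity u^mu
uD = eta*uU                              # covariant four-velocity u_mu
T = rho*h*(uU*uU.T) + P*eta.inv()        # T^{mu nu} = rho h u^mu u^nu + P g^{mu nu}
print(sp.simplify((uD.T*T*uD)[0, 0]))    # -> -P + h*rho, i.e. rho h - P
```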
###Code
TmunuUU = ixp.declarerank2("TmunuUU","sym01",DIM=4)
uvecU = ixp.zerorank1()
for i in range(3) :
uvecU[i] = uvec4U[i+1]
TmunuUU[0][0] = rho*h*u0*u0 - p/(lapse*lapse) # g^{00} = -1/lapse^2, so the pressure term enters with a minus sign
for i in range(3):
TmunuUU[0][i+1] = rho*h*u0*uvecU[i] + p/(lapse*lapse)*shiftU[i]
TmunuUU[i+1][0] = rho*h*u0*uvecU[i] + p/(lapse*lapse)*shiftU[i]
for i in range(3):
for j in range(3):
TmunuUU[i+1][j+1] = rho*h*uvecU[i]*uvecU[j] + p*(gammaUU[i][j] - 1./(lapse*lapse)*shiftU[i]*shiftU[j])
#str = outputC([TmunuUU[1][0], TmunuUU[1][1], TmunuUU[1][2], TmunuUU[1][3]], ["Tmunu10", "Tmunu11", "Tmunu12", "Tmunu13"], filename="returnstring")
#print(str)
#str = outputC([gmunuDD[1][0], gmunuDD[1][1], gmunuDD[1][2], gmunuDD[1][3]], ["gmunu10", "gmunu11", "gmunu12", "gmunu13"], filename="returnstring")
#print(str)
#calculate Tmunu^1_i
Tmunu1D = ixp.zerorank1()
for i in range(3):
for j in range(0,4) :
Tmunu1D[i] += gmunuDD[i+1][j] * TmunuUU[1][j]
#str = outputC([Tmunu1D[0], Tmunu1D[1], Tmunu1D[2]], ["Tmunu1Dx", "Tmunu1Dy", "Tmunu1Dz"], filename="returnstring")
#print str
# now get the flux
fluxRho, fluxMomX, fluxMomY, fluxMomZ, fluxEnergy = sp.symbols("flux[iRhoStar] flux[iSx] flux[iSy] flux[iSz] flux[iTau]")
fluxRho = rhostar * vU[0]
fluxMomX = lapse*rtgamma*Tmunu1D[0]
fluxMomY = lapse*rtgamma*Tmunu1D[1]
fluxMomZ = lapse*rtgamma*Tmunu1D[2]
fluxEnergy = lapse*lapse*rtgamma*TmunuUU[0][1] - rhostar*vU[0]
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC([fluxRho, fluxMomX, fluxMomY, fluxMomZ, fluxEnergy], ["flux[iRhoStar]", "flux[iSx]", "flux[iSy]", "flux[iSz]", "flux[iTau]"], filename="returnstring")
print(str)
f = open("NRPY+calFlux.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[flux[iRhoStar] = mi.alpha*mi.rtDetGamma*rho*u0*vx,
* flux[iSx] = mi.alpha*mi.rtDetGamma*(mi.betaX*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxx*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDxy*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDxz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iSy] = mi.alpha*mi.rtDetGamma*(mi.betaY*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxy*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDyy*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDyz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iSz] = mi.alpha*mi.rtDetGamma*(mi.betaZ*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxz*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDyz*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDzz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iTau] = mi.alpha**2*mi.rtDetGamma*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) - mi.alpha*mi.rtDetGamma*rho*u0*vx]"
*/
{
const double tmp0 = mi.alpha*mi.rtDetGamma;
const double tmp1 = rho*vx;
const double tmp2 = tmp0*tmp1*u0;
const double tmp3 = pow(mi.alpha, 2);
const double tmp4 = 1.0/tmp3;
const double tmp5 = mi.betaX*tmp4;
const double tmp6 = pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp7 = tmp1*tmp6;
const double tmp8 = p*tmp5 + tmp7;
const double tmp9 = 1.0*tmp5;
const double tmp10 = mi.gamDDxz*mi.gamDDyz;
const double tmp11 = mi.gamDDyy*mi.gamDDzz;
const double tmp12 = pow(mi.gamDDyz, 2);
const double tmp13 = 1.0/(mi.gamDDxx*tmp11 - mi.gamDDxx*tmp12 - pow(mi.gamDDxy, 2)*mi.gamDDzz + 2*mi.gamDDxy*tmp10 - pow(mi.gamDDxz, 2)*mi.gamDDyy);
const double tmp14 = p*(-mi.betaY*tmp9 + tmp13*(-mi.gamDDxy*mi.gamDDzz + tmp10)) + tmp7*vy;
const double tmp15 = p*(-mi.betaZ*tmp9 + tmp13*(mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)) + tmp7*vz;
const double tmp16 = p*(-1.0*pow(mi.betaX, 2)*tmp4 + tmp13*(tmp11 - tmp12)) + rho*tmp6*pow(vx, 2);
flux[iRhoStar] = tmp2;
flux[iSx] = tmp0*(mi.betaX*tmp8 + mi.gamDDxx*tmp16 + mi.gamDDxy*tmp14 + mi.gamDDxz*tmp15);
flux[iSy] = tmp0*(mi.betaY*tmp8 + mi.gamDDxy*tmp16 + mi.gamDDyy*tmp14 + mi.gamDDyz*tmp15);
flux[iSz] = tmp0*(mi.betaZ*tmp8 + mi.gamDDxz*tmp16 + mi.gamDDyz*tmp14 + mi.gamDDzz*tmp15);
flux[iTau] = mi.rtDetGamma*tmp3*tmp8 - tmp2;
}
###Markdown
Step 4: Source Terms$$\label{source}$$The source terms for mass, momentum, and energy are\begin{equation}\source = \left(0, \frac 1 2 \lapse\rtgamma \T{\alpha}{\beta}g_{\alpha\beta,i}, s\right).\end{equation}For a time-stationary metric $s = 0$, so we will ignore it for now. For the momentum sources, we need derivatives of the metric; below they are approximated by a centered difference between two metric evaluations (`mi1` and `mi2`) separated by a coordinate distance $2h$, i.e., $g_{\mu\nu,i} \approx \left(g^{(2)}_{\mu\nu} - g^{(1)}_{\mu\nu}\right)/(2h)$. The code for the source terms is:
###Code
gmunuDDind = [0,0]
gammaDDind = [0,0]
alpha = [0.,0.]
h = sp.symbols( "h")
for ind in range(2) :
gmunuDDind[ind] = ixp.zerorank2(DIM=4) #derivative of gmunu in some direction
gammaDDind[ind] = ixp.zerorank2()
components = ["xx", "xy", "xz", "yy", "yz", "zz"]
names = ""
for comp in components :
names = names + "mi{1}.gamDD{0} ".format(comp, ind+1)
gxx, gxy, gxz, gyy, gyz, gzz = sp.symbols( names)
gammaDDind[ind][0][0] = gxx
gammaDDind[ind][0][1] = gxy
gammaDDind[ind][0][2] = gxz
gammaDDind[ind][1][0] = gxy
gammaDDind[ind][1][1] = gyy
gammaDDind[ind][1][2] = gyz
gammaDDind[ind][2][0] = gxz
gammaDDind[ind][2][1] = gyz
gammaDDind[ind][2][2] = gzz
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi{0}.alpha mi{0}.rtDetGamma mi{0}.betaX mi{0}.betaY mi{0}.betaZ".format(ind+1))
u10, u1x, u1y, u1z = sp.symbols("u1[0] u1[1] u1[2] u1[3]")
u20, u2x, u2y, u2z = sp.symbols("u2[0] u2[1] u2[2] u2[3]")
shiftU = ixp.zerorank1()
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
beta2 = 0
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDDind[ind][i][j] * shiftU[i]*shiftU[j]
gmunuDDind[ind][0][0] = -lapse*lapse + beta2
for i in range(DIM) :
gmunuDDind[ind][i+1][0] = shiftU[i]
gmunuDDind[ind][0][i+1] = shiftU[i]
for j in range(DIM) :
gmunuDDind[ind][i+1][j+1] = gammaDDind[ind][i][j]
dgmunuDD = ixp.zerorank2(DIM=4)
source = 0
# sum over all four spacetime indices of T^{mu nu} g_{mu nu,i}
for mu in range(4) :
for nu in range(4) :
dgmunuDD[mu][nu] = (gmunuDDind[1][mu][nu] - gmunuDDind[0][mu][nu])*(1./(2.*h))
source = source + TmunuUU[mu][nu]*dgmunuDD[mu][nu]
#print TmunuUU[2][1]
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC( [source, gmunuDDind[1][0][0]], ["source", "dgmunu"], filename="returnstring")
print(str)
f = open("NRPY+calMomSources.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[source = 1.0*(-mi1.betaX + mi2.betaX)*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2)/h + 1.0*(-mi1.betaY + mi2.betaY)*(rho*u0**2*vy*(ie + p/rho + 1.0) + mi.betaY*p/mi.alpha**2)/h + 0.5*(-mi1.gamDDxx + mi2.gamDDxx)*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0))/h + 1.0*(-mi1.gamDDxy + mi2.gamDDxy)*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0))/h + 0.5*(-mi1.gamDDyy + mi2.gamDDyy)*(p*((mi.gamDDxx*mi.gamDDzz - mi.gamDDxz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaY**2/mi.alpha**2) + rho*u0**2*vy**2*(ie + p/rho + 1.0))/h + 0.5*(rho*u0**2*(ie + p/rho + 1.0) - p/mi.alpha**2)*(mi1.alpha**2 - mi1.betaX**2*mi1.gamDDxx - 2*mi1.betaX*mi1.betaY*mi1.gamDDxy - 2*mi1.betaX*mi1.betaZ*mi1.gamDDxz - mi1.betaY**2*mi1.gamDDyy - 2*mi1.betaY*mi1.betaZ*mi1.gamDDyz - mi1.betaZ**2*mi1.gamDDzz - mi2.alpha**2 + mi2.betaX**2*mi2.gamDDxx + 2*mi2.betaX*mi2.betaY*mi2.gamDDxy + 2*mi2.betaX*mi2.betaZ*mi2.gamDDxz + mi2.betaY**2*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaZ**2*mi2.gamDDzz)/h,
* dgmunu = -mi2.alpha**2 + mi2.betaX**2*mi2.gamDDxx + 2*mi2.betaX*mi2.betaY*mi2.gamDDxy + 2*mi2.betaX*mi2.betaZ*mi2.gamDDxz + mi2.betaY**2*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaZ**2*mi2.gamDDzz]"
*/
{
const double tmp0 = pow(mi.alpha, -2);
const double tmp1 = p*tmp0;
const double tmp2 = rho*pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp3 = tmp2*vx;
const double tmp4 = 1.0/h;
const double tmp5 = 1.0*tmp4;
const double tmp6 = 1.0*tmp0;
const double tmp7 = mi.gamDDxz*mi.gamDDyz;
const double tmp8 = mi.gamDDyy*mi.gamDDzz;
const double tmp9 = pow(mi.gamDDyz, 2);
const double tmp10 = pow(mi.gamDDxz, 2);
const double tmp11 = 1.0/(mi.gamDDxx*tmp8 - mi.gamDDxx*tmp9 - pow(mi.gamDDxy, 2)*mi.gamDDzz + 2*mi.gamDDxy*tmp7 - mi.gamDDyy*tmp10);
const double tmp12 = 0.5*tmp4;
const double tmp13 = 2*mi1.betaX;
const double tmp14 = 2*mi2.betaX;
const double tmp15 = -pow(mi2.alpha, 2) + pow(mi2.betaX, 2)*mi2.gamDDxx + pow(mi2.betaY, 2)*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaY*mi2.gamDDxy*tmp14 + pow(mi2.betaZ, 2)*mi2.gamDDzz + mi2.betaZ*mi2.gamDDxz*tmp14;
source = tmp12*(-mi1.gamDDxx + mi2.gamDDxx)*(p*(-pow(mi.betaX, 2)*tmp6 + tmp11*(tmp8 - tmp9)) + tmp2*pow(vx, 2)) + tmp12*(-mi1.gamDDyy + mi2.gamDDyy)*(p*(-pow(mi.betaY, 2)*tmp6 + tmp11*(mi.gamDDxx*mi.gamDDzz - tmp10)) + tmp2*pow(vy, 2)) + tmp12*(-tmp1 + tmp2)*(pow(mi1.alpha, 2) - pow(mi1.betaX, 2)*mi1.gamDDxx - pow(mi1.betaY, 2)*mi1.gamDDyy - 2*mi1.betaY*mi1.betaZ*mi1.gamDDyz - mi1.betaY*mi1.gamDDxy*tmp13 - pow(mi1.betaZ, 2)*mi1.gamDDzz - mi1.betaZ*mi1.gamDDxz*tmp13 + tmp15) + tmp5*(-mi1.betaX + mi2.betaX)*(mi.betaX*tmp1 + tmp3) + tmp5*(-mi1.betaY + mi2.betaY)*(mi.betaY*tmp1 + tmp2*vy) + tmp5*(-mi1.gamDDxy + mi2.gamDDxy)*(p*(-mi.betaX*mi.betaY*tmp6 + tmp11*(-mi.gamDDxy*mi.gamDDzz + tmp7)) + tmp3*vy);
dgmunu = tmp15;
}
###Markdown
Step 5: Rotation$$\label{rotation}$$One of the key ideas behind the moving-mesh method is that we must rotate vectors so that the Riemann solve is along the x-direction. This is done by computing the normal vector along the "x-direction" and two normal vectors orthogonal to it. In MANGA they are labeled $n_0$, $n_1$, and $n_2$, and the rotation matrix is\begin{equation}R = \begin{pmatrix}n_{0,x} & n_{0,y} & n_{0,z} \\n_{1,x} & n_{1,y} & n_{1,z} \\n_{2,x} & n_{2,y} & n_{2,z} \end{pmatrix} \end{equation}Likewise, I also define an inverse rotation\begin{equation}R^{-1} = \begin{pmatrix}n'_{0,x} & n'_{0,y} & n'_{0,z} \\n'_{1,x} & n'_{1,y} & n'_{1,z} \\n'_{2,x} & n'_{2,y} & n'_{2,z} \end{pmatrix} \end{equation}Based on this, how do $g_{\mu\nu}$ and $T^{\mu\nu}$ transform under rotation? We begin by defining an extended rotation matrix:\begin{equation}\mathcal{R} = \begin{pmatrix}1 &0 &0 &0 \\0 &n_{0,x} & n_{0,y} & n_{0,z} \\0 &n_{1,x} & n_{1,y} & n_{1,z} \\0 & n_{2,x} & n_{2,y} & n_{2,z} \end{pmatrix} \end{equation}Now we note that the scalar $g_{\mu\nu} x^{\mu} x^{\nu}$ is invariant under rotation. Defining $x' = \mathcal{R} x$, we then note\begin{equation}g_{\mu\nu} x^{\mu} x^{\nu} = g_{\mu\nu} \mathcal{R}^{-1\mu}_{\alpha}x'^{\alpha} \mathcal{R}^{-1\nu}_{\beta}x'^{\beta} = g_{\alpha\beta} \mathcal{R}^{-1\alpha}_{\mu}\mathcal{R}^{-1\beta}_{\nu}x'^{\mu} x'^{\nu} \rightarrow g'_{\mu\nu} = g_{\alpha\beta} \mathcal{R}^{-1\alpha}_{\mu}\mathcal{R}^{-1\beta}_{\nu},\end{equation}which gives us the appropriately rotated metric. To get the analogous transformation for $T^{\mu\nu}$, we note that the trace $T = T^{\mu}{}_{\mu} = g_{\mu\nu}T^{\mu\nu}$ transforms as a scalar. So we have\begin{equation}T = g_{\mu\nu}T^{\mu\nu} = g_{\alpha\beta}\mathcal{R}^{-1,\alpha}_{\gamma}\mathcal{R}^{-1,\beta}_{\delta}\mathcal{R}^{\gamma}_{\mu}\mathcal{R}^{\delta}_{\nu} T^{\mu\nu} \rightarrow g'_{\mu\nu} T'^{\mu\nu},\end{equation}which provides the identity\begin{equation}T'^{\mu\nu} = \mathcal{R}^{\mu}_{\alpha}\mathcal{R}^{\nu}_{\beta} T^{\alpha\beta}\end{equation}
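As a quick numeric sanity check of these transformation rules, the sketch below (assuming an orthonormal triad, for which the inverse rotation is simply the transpose) verifies that $g_{\mu\nu}x^{\mu}x^{\nu}$ is indeed invariant:

```python
import numpy as np

# Build an orthonormal triad {n0, n1, n2} and the extended 4x4 rotation.
n0 = np.array([1.0, 1.0, 0.0]); n0 /= np.linalg.norm(n0)
n1 = np.array([-1.0, 1.0, 0.0]); n1 /= np.linalg.norm(n1)
n2 = np.cross(n0, n1)
Rot = np.identity(4); Rot[1:, 1:] = np.vstack([n0, n1, n2])
RotInv = np.identity(4); RotInv[1:, 1:] = Rot[1:, 1:].T  # orthonormal => inverse = transpose

g = np.diag([-1.0, 1.0, 1.0, 1.0])  # any 4-metric works; flat space for simplicity
gRot = RotInv.T @ g @ RotInv        # g'_{mu nu} = g_{alpha beta} Rinv^alpha_mu Rinv^beta_nu
x = np.array([0.3, 0.1, -0.4, 2.0])
xRot = Rot @ x
assert np.isclose(x @ g @ x, xRot @ gRot @ xRot)  # the scalar is invariant
```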
###Code
RotUD = ixp.declarerank2("RotUD", "nosym", DIM=4)
RotInvUD = ixp.declarerank2("RotInvUD", "nosym", DIM=4)
#declare normal primed vectors for rotation
n00, n01, n02 = sp.symbols("n0[0] n0[1] n0[2]")
n10, n11, n12 = sp.symbols("n1[0] n1[1] n1[2]")
n20, n21, n22 = sp.symbols("n2[0] n2[1] n2[2]")
#declare normal primed vectors for inverse rotation
n0p0, n0p1, n0p2 = sp.symbols("n0p[0] n0p[1] n0p[2]")
n1p0, n1p1, n1p2 = sp.symbols("n1p[0] n1p[1] n1p[2]")
n2p0, n2p1, n2p2 = sp.symbols("n2p[0] n2p[1] n2p[2]")
for i in range(4):
RotUD[0][i] = 0.
RotUD[i][0] = 0.
RotInvUD[0][i] = 0.
RotInvUD[i][0] = 0.
RotUD[0][0] = 1.
RotInvUD[0][0] = 1.
RotUD[1][1] = n00
RotUD[1][2] = n01
RotUD[1][3] = n02
RotUD[2][1] = n10
RotUD[2][2] = n11
RotUD[2][3] = n12
RotUD[3][1] = n20
RotUD[3][2] = n21
RotUD[3][3] = n22
RotInvUD[1][1] = n0p0
RotInvUD[1][2] = n0p1
RotInvUD[1][3] = n0p2
RotInvUD[2][1] = n1p0
RotInvUD[2][2] = n1p1
RotInvUD[2][3] = n1p2
RotInvUD[3][1] = n2p0
RotInvUD[3][2] = n2p1
RotInvUD[3][3] = n2p2
gmunuRotDD = ixp.declarerank2("gmunuRotDD", "sym01", DIM=4)
for i in range(4) :
for j in range(4) :
gmunuRotDD[i][j] = 0.
for k in range(4) :
for l in range(4) :
gmunuRotDD[i][j] += gmunuDD[l][k]*RotInvUD[l][i]*RotInvUD[k][j]
outputC([gmunuRotDD[1][1], gmunuRotDD[1][2], gmunuRotDD[1][3],gmunuRotDD[2][2], gmunuRotDD[2][3], gmunuRotDD[3][3]], ["metricInfo.gamDDxx", "metricInfo.gamDDxy","metricInfo.gamDDxz","metricInfo.gamDDyy","metricInfo.gamDDyz","metricInfo.gamDDzz"], filename="NRPY+rotateMetric.h")
# Now rotate T^{mu nu}: T'^{mu nu} = R^mu_alpha R^nu_beta T^{alpha beta}
TmunuRotUU = ixp.declarerank2("TmunuRotUU", "sym01", DIM=4)
for i in range(4) :
for j in range(4) :
TmunuRotUU[i][j] = 0.
for k in range(4) :
for l in range(4) :
TmunuRotUU[i][j] += TmunuUU[l][k]*RotUD[i][l]*RotUD[j][k]
str = outputC([TmunuRotUU[0][0], TmunuRotUU[0][1], TmunuRotUU[1][0]], ["Tmunu00", "Tmunu12", "Tmunu21"], filename="returnstring")
print(str)
###Output
Wrote to file "NRPY+rotateMetric.h"
/*
* Original SymPy expressions:
* "[Tmunu00 = 1.0*rho*u0**2*(ie + p/rho + 1.0) - 1.0*p/mi.alpha**2,
* Tmunu12 = n0[0]*(1.0*rho*u0**2*vx*(ie + p/rho + 1.0) + 1.0*mi.betaX*p/mi.alpha**2) + n0[1]*(1.0*rho*u0**2*vy*(ie + p/rho + 1.0) + 1.0*mi.betaY*p/mi.alpha**2) + n0[2]*(1.0*rho*u0**2*vz*(ie + p/rho + 1.0) + 1.0*mi.betaZ*p/mi.alpha**2),
* Tmunu21 = 1.0*n0[0]*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + 1.0*n0[1]*(rho*u0**2*vy*(ie + p/rho + 1.0) + mi.betaY*p/mi.alpha**2) + 1.0*n0[2]*(rho*u0**2*vz*(ie + p/rho + 1.0) + mi.betaZ*p/mi.alpha**2)]"
*/
{
const double tmp0 = p/pow(mi.alpha, 2);
const double tmp1 = 1.0*tmp0;
const double tmp2 = rho*pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp3 = 1.0*tmp2;
Tmunu00 = -tmp1 + tmp3;
Tmunu12 = n0[0]*(mi.betaX*tmp1 + tmp3*vx) + n0[1]*(mi.betaY*tmp1 + tmp3*vy) + n0[2]*(mi.betaZ*tmp1 + tmp3*vz);
Tmunu21 = 1.0*n0[0]*(mi.betaX*tmp0 + tmp2*vx) + 1.0*n0[1]*(mi.betaY*tmp0 + tmp2*vy) + 1.0*n0[2]*(mi.betaZ*tmp0 + tmp2*vz);
}
###Markdown
Step 6: Conservative to Primitive Solver$$\label{solver}$$We now discuss the reverse mapping from conservative to primitive variables.Given the lapse, shift vector, and $\rtgamma$, the mapping from primitive to conserved variables is straightforward. However, the reverse is not as simple. In GRMHD, the conservative-to-primitive solver is complicated by the inclusion of the magnetic field, leading to rather sophisticated root-finding strategies. The failure rates of these algorithms are low (??), but since the solver may be executed several times per timestep for every gridpoint, even a low failure rate can give unacceptable collective failure rates. However, for purely polytropic equations of state, e.g., $P\propto\rho^{\Gamma_1}$, the conservative-to-primitive solver is greatly simplified. To construct it, we restrict ourselves to polytropic equations of state\begin{equation}P = P_0\left(\frac{\rho}{\rho_0}\right)^{\Gamma_1} \quad\textrm{and}\quad \epsilon = \epsilon_0\left(\frac{\rho}{\rho_0}\right)^{\Gamma_1-1},\end{equation}where $P_0$, $\rho_0$, and $\epsilon_0$ are the fiducial pressure, density, and internal energy, and we have used the relation $P = (\Gamma_1 - 1)\rho\epsilon$. For such a polytropic equation of state, the energy equation is redundant and effectively we are only concerned with the continuity and momentum equations. The conservative variables of concern are $\rhostar$ and $\Svectilde$. Noting that the shift, $\alpha$, and $\rtgamma$ are provided by the Einstein field equations solver, we can write\begin{equation}u^0 = \frac{\rhostar}{\alpha\rtgamma\rho} = u^0(\rho) \quad\textrm{and}\quad \uvec = \frac{\Svectilde}{\rhostar h} = \uvec(\rho).\end{equation}Noting that the four-velocity satisfies $g_{\mu\nu}u^{\mu}u^{\nu} = g_{00}u^0u^0 + 2g_{0i}u^0 u^i + g_{ij}u^i u^j = -1$, we have\begin{equation} 0 = f(\rho)\equiv \rhostar^2 h^2 + \left(-\lapse^2 + \shift\cdot\shift\right)\rhostar^2 h^2 \left(u^0\right)^2 + 2 h\rhostar u^0\, \shift\cdot\Svectilde + \Svectilde\cdot\Svectilde,\end{equation}which is an implicit equation for either $\rho$ or $u^0$, where $h\left(\rho = \rhostar/(\alpha\rtgamma u^0)\right) = 1 + \Gamma_1 \epsilon$. It can be inverted by standard nonlinear root-finding algorithms, e.g., Newton-Raphson (a sketch of such a loop is given below). We put this all together by defining a function $f(\rho)$ whose root we find via Newton-Raphson. Several checks must be performed:1. $\rhostar > 0$ : This check is performed at the very beginning2. $\rho > \rho_{\rm min}$ : This check is performed after the fact3. $u^0 < \alpha^{-1}\Gamma_{\rm max}$ : This check is performed after the fact as well
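Below is a minimal sketch of the Newton-Raphson loop that a conservative-to-primitive solver might wrap around the residual generated in the next cell. It assumes the conserved variables and metric quantities have already been substituted numerically, so that `f_expr` is a SymPy expression in $\rho$ alone; the names `f_expr`, `rho_sym`, and `rho_guess` are illustrative, not part of the NRPy+ output:

```python
import sympy as sp

def c2p_density(f_expr, rho_sym, rho_guess, tol=1e-12, max_iter=50):
    """Newton-Raphson root find for the conservative-to-primitive residual f(rho)."""
    f = sp.lambdify(rho_sym, f_expr)
    fprime = sp.lambdify(rho_sym, sp.diff(f_expr, rho_sym))
    rho = float(rho_guess)
    for _ in range(max_iter):
        drho = f(rho)/fprime(rho)      # Newton step
        rho -= drho
        if abs(drho) < tol*abs(rho):   # converged to relative tolerance
            return rho
    raise RuntimeError("conservative-to-primitive solve failed to converge")
```

In practice this loop is wrapped by the checks enumerated above: $\rhostar > 0$ beforehand, and $\rho > \rho_{\rm min}$ and $u^0 < \alpha^{-1}\Gamma_{\rm max}$ on the recovered solution.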
###Code
DIM = 3
# Declare rank-1 contravariant ("v") vector
vU = ixp.declarerank1("vU")
shiftU = ixp.zerorank1()
rho, gamma1 = sp.symbols("rho gamma")
Sx, Sy, Sz = sp.symbols("con[iSx] con[iSy] con[iSz]")
p0, rho0, rhostar = sp.symbols("p_0 rho_0 rhostar")
# Declare rank-2 covariant gmunu
#gammaDD = ixp.declarerank2("gammaDD","sym01")
StildeD = ixp.declarerank1("StildeD")
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi.alpha mi.rtDetGamma mi.betaX mi.betaY mi.betaZ")
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
StildeD[0] = Sx
StildeD[1] = Sy
StildeD[2] = Sz
gamma = rtgamma*rtgamma
lapse2 = lapse*lapse
uU0 = rhostar/(lapse*rtgamma*rho)
epsilon = p0/rho0*(rho/rho0)**(gamma1 - 1)/(gamma1 - 1)
h = 1. + gamma1*epsilon
beta2 = 0.
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDD[i][j] * shiftU[i]*shiftU[j]
betaDotStilde = 0
for i in range(DIM) :
betaDotStilde += shiftU[i]*StildeD[i]
Stilde2 = 0
for i in range(DIM) :
for j in range(DIM) :
Stilde2 += gammaUU[i][j] * StildeD[i]*StildeD[j]
f = rhostar**2*h**2 + (-lapse2 + beta2)*rhostar**2.*h**2.*uU0**2 + 2.*h*rhostar*betaDotStilde*uU0 + Stilde2
outputC(f,"rootRho",filename="NRPY+rhoRoot.h")
outputC(Stilde2, "Stilde2", filename="NRPY+Stilde2.h")
###Output
Wrote to file "NRPY+rhoRoot.h"
Wrote to file "NRPY+Stilde2.h"
###Markdown
The root solve above finds $\rho$, which then allows us to get \begin{equation}u^0 = \frac{\rhostar}{\alpha\rtgamma\rho}\quad\textrm{and}\quad \vel = \frac{\uvec}{u^0} = \frac{\Svectilde}{\rhostar h(\rho)\, u^0},\end{equation}and thus we can find the rest of the primitives.
###Code
#rhostar = sp.symbols("rhostar")
#StildeU = ixp.declarerank1("StildeU")
velU = ixp.zerorank1()
#lapse, rtgamma, rho, gamma1, c = sp.symbols("lapse rtgamma rho gamma1 c")
rho, rhostar = sp.symbols("testPrim[iRho] con[iRhoStar]")
u0 = rhostar/(lapse*rtgamma*rho)
epsilon = p0/rho0*(rho/rho0)**(gamma1 - 1)/(gamma1 - 1)
h = 1. + gamma1*epsilon
for i in range(DIM) :
for j in range(DIM) :
velU[i] += gammaUU[i][j]*StildeD[j]/(rhostar * h)/u0
outputC([h,u0,velU[0],velU[1],velU[2]], ["h", "u0","testPrim[ivx]", "testPrim[ivy]", "testPrim[ivz]"],filename="NRPY+getv.h")
###Output
Wrote to file "NRPY+getv.h"
###Markdown
Step 7: Lorentz Boosts$$\label{lorentz}$$We need to boost to the frame of the moving face. For boost speed $\beta$ along the unit direction $\boldsymbol{n} = (n_x, n_y, n_z)$, with Lorentz factor $\gamma = (1-\beta^2)^{-1/2}$, the boost matrix is\begin{equation}B(\beta) =\begin{pmatrix}\gamma & -\beta\gamma n_x & -\beta\gamma n_y & -\beta\gamma n_z \\-\beta\gamma n_x & 1 + (\gamma-1)n_x^2 & (\gamma-1)n_x n_y & (\gamma-1)n_x n_z\\-\beta\gamma n_y & (\gamma-1)n_y n_x & 1 + (\gamma-1)n_y^2 & (\gamma-1)n_y n_z\\-\beta\gamma n_z & (\gamma-1)n_z n_x & (\gamma-1)n_z n_y & 1 + (\gamma-1)n_z^2 \end{pmatrix} \end{equation}And the boost is $X' = B(\beta) X$, where $X'$ and $X$ are four-vectors.So the rest of this is straightforward.
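As a concrete illustration, here is a minimal SymPy sketch of constructing $B(\beta)$; it is an assumed helper (symbol names `beta`, `n_x`, etc. are illustrative), not existing MANGA code:

```python
import sympy as sp

beta, nx, ny, nz = sp.symbols("beta n_x n_y n_z", real=True)
gam = 1/sp.sqrt(1 - beta**2)      # Lorentz factor of the boost
n = [nx, ny, nz]                  # assumed to be a unit vector
B = sp.zeros(4, 4)
B[0, 0] = gam
for i in range(3):
    B[0, i+1] = B[i+1, 0] = -beta*gam*n[i]
    for j in range(3):
        B[i+1, j+1] = (1 if i == j else 0) + (gam - 1)*n[i]*n[j]

# Sanity check: for n = (1,0,0) this reduces to the standard x-boost.
print(B.subs({nx: 1, ny: 0, nz: 0}))
```

Boosting a four-vector $X$ is then just the matrix product `B*X`. Step 8: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)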
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-GRMHD_Equations-Cartesian.ipynb
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-GRMHD_Equations-Cartesian.ipynb to latex
[NbConvertApp] Writing 103119 bytes to Tutorial-GRMHD_Equations-Cartesian.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
Equations of General Relativistic Magnetohydrodynamics (GRMHD) Author: Zach Etienne This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`**Notebook Status:** Self-Validated; induction equation not yet implemented **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)** IntroductionWe write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf)):\begin{eqnarray}\ \partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,\end{eqnarray}where $$s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij}- \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].$$We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where * $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and * $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations.
***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py)*** Namely,* The GRMHD conservative variables: * $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)` * $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)` * $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`* The GRMHD fluxes: * $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)` * $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)` * $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`* GRMHD source terms: * $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)` * $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables include:* Spacetime quantities: * ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$* Hydrodynamical quantities: * Rest-mass density $\rho_0$ * Pressure $P$ * Internal energy $\epsilon$ * 4-velocity $u^\mu$* Electrodynamical quantities * Magnetic field $B^i= \tilde{B}^i / \gamma$ A Note on NotationAs is standard in NRPy+, * Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component.* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:```pythonT4EMUU = ixp.zerorank2(DIM=4)for mu in range(4): for nu in range(4): Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]```When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:```pythonbetaD = ixp.zerorank1(DIM=3)for i in range(3): for j in range(3): betaD[i] += gammaDD[i][j] * betaU[j]```As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:```python \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2for i in range(3): for mu in range(4): for nu in range(4): S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2``` Table of Contents$$\label{toc}$$Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows1. [Step 1](importmodules): Import needed NRPy+ & Python modules1. 
[Step 2](stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$: * **compute_T4UU()**, **compute_T4UD()**: 1. [Step 3](declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.1. [Step 4](code_validation): Code Validation against `GRMHD.equations` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Import needed NRPy+ & Python modules \[Back to [top](toc)\]$$\label{importmodules}$$
###Code
# Step 1: Import needed core NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
###Output
_____no_output_____
###Markdown
Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](toc)\]$$\label{stressenergy}$$Recall from above that$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) functionSince a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,$$T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},$$where* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
###Code
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
global GRHDT4UU
global GRFFET4UU
global T4UU
GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
GRHDT4UU = ixp.zerorank2(DIM=4)
GRFFET4UU = ixp.zerorank2(DIM=4)
T4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
global T4UD
GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
T4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
###Output
_____no_output_____
###Markdown
Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](toc)\]$$\label{declarevarsconstructgrhdeqs}$$
###Code
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
###Output
_____no_output_____
###Markdown
Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation}$$As a code validation check, we verify agreement in the SymPy expressions for the GRHD equations generated in1. this tutorial versus2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
###Code
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
if str(expr1-expr2)!="0":
print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None):
if idx2 is None:
return basename+"["+str(idx1)+"]"
if idx3 is None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]"
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
import sys
if all_passed:
print("ALL TESTS PASSED!")
else:
print("ERROR: AT LEAST ONE TEST DID NOT PASS")
sys.exit(1)
###Output
ALL TESTS PASSED!
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GRMHD_Equations-Cartesian")
###Output
Created Tutorial-GRMHD_Equations-Cartesian.tex, and compiled LaTeX file to
PDF file Tutorial-GRMHD_Equations-Cartesian.pdf
###Markdown
window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Equations of General Relativistic Magnetohydrodynamics (GRMHD) Author: Zach Etienne This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`**Notebook Status:** Self-Validated; induction equation not yet implemented **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)** IntroductionWe write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf):\begin{eqnarray}\ \partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,\end{eqnarray}where $$s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij}- \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].$$We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where * $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and * $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations. 
***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py)*** Namely,* The GRMHD conservative variables: * $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)` * $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)` * $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`* The GRMHD fluxes: * $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)` * $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)` * $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`* GRMHD source terms: * $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)` * $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables include:* Spacetime quantities: * ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$* Hydrodynamical quantities: * Rest-mass density $\rho_0$ * Pressure $P$ * Internal energy $\epsilon$ * 4-velocity $u^\mu$* Electrodynamical quantities * Magnetic field $B^i= \tilde{B}^i / \gamma$ A Note on NotationAs is standard in NRPy+, * Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component.* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:```pythonT4EMUU = ixp.zerorank2(DIM=4)for mu in range(4): for nu in range(4): Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]```When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:```pythonbetaD = ixp.zerorank1(DIM=3)for i in range(3): for j in range(3): betaD[i] += gammaDD[i][j] * betaU[j]```As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:```python \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2for i in range(3): for mu in range(4): for nu in range(4): S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2``` Table of Contents$$\label{toc}$$Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows1. [Step 1](importmodules): Import needed NRPy+ & Python modules1. 
[Step 2](stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$: * **compute_T4UU()**, **compute_T4UD()**: 1. [Step 3](declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.1. [Step 4](code_validation): Code Validation against `GRMHD.equations` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Import needed NRPy+ & Python modules \[Back to [top](toc)\]$$\label{importmodules}$$
###Code
# Step 1: Import needed core NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
###Output
_____no_output_____
###Markdown
Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](toc)\]$$\label{stressenergy}$$Recall from above that$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) functionSince a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,$$T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},$$where* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
###Code
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
global GRHDT4UU
global GRFFET4UU
global T4UU
GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
GRHDT4UU = ixp.zerorank2(DIM=4)
GRFFET4UU = ixp.zerorank2(DIM=4)
T4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
global T4UD
GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
T4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
###Output
_____no_output_____
###Markdown
Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](toc)\]$$\label{declarevarsconstructgrhdeqs}$$
###Code
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
###Output
_____no_output_____
###Markdown
Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation}$$As a code validation check, we verify agreement in the SymPy expressions for the GRHD equations generated in1. this tutorial versus2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
###Code
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
if str(expr1-expr2)!="0":
print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None):
if idx2 is None:
return basename+"["+str(idx1)+"]"
if idx3 is None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]"
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
import sys
if all_passed:
print("ALL TESTS PASSED!")
else:
print("ERROR: AT LEAST ONE TEST DID NOT PASS")
sys.exit(1)
###Output
ALL TESTS PASSED!
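###Markdown
An aside on the comparison strategy above: `comp_func` tests `str(expr1-expr2)!="0"`, i.e., it requires the difference to be *structurally* zero as constructed. That works here because both code paths build the expressions identically, but it can report false mismatches for expressions that agree only after simplification. A minimal sketch of a more forgiving check (illustrative only, not part of the validation above):
###Code
import sympy as sp
x = sp.symbols("x", real=True)
e1 = sp.cos(x)**2
e2 = 1 - sp.sin(x)**2
print(str(e1 - e2) != "0")        # True: structurally different...
print(sp.simplify(e1 - e2) == 0)  # ...yet identically zero after simplification
###Output
_____no_output_____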
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GRMHD_Equations-Cartesian")
###Output
Created Tutorial-GRMHD_Equations-Cartesian.tex, and compiled LaTeX file to
PDF file Tutorial-GRMHD_Equations-Cartesian.pdf
###Markdown
GR HD Equations$\newcommand{\be}{\begin{equation}}$$\newcommand{\ee}{\end{equation}}$$\newcommand{\grad}{{\boldsymbol{\nabla}}}$$\newcommand{\vel}{{\boldsymbol{v}}}$$\newcommand{\mom}{{\boldsymbol{p}}}$$\newcommand{\ddt}[1]{{\frac{\partial #1}{\partial t}}}$$\newcommand{\ddx}[1]{{\frac{\partial #1}{\partial x}}}$$\newcommand{\state}{{\boldsymbol{\mathcal{U}}}}$$\newcommand{\charge}{{\boldsymbol{U}}}$$\newcommand{\psicharge}{{\boldsymbol{\psi}}}$$\newcommand{\lapse}{\alpha}$$\newcommand{\shift}{\boldsymbol{\beta}}$$\newcommand{\rhostar}{{\rho_*}}$$\newcommand{\tautilde}{{\tilde{\tau}}}$$\newcommand{\Svectilde}{{\tilde{\boldsymbol{S}}}}$$\newcommand{\rtgamma}{{\sqrt{\gamma}}}$$\newcommand{\T}[2]{{T^{#1 #2}}}$$\newcommand{\uvec}{{\boldsymbol{u}}}$$\newcommand{\Vvec}{{\boldsymbol{\mathcal{V}}}}$$\newcommand{\vfluid}{{\boldsymbol{v}_{\rm f}}}$$\newcommand{\vVal}{{\tilde{\boldsymbol{v}}}}$$\newcommand{\flux}{{\boldsymbol{\mathcal{F}}}}$$\newcommand{\fluxV}{{\boldsymbol{F}}}$$\newcommand{\source}{{\boldsymbol{\mathcal{S}}}}$$\newcommand{\sourceV}{{\boldsymbol{S}}}$$\newcommand{\area}{{\boldsymbol{A}}}$$\newcommand{\normal}{{\hat{\boldsymbol{n}}}}$$\newcommand{\pt}{{\boldsymbol{p}}}$$\newcommand{\nb}{{\boldsymbol{n}}}$$\newcommand{\meshv}{{\boldsymbol{w}}}$$\newcommand{\facev}{{\boldsymbol{\tilde{w}}_{ij}}}$$\newcommand{\facer}{{\boldsymbol{\tilde{r}}_{ij}}}$$\newcommand{\meshr}{{\boldsymbol{r}}}$$\newcommand{\cmr}{{\boldsymbol{c}}}$We start out with the **GRHD** equations in conservative form with the state vector $\state=(\rhostar, \Svectilde, \tautilde)$:\begin{equation}\ddt{\state} + \grad\cdot\flux = \source,\end{equation}where $\rhostar = \lapse\rho\rtgamma u^0$, $\Svectilde = \rhostar h \uvec$, $\tautilde = \lapse^2\rtgamma \T00 - \rhostar$. The associated set of primitive variables is $(\rho, \vel, \epsilon)$: the rest-mass density, fluid 3-velocity, and internal energy (measured in the rest frame). The flux $\flux$ is given by\begin{equation} \flux=\left(\rhostar \vel,\ \lapse\rtgamma\T{j}{\beta}g_{\beta i},\ \lapse^2\rtgamma\T0j - \rhostar\vel\right),\end{equation}where $\vel$ is the 3-velocity, and $\source = (0, \frac 1 2 \lapse\rtgamma \T{\alpha}{\beta}g_{\alpha\beta,i}, s)$ is the source function, with\begin{equation}s = \lapse\rtgamma\left[\left(\T00\beta^i\beta^j + 2\T0i\beta^j + \T{i}{j}\right)K_{ij} - \left(\T00\beta^i + \T0i\right)\partial_i\lapse\right].\end{equation}The stress-energy tensor for a perfect fluid is written as \begin{equation}\T{\mu}{\nu} = \rho h u^{\mu} u^{\nu} + P g^{\mu\nu},\end{equation}where $h = 1 + \epsilon + P/\rho$ is the specific enthalpy and $u^{\mu}$ are the respective components of the four-velocity. Noting that the mass flux is defined in terms of $\rhostar$ and $\vel$, we first need to find a mapping between $\vel$ and $u^{\mu}$. Alternative formulationThe Athena++ folks have an alternative formulation that might be superior. Begin with the continuity equation\begin{equation}\grad_{\mu}\left(\rho u^{\mu}\right) = 0,\end{equation}where $\grad$ is the covariant derivative. 
This can be mapped directly to \begin{equation}\partial_{0} \left(\sqrt{-g}\rho u^0\right) + \partial_i\left(\sqrt{-g} \rho u^0 v^i\right) = 0, \end{equation}in which we can identify $\rhostar = \alpha\rtgamma \rho u^0$ because $\sqrt{-g} = \alpha\rtgamma$. Now the second equation is conservation of energy-momentum, which we write as\begin{equation}\grad_{\nu}T^{\nu}_{\mu} = 0. \end{equation}Writing this out we have \begin{equation}\partial_0 g_{\mu\alpha}T^{\alpha 0} + \partial_i g_{\mu\alpha}T^{\alpha i} - \Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = 0. \end{equation}Noting that\begin{equation}\Gamma^{\alpha}_{\beta\gamma} = \frac 1 2 g^{\alpha\delta}\left(\partial_{\gamma}g_{\beta\delta} + \partial_{\beta}g_{\gamma\delta} - \partial_{\delta}g_{\beta\gamma}\right),\end{equation}the last term is\begin{equation}\Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = \frac 1 2 g^{\gamma\delta}\left(\partial_{\alpha}g_{\mu\delta} + \partial_{\mu}g_{\alpha \delta} - \partial_{\delta}g_{\mu \alpha}\right) g_{\gamma\beta}T^{\alpha\beta} = \frac 1 2 \left(\partial_{\alpha}g_{\mu\delta} + \partial_{\mu}g_{\alpha \delta} - \partial_{\delta}g_{\mu \alpha}\right)T^{\alpha\delta}.\end{equation}We sum over $\alpha$ and $\delta$; the first and last terms in parentheses are antisymmetric under $\alpha\leftrightarrow\delta$ while $T^{\alpha\delta}$ is symmetric, so we have\begin{equation}\Gamma_{\mu\alpha}^{\gamma} g_{\gamma\beta}T^{\alpha\beta} = \frac 1 2 \partial_{\mu}g_{\alpha \delta} T^{\alpha\delta}.\end{equation}Thus we have \begin{equation}\partial_0 T^{0}_{\mu} + \partial_i T^{i}_{\mu} = \frac 1 2 \partial_{\mu}g_{\alpha \delta} T^{\alpha\delta}.\end{equation}For $\mu = (1,2,3)$ we almost get back the equations in the standard formulation\begin{equation}\partial_0 \left(\rho h u^0 u_i\right) + \partial_j T^j_i = \frac 1 2 \partial_{i}g_{\alpha \delta} T^{\alpha\delta},\end{equation}which, modulo a factor of $\lapse\rtgamma$ in front, is the same as the "standard" equations. The $T^0_0$ term is more interesting. Here we have\begin{equation}\partial_0 \left(\rho h u^0 u_0\right) + \partial_j T^j_0 = \frac 1 2 \partial_{0}g_{\alpha \delta} T^{\alpha\delta}.\end{equation}However, the disadvantage is that we need the time derivative of the metric. Primitive to Conservative MappingWe want to make a mapping from the primitives to conserved variables:\begin{equation}(\rho, \vel, \epsilon) \rightarrow (\rhostar = \lapse\rho\rtgamma u^0, \Svectilde = \rhostar h \uvec, \tautilde = \lapse^2\rtgamma \T00 - \rhostar).\end{equation}To do so, we first need to determine $u^0$ and $\uvec$. Noting that $g_{\mu\nu} u^{\mu} u^{\nu} = -1$, we find\begin{equation}u^0 = \left(-g_{00} - 2g_{i0} v^i - g_{ij}v^iv^j\right)^{-1/2},\end{equation}where we have used $\vel = \uvec/u^0$. This gives us $\rhostar$ and $\uvec$. We note that the metric is (B&S 2.122)\begin{equation}g_{\mu\nu} = \begin{pmatrix} -\lapse^2 + \shift\cdot\shift & \beta_i \\\beta_j & \gamma_{ij}\end{pmatrix}.\end{equation}Let's write some code to define the metric contraction of four-vectors in this context:
###Code
import NRPy_param_funcs as par
import indexedexp as ixp
import sympy as sp
from outputC import *
DIM = 3
# Declare rank-2 covariant gmunu
gmunuDD = ixp.declarerank2("gmunuDD","sym01",DIM=4)
gammaDD = ixp.declarerank2("gammaDD","sym01")
components = ["xx", "xy", "xz", "yy", "yz", "zz"]
names = ""
for comp in components :
names = names + "mi.gamDD{0} ".format(comp)
gxx, gxy, gxz, gyy, gyz, gzz = sp.symbols( names)
gammaDD[0][0] = gxx
gammaDD[0][1] = gxy
gammaDD[0][2] = gxz
gammaDD[1][0] = gxy
gammaDD[1][1] = gyy
gammaDD[1][2] = gyz
gammaDD[2][0] = gxz
gammaDD[2][1] = gyz
gammaDD[2][2] = gzz
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi.alpha mi.rtDetGamma mi.betaX mi.betaY mi.betaZ")
u10, u1x, u1y, u1z = sp.symbols("u1[0] u1[1] u1[2] u1[3]")
u20, u2x, u2y, u2z = sp.symbols("u2[0] u2[1] u2[2] u2[3]")
u1U = ixp.declarerank1("u1Vector", DIM=4)
u2U = ixp.declarerank1("u2Vector", DIM=4)
u1U[0] = u10
u1U[1] = u1x
u1U[2] = u1y
u1U[3] = u1z
u2U[0] = u20
u2U[1] = u2x
u2U[2] = u2y
u2U[3] = u2z
shiftU = ixp.declarerank1("shiftU")
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
beta2 = 0
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDD[i][j] * shiftU[i]*shiftU[j]
gmunuDD[0][0] = -lapse*lapse + beta2
for i in range(DIM) :
gmunuDD[i+1][0] = shiftU[i]
gmunuDD[0][i+1] = shiftU[i]
for j in range(DIM) :
gmunuDD[i+1][j+1] = gammaDD[i][j]
dot4Product = 0
for i in range(4):
for j in range(4):
dot4Product += gmunuDD[i][j]*u1U[i]*u2U[j]
str = outputC( dot4Product, "dotProduct", filename="returnstring")
print(str)
###Output
/*
* Original SymPy expression:
* "dotProduct = mi.betaX*u1[0]*u2[1] + mi.betaX*u1[1]*u2[0] + mi.betaY*u1[0]*u2[2] + mi.betaY*u1[2]*u2[0] + mi.betaZ*u1[0]*u2[3] + mi.betaZ*u1[3]*u2[0] + mi.gamDDxx*u1[1]*u2[1] + mi.gamDDxy*u1[1]*u2[2] + mi.gamDDxy*u1[2]*u2[1] + mi.gamDDxz*u1[1]*u2[3] + mi.gamDDxz*u1[3]*u2[1] + mi.gamDDyy*u1[2]*u2[2] + mi.gamDDyz*u1[2]*u2[3] + mi.gamDDyz*u1[3]*u2[2] + mi.gamDDzz*u1[3]*u2[3] + u1[0]*u2[0]*(-mi.alpha**2 + mi.betaX**2*mi.gamDDxx + 2*mi.betaX*mi.betaY*mi.gamDDxy + 2*mi.betaX*mi.betaZ*mi.gamDDxz + mi.betaY**2*mi.gamDDyy + 2*mi.betaY*mi.betaZ*mi.gamDDyz + mi.betaZ**2*mi.gamDDzz)"
*/
{
const double tmp0 = 2*mi.betaX;
dotProduct = mi.betaX*u1[0]*u2[1] + mi.betaX*u1[1]*u2[0] + mi.betaY*u1[0]*u2[2] + mi.betaY*u1[2]*u2[0] + mi.betaZ*u1[0]*u2[3] + mi.betaZ*u1[3]*u2[0] + mi.gamDDxx*u1[1]*u2[1] + mi.gamDDxy*u1[1]*u2[2] + mi.gamDDxy*u1[2]*u2[1] + mi.gamDDxz*u1[1]*u2[3] + mi.gamDDxz*u1[3]*u2[1] + mi.gamDDyy*u1[2]*u2[2] + mi.gamDDyz*u1[2]*u2[3] + mi.gamDDyz*u1[3]*u2[2] + mi.gamDDzz*u1[3]*u2[3] + u1[0]*u2[0]*(-pow(mi.alpha, 2) + pow(mi.betaX, 2)*mi.gamDDxx + pow(mi.betaY, 2)*mi.gamDDyy + 2*mi.betaY*mi.betaZ*mi.gamDDyz + mi.betaY*mi.gamDDxy*tmp0 + pow(mi.betaZ, 2)*mi.gamDDzz + mi.betaZ*mi.gamDDxz*tmp0);
}
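###Markdown
As a quick sanity check on the contraction above (a sketch, reusing the SymPy symbols already defined in this cell's namespace): in flat space ($\alpha=1$, $\beta^i=0$, $\gamma_{ij}=\delta_{ij}$) the expression should reduce to the Minkowski dot product.
###Code
# Substitute flat-space values into dot4Product (illustrative check only)
flat = {lapse: 1, beta_x: 0, beta_y: 0, beta_z: 0,
        gxx: 1, gyy: 1, gzz: 1, gxy: 0, gxz: 0, gyz: 0}
print(sp.expand(dot4Product.subs(flat)))
# Expected: -u1[0]*u2[0] + u1[1]*u2[1] + u1[2]*u2[2] + u1[3]*u2[3]
###Output
_____no_output_____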
###Markdown
which then gives \begin{equation}u^0 = \left(\lapse^2 - \shift\cdot\shift - 2\shift\cdot\vel - \gamma_{ij}v^iv^j\right)^{-1/2}.\end{equation}We also have $\uvec = u^0\vel$, so we can proceed and write out the conservative variables $\rhostar$ and $\Svectilde$. To get $\tau$, we note that we have defined the metric in its covariant form, i.e., with lower indices. The upper form $g^{\mu\nu}$ is found in B&S 2.119 and is given by\begin{equation}g^{\mu\nu} = \begin{pmatrix}-\lapse^{-2} & \lapse^{-2}\beta^i \\\lapse^{-2}\beta^j & \gamma^{ij} - \lapse^{-2} \beta^i\beta^j\end{pmatrix}.\end{equation}Let's get the form of this in code. The main challenge is the calculation of the inverse of the 3x3 matrix $\gamma_{ij}$, for which we note:
###Code
import indexedexp as ixp
gammaUU, gammabarDet = ixp.symm_matrix_inverter3x3(gammaDD)
gUUxx = gammaUU[0][0]
gUUxy = gammaUU[0][1]
gUUxz = gammaUU[0][2]
gUUyy = gammaUU[1][1]
gUUyz = gammaUU[1][2]
gUUzz = gammaUU[2][2]
rtDetGamma = sp.sqrt(gammabarDet)
outputC( [gUUxx,gUUxy,gUUxz,gUUyy,gUUyz,gUUzz, rtDetGamma], ["mi.gamUUxx", "mi.gamUUxy","mi.gamUUxz","mi.gamUUyy","mi.gamUUyz","mi.gamUUzz","mi.rtDetGamma"], filename="NRPY+gmunuUU_and_det.h")
#print str
###Output
Wrote to file "NRPY+gmunuUU_and_det.h"
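###Markdown
Here is an optional symbolic spot check (a sketch) that the block form of $g^{\mu\nu}$ quoted from B&S 2.119 really is the inverse of $g_{\mu\nu}$; a diagonal spatial metric is used purely to keep the simplification fast, and note that the off-diagonal blocks of $g_{\mu\nu}$ carry the *lowered* shift $\beta_i = \gamma_{ij}\beta^j$.
###Code
import sympy as sp
al = sp.symbols("alpha", positive=True)
b1, b2, b3 = sp.symbols("beta1 beta2 beta3", real=True)
g11, g22, g33 = sp.symbols("g11 g22 g33", positive=True)
gam = sp.diag(g11, g22, g33)  # diagonal gamma_ij: an illustrative special case
betaU = sp.Matrix([b1, b2, b3])
betaD = gam*betaU             # beta_i = gamma_ij beta^j
g4 = sp.zeros(4, 4)
g4[0, 0] = -al**2 + (betaU.T*gam*betaU)[0, 0]
for i in range(3):
    g4[0, i+1] = g4[i+1, 0] = betaD[i]
    for j in range(3):
        g4[i+1, j+1] = gam[i, j]
g4inv = sp.zeros(4, 4)        # B&S 2.119 block form
g4inv[0, 0] = -1/al**2
gamInv = gam.inv()
for i in range(3):
    g4inv[0, i+1] = g4inv[i+1, 0] = betaU[i]/al**2
    for j in range(3):
        g4inv[i+1, j+1] = gamInv[i, j] - betaU[i]*betaU[j]/al**2
print(sp.simplify(g4*g4inv))  # expect the 4x4 identity matrix
###Output
_____no_output_____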
###Markdown
Zach step: Compute $u^0$ from the Valencia 3-velocityAccording to Eqs. 9-11 of [the IllinoisGRMHD paper](https://arxiv.org/pdf/1501.07276.pdf), the Valencia 3-velocity $v^i_{(n)}$ is related to the 4-velocity $u^\mu$ via\begin{align}\alpha v^i_{(n)} &= \frac{u^i}{u^0} + \beta^i \\\implies u^i &= u^0 \left(\alpha v^i_{(n)} - \beta^i\right)\end{align}Defining $v^i = \frac{u^i}{u^0}$, we get$$v^i = \alpha v^i_{(n)} - \beta^i,$$Or in other words in terms of the 3 velocity$$v^i_{(n)} = \alpha^{-1}\left(v^i + \beta^i\right)$$ and in terms of this variable we get\begin{align}g_{00} \left(u^0\right)^2 + 2 g_{0i} u^0 u^i + g_{ij} u^i u^j &= \left(u^0\right)^2 \left(g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j\right)\\\implies u^0 &= \pm \sqrt{\frac{-1}{g_{00} + 2 g_{0i} v^i + g_{ij} v^i v^j}} \\&= \pm \sqrt{\frac{-1}{(-\alpha^2 + \beta^2) + 2 \beta_i v^i + \gamma_{ij} v^i v^j}} \\&= \pm \sqrt{\frac{1}{\alpha^2 - \gamma_{ij}\left(\beta^i + v^i\right)\left(\beta^j + v^j\right)}}\\&= \pm \sqrt{\frac{1}{\alpha^2 - \alpha^2 \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\&= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\end{align}Generally speaking, numerical errors will occasionally drive expressions under the radical to either negative values or potentially enormous values (corresponding to enormous Lorentz factors). Thus a reliable approach for computing $u^0$ requires that we first rewrite the above expression in terms of the Lorentz factor squared: $\Gamma^2=\left(\alpha u^0\right)^2$:\begin{align}u^0 &= \pm \frac{1}{\alpha}\sqrt{\frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}}}\\\implies \left(\alpha u^0\right)^2 &= \frac{1}{1 - \gamma_{ij}v^i_{(n)}v^j_{(n)}} \\\implies \gamma_{ij}v^i_{(n)}v^j_{(n)} &= 1 - \frac{1}{\left(\alpha u^0\right)^2}\end{align}In order for the bottom expression to hold true, the left-hand side must be between 0 and 1. Again, this is not guaranteed due to the appearance of numerical errors. In fact, a robust algorithm will not allow $\Gamma^2$ to become too large (which might contribute greatly to the stress-energy of a given gridpoint), so let's define $\Gamma_{\rm max}$, the largest allowed Lorentz factor. Then our algorithm for computing $u^0$ is as follows:If$$R=\gamma_{ij}v^i_{(n)}v^j_{(n)}>1 - \frac{1}{\Gamma_{\rm max}},$$ then adjust the 3-velocity $v^i$ as follows:$$v^i_{(n)} = \sqrt{\frac{1 - \frac{1}{\Gamma_{\rm max}}}{R}}v^i_{(n)}.$$After this rescaling, we are then guaranteed that if $R$ is recomputed, it will be set to its ceiling value $R=1 - \frac{1}{\Gamma_{\rm max}}$.Then $u^0$ can be safely computed via$$u^0 = \frac{1}{\alpha \sqrt{1-R}}.$$
###Code
import sympy as sp
import NRPy_param_funcs as par
import grid as gri
import indexedexp as ixp
import reference_metric as rfm
from outputC import *
vx, vy, vz = sp.symbols( "vx vy vz")
vU = ixp.declarerank1("vU")
vU[0] = vx
vU[1] = vy
vU[2] = vz
ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU",DIM=3)
ValenciavU[0] = (vx + beta_x)/lapse
ValenciavU[1] = (vy + beta_y)/lapse
ValenciavU[2] = (vz + beta_z)/lapse
# Step 1: Compute R = 1 - 1/max(Gamma)
R = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
R += gammaDD[i][j]*ValenciavU[i]*ValenciavU[j]
GAMMA_SPEED_LIMIT = par.Cparameters("REAL","GRMHD_equations","GAMMA_SPEED_LIMIT")
Rmax = 1 - 1/GAMMA_SPEED_LIMIT
rescaledValenciavU = ixp.zerorank1()
for i in range(DIM):
rescaledValenciavU[i] = ValenciavU[i]*sp.sqrt(Rmax/R)
rescaledu0 = 1/(lapse*sp.sqrt(1-Rmax))
regularu0 = 1/(lapse*sp.sqrt(1-R))
computeu0_Cfunction = "/* Function for computing u^0 from Valencia 3-velocity. */\n"
computeu0_Cfunction += "/* Inputs: vx, vy, vz, lapse, MetricInformation, GAMMA_SPEED_LIMIT (C parameter) */\n"
computeu0_Cfunction += "/* Output: u0=u^0 */\n\n"
computeu0_Cfunction += outputC([R,Rmax],["const double R","const double Rmax"],"returnstring",
params="includebraces=False,CSE_varprefix=tmpR,outCverbose=False")
computeu0_Cfunction += "if(R <= Rmax) "
computeu0_Cfunction += outputC(regularu0,"u0","returnstring",
params="includebraces=True,CSE_varprefix=tmpnorescale,outCverbose=False")
computeu0_Cfunction += " else "
computeu0_Cfunction += outputC([rescaledu0],
["u0"],"returnstring",
params="includebraces=True,CSE_varprefix=tmprescale,outCverbose=False")
print(computeu0_Cfunction)
###Output
/* Function for computing u^0 from Valencia 3-velocity. */
/* Inputs: vx, vy, vz, lapse, MetricInformation, GAMMA_SPEED_LIMIT (C parameter) */
/* Output: u0=u^0 */
const double tmpR0 = pow(mi.alpha, -2);
const double tmpR1 = mi.betaX + vx;
const double tmpR2 = mi.betaY + vy;
const double tmpR3 = mi.betaZ + vz;
const double tmpR4 = 2*tmpR0*tmpR1;
const double R = mi.gamDDxx*tmpR0*pow(tmpR1, 2) + mi.gamDDxy*tmpR2*tmpR4 + mi.gamDDxz*tmpR3*tmpR4 + mi.gamDDyy*tmpR0*pow(tmpR2, 2) + 2*mi.gamDDyz*tmpR0*tmpR2*tmpR3 + mi.gamDDzz*tmpR0*pow(tmpR3, 2);
const double Rmax = 1 - 1/GAMMA_SPEED_LIMIT;
if(R <= Rmax) {
const double tmpnorescale0 = pow(mi.alpha, -2);
const double tmpnorescale1 = mi.betaX + vx;
const double tmpnorescale2 = mi.betaY + vy;
const double tmpnorescale3 = mi.betaZ + vz;
const double tmpnorescale4 = 2*tmpnorescale0*tmpnorescale1;
u0 = 1/(mi.alpha*sqrt(-mi.gamDDxx*tmpnorescale0*pow(tmpnorescale1, 2) - mi.gamDDxy*tmpnorescale2*tmpnorescale4 - mi.gamDDxz*tmpnorescale3*tmpnorescale4 - mi.gamDDyy*tmpnorescale0*pow(tmpnorescale2, 2) - 2*mi.gamDDyz*tmpnorescale0*tmpnorescale2*tmpnorescale3 - mi.gamDDzz*tmpnorescale0*pow(tmpnorescale3, 2) + 1));
}
else {
u0 = 1/(mi.alpha*sqrt(1.0/GAMMA_SPEED_LIMIT));
}
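###Markdown
For intuition, here is a plain-Python numerical version of the same speed-limiting algorithm (a sketch; the function name and inputs are illustrative stand-ins for the metric data a real driver would supply).
###Code
import numpy as np

def u0_speed_limited(gamma_ij, alpha_val, valencia_v, GAMMA_SPEED_LIMIT=10.0):
    """Compute u^0 from the Valencia 3-velocity, rescaling if R exceeds Rmax."""
    v = np.asarray(valencia_v, dtype=float)
    R = float(v @ gamma_ij @ v)          # R = gamma_ij v^i_(n) v^j_(n)
    Rmax = 1.0 - 1.0/GAMMA_SPEED_LIMIT   # same ceiling as the C code above
    if R > Rmax:
        v = v*np.sqrt(Rmax/R)            # rescale so recomputed R equals Rmax
        R = Rmax
    return 1.0/(alpha_val*np.sqrt(1.0 - R)), v

# With this Rmax convention, alpha*u^0 caps at sqrt(GAMMA_SPEED_LIMIT):
u0_val, v_capped = u0_speed_limited(np.eye(3), 1.0, [0.999, 0.0, 0.0])
print(u0_val, v_capped)
###Output
_____no_output_____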
###Markdown
We now note that $\tau = \lapse^2\rtgamma T^{00} - \rhostar$, which gives\begin{equation}\tau = \lapse\rhostar h u^0 - P\rtgamma - \rhostar\end{equation}The code for this is
###Code
rho, epsilon, gamma1, p = sp.symbols("rho ie gamma p")  # epsilon prints as "ie" and gamma1 as "gamma" in the generated C code
betaDotV = 0
for i in range(DIM) :
for j in range(DIM) :
betaDotV += gammaDD[i][j] * shiftU[i]*vU[j]
v2 = 0
for i in range(DIM) :
for j in range(DIM) :
v2 += gammaDD[i][j] * vU[i]*vU[j]
u0 = sp.symbols("u0")
uvec4U = ixp.zerorank1(DIM=4)
uvec4D = ixp.zerorank1(DIM=4)
#StildeU = ixp.declarerank1("StildeU")
StildeD = ixp.zerorank1()
rhostar = lapse*rtgamma*rho*u0  # rho_* = alpha sqrt(gamma) rho u^0
h = 1. + epsilon + p/rho        # specific enthalpy h = 1 + epsilon + P/rho
for i in range(1,4):
uvec4U[i] = vU[i-1]*u0
uvec4U[0] = u0
for mu in range(4) :
for nu in range(4) :
uvec4D[mu] += gmunuDD[mu][nu]*uvec4U[nu]
for i in range(DIM):
StildeD[i] = uvec4D[i+1]*rhostar*h
tau = lapse*rhostar*h*u0 - rtgamma*p - rhostar  # tau = alpha^2 sqrt(gamma) T^{00} - rho_*
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC([rhostar, StildeD[0], StildeD[1], StildeD[2], tau], ["con[iRhoStar]", "con[iSx]", "con[iSy]", "con[iSz]", "con[iTau]"], filename="returnstring")
print(str)
f = open("NRPY+prim2Con.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[con[iRhoStar] = mi.alpha*mi.rtDetGamma*rho*u0,
* con[iSx] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaX*u0 + mi.gamDDxx*u0*vx + mi.gamDDxy*u0*vy + mi.gamDDxz*u0*vz),
* con[iSy] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaY*u0 + mi.gamDDxy*u0*vx + mi.gamDDyy*u0*vy + mi.gamDDyz*u0*vz),
* con[iSz] = mi.alpha*mi.rtDetGamma*rho*u0*(ie + p/rho + 1.0)*(mi.betaZ*u0 + mi.gamDDxz*u0*vx + mi.gamDDyz*u0*vy + mi.gamDDzz*u0*vz),
* con[iTau] = mi.alpha**2*mi.rtDetGamma*rho*u0**2*(ie + p/rho + 1.0) - mi.alpha*mi.rtDetGamma*rho*u0 - mi.rtDetGamma*p]"
*/
{
const double tmp0 = mi.rtDetGamma*rho;
const double tmp1 = mi.alpha*tmp0*u0;
const double tmp2 = u0*vx;
const double tmp3 = u0*vy;
const double tmp4 = u0*vz;
const double tmp5 = ie + p/rho + 1.0;
const double tmp6 = tmp1*tmp5;
con[iRhoStar] = tmp1;
con[iSx] = tmp6*(mi.betaX*u0 + mi.gamDDxx*tmp2 + mi.gamDDxy*tmp3 + mi.gamDDxz*tmp4);
con[iSy] = tmp6*(mi.betaY*u0 + mi.gamDDxy*tmp2 + mi.gamDDyy*tmp3 + mi.gamDDyz*tmp4);
con[iSz] = tmp6*(mi.betaZ*u0 + mi.gamDDxz*tmp2 + mi.gamDDyz*tmp3 + mi.gamDDzz*tmp4);
con[iTau] = pow(mi.alpha, 2)*tmp0*tmp5*pow(u0, 2) - mi.rtDetGamma*p - tmp1;
}
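###Markdown
A quick symbolic check (a sketch, with fresh generic symbols) that $\alpha^2\sqrt{\gamma}\, T^{00} - \rho_*$ indeed collapses to the $\tau$ expression coded above:
###Code
import sympy as sp
al, sg, rh, hh, uu0, pp = sp.symbols("alpha sqrtgamma rho h u0 P", positive=True)
T00 = rh*hh*uu0**2 - pp/al**2            # T^{00} = rho h (u^0)^2 + P g^{00}
rho_star = al*sg*rh*uu0                  # rho_* = alpha sqrt(gamma) rho u^0
tau_def = al**2*sg*T00 - rho_star        # definition of tau
tau_coded = al*rho_star*hh*uu0 - sg*pp - rho_star
print(sp.simplify(tau_def - tau_coded))  # -> 0
###Output
_____no_output_____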
###Markdown
compute the fluxThe fluxes are as follows\begin{equation}\frac{\partial}{\partial t} \begin{pmatrix}\rhostar\\\Svectilde\\\tautilde\end{pmatrix} + \frac{\partial}{\partial x^j}\begin{pmatrix} \rhostar v^j\\\lapse\rtgamma T^j_i\\ \lapse^2\rtgamma T^{0j} - \rhostar v^j\end{pmatrix} = \begin{pmatrix} 0 \\ \frac 1 2 \lapse\rtgamma T^{\alpha\beta}g_{\alpha\beta,i} \\ s \end{pmatrix},\end{equation}so the flux is \begin{equation}\mathcal{F} = \begin{pmatrix} \rhostar v^i \\ \lapse\rtgamma T^i_k \\ \lapse^2\rtgamma T^{0i} - \rhostar v^i\end{pmatrix}.\end{equation}In the moving-mesh formalism, the flux is just taken along the x-direction, so we have\begin{equation}\mathcal{F} = \begin{pmatrix} \rhostar v^1 \\ \lapse\rtgamma T^1_k \\ \lapse^2\rtgamma T^{01} - \rhostar v^1\end{pmatrix}.\end{equation}Note that we will need to rotate $T^{\mu\nu}$ and $g_{\mu\nu}$ to get the right orientation. In order to do this, we must first compute the stress-energy tensor\begin{equation}T^{\mu\nu} = \rho h u^{\mu}u^{\nu} + Pg^{\mu\nu},\end{equation}whose spatial components are $T^{ij} = \rho h (u^0)^2 v^i v^j + P g^{ij}$.
###Code
TmunuUU = ixp.declarerank2("TmunuUU","sym01",DIM=4)
uvecU = ixp.zerorank1()
for i in range(3) :
uvecU[i] = uvec4U[i+1]
TmunuUU[0][0] = rho*h*u0*u0 - p/(lapse*lapse) # T^{00} = rho h (u^0)^2 + P g^{00}, and g^{00} = -1/alpha^2
for i in range(3):
TmunuUU[0][i+1] = rho*h*u0*uvecU[i] + p/(lapse*lapse)*shiftU[i]
TmunuUU[i+1][0] = rho*h*u0*uvecU[i] + p/(lapse*lapse)*shiftU[i]
for i in range(3):
for j in range(3):
TmunuUU[i+1][j+1] = rho*h*uvecU[i]*uvecU[j] + p*(gammaUU[i][j] - 1./(lapse*lapse)*shiftU[i]*shiftU[j])
#str = outputC([TmunuUU[1][0], TmunuUU[1][1], TmunuUU[1][2], TmunuUU[1][3]], ["Tmunu10", "Tmunu11", "Tmunu12", "Tmunu13"], filename="returnstring")
#print(str)
#str = outputC([gmunuDD[1][0], gmunuDD[1][1], gmunuDD[1][2], gmunuDD[1][3]], ["gmunu10", "gmunu11", "gmunu12", "gmunu13"], filename="returnstring")
#print(str)
#calculate Tmunu^1_i
Tmunu1D = ixp.zerorank1()
for i in range(3):
for j in range(0,4) :
Tmunu1D[i] += gmunuDD[i+1][j] * TmunuUU[1][j]
#str = outputC([Tmunu1D[0], Tmunu1D[1], Tmunu1D[2]], ["Tmunu1Dx", "Tmunu1Dy", "Tmunu1Dz"], filename="returnstring")
#print str
# now get the flux
fluxRho, fluxMomX, fluxMomY, fluxMomZ, fluxEnergy = sp.symbols("flux[iRhoStar] flux[iSx] flux[iSy] flux[iSz] flux[iTau]")
fluxRho = rhostar * vU[0]
fluxMomX = lapse*rtgamma*Tmunu1D[0]
fluxMomY = lapse*rtgamma*Tmunu1D[1]
fluxMomZ = lapse*rtgamma*Tmunu1D[2]
fluxEnergy = lapse*lapse*rtgamma*TmunuUU[0][1] - rhostar*vU[0]
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC([fluxRho, fluxMomX, fluxMomY, fluxMomZ, fluxEnergy], ["flux[iRhoStar]", "flux[iSx]", "flux[iSy]", "flux[iSz]", "flux[iTau]"], filename="returnstring")
print(str)
f = open("NRPY+calFlux.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[flux[iRhoStar] = mi.alpha*mi.rtDetGamma*rho*u0*vx,
* flux[iSx] = mi.alpha*mi.rtDetGamma*(mi.betaX*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxx*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDxy*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDxz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iSy] = mi.alpha*mi.rtDetGamma*(mi.betaY*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxy*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDyy*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDyz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iSz] = mi.alpha*mi.rtDetGamma*(mi.betaZ*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + mi.gamDDxz*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0)) + mi.gamDDyz*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0)) + mi.gamDDzz*(p*((mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaZ/mi.alpha**2) + rho*u0**2*vx*vz*(ie + p/rho + 1.0))),
* flux[iTau] = mi.alpha**2*mi.rtDetGamma*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) - mi.alpha*mi.rtDetGamma*rho*u0*vx]"
*/
{
const double tmp0 = mi.alpha*mi.rtDetGamma;
const double tmp1 = rho*vx;
const double tmp2 = tmp0*tmp1*u0;
const double tmp3 = pow(mi.alpha, 2);
const double tmp4 = 1.0/tmp3;
const double tmp5 = mi.betaX*tmp4;
const double tmp6 = pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp7 = tmp1*tmp6;
const double tmp8 = p*tmp5 + tmp7;
const double tmp9 = 1.0*tmp5;
const double tmp10 = mi.gamDDxz*mi.gamDDyz;
const double tmp11 = mi.gamDDyy*mi.gamDDzz;
const double tmp12 = pow(mi.gamDDyz, 2);
const double tmp13 = 1.0/(mi.gamDDxx*tmp11 - mi.gamDDxx*tmp12 - pow(mi.gamDDxy, 2)*mi.gamDDzz + 2*mi.gamDDxy*tmp10 - pow(mi.gamDDxz, 2)*mi.gamDDyy);
const double tmp14 = p*(-mi.betaY*tmp9 + tmp13*(-mi.gamDDxy*mi.gamDDzz + tmp10)) + tmp7*vy;
const double tmp15 = p*(-mi.betaZ*tmp9 + tmp13*(mi.gamDDxy*mi.gamDDyz - mi.gamDDxz*mi.gamDDyy)) + tmp7*vz;
const double tmp16 = p*(-1.0*pow(mi.betaX, 2)*tmp4 + tmp13*(tmp11 - tmp12)) + rho*tmp6*pow(vx, 2);
flux[iRhoStar] = tmp2;
flux[iSx] = tmp0*(mi.betaX*tmp8 + mi.gamDDxx*tmp16 + mi.gamDDxy*tmp14 + mi.gamDDxz*tmp15);
flux[iSy] = tmp0*(mi.betaY*tmp8 + mi.gamDDxy*tmp16 + mi.gamDDyy*tmp14 + mi.gamDDyz*tmp15);
flux[iSz] = tmp0*(mi.betaZ*tmp8 + mi.gamDDxz*tmp16 + mi.gamDDyz*tmp14 + mi.gamDDzz*tmp15);
flux[iTau] = mi.rtDetGamma*tmp3*tmp8 - tmp2;
}
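###Markdown
As a spot check (a sketch, reusing the in-scope symbols): in flat space the energy flux above should reduce to $\rho h (u^0)^2 v^x - \rho u^0 v^x$, i.e., $\lapse^2\rtgamma T^{01} - \rhostar v^x$ with $\lapse=\rtgamma=1$.
###Code
# Flat space: alpha = 1, beta^i = 0, sqrt(det gamma) = 1, gamma_ij = delta_ij
flat = {lapse: 1, rtgamma: 1, beta_x: 0, beta_y: 0, beta_z: 0,
        gxx: 1, gyy: 1, gzz: 1, gxy: 0, gxz: 0, gyz: 0}
print(sp.simplify(fluxEnergy.subs(flat)))
###Output
_____no_output_____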
###Markdown
Source TermsThe source terms for mass, momentum, and energy are\begin{equation}\source = \left(0, \frac 1 2 \lapse\rtgamma \T{\alpha}{\beta}g_{\alpha\beta,i}, s\right).\end{equation}For a time-stationary metric $s = 0$, so we will ignore it for now. As for the rest, we need to define derivatives of the metric. Suppose we have done this already; then the code for the source terms is:
###Code
gmunuDDind = [0,0]
gammaDDind = [0,0]
alpha = [0.,0.]
h = sp.symbols( "h") # spacing between the two metric states used in the finite difference below
for ind in range(2) :
gmunuDDind[ind] = ixp.zerorank2(DIM=4) #derivative of gmunu in some direction
gammaDDind[ind] = ixp.zerorank2()
components = ["xx", "xy", "xz", "yy", "yz", "zz"]
names = ""
for comp in components :
names = names + "mi{1}.gamDD{0} ".format(comp, ind+1)
gxx, gxy, gxz, gyy, gyz, gzz = sp.symbols( names)
gammaDDind[ind][0][0] = gxx
gammaDDind[ind][0][1] = gxy
gammaDDind[ind][0][2] = gxz
gammaDDind[ind][1][0] = gxy
gammaDDind[ind][1][1] = gyy
gammaDDind[ind][1][2] = gyz
gammaDDind[ind][2][0] = gxz
gammaDDind[ind][2][1] = gyz
gammaDDind[ind][2][2] = gzz
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi{0}.alpha mi{0}.rtDetGamma mi{0}.betaX mi{0}.betaY mi{0}.betaZ".format(ind+1))
u10, u1x, u1y, u1z = sp.symbols("u1[0] u1[1] u1[2] u1[3]")
u20, u2x, u2y, u2z = sp.symbols("u2[0] u2[1] u2[2] u2[3]")
shiftU = ixp.zerorank1()
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
beta2 = 0
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDDind[ind][i][j] * shiftU[i]*shiftU[j]
gmunuDDind[ind][0][0] = -lapse*lapse + beta2
for i in range(DIM) :
gmunuDDind[ind][i+1][0] = shiftU[i]
gmunuDDind[ind][0][i+1] = shiftU[i]
for j in range(DIM) :
gmunuDDind[ind][i+1][j+1] = gammaDDind[ind][i][j]
dgmunuDD = ixp.zerorank2(DIM=4)
source = 0
for i in range(4) :
    for j in range(4) : # sum over all four spacetime indices of T^{mu nu} g_{mu nu,i}
        dgmunuDD[i][j] = (gmunuDDind[1][i][j] - gmunuDDind[0][i][j])*(1./(2.*h))
        source = source + TmunuUU[i][j]*dgmunuDD[i][j]
#print TmunuUU[2][1]
cFunction = "double u0 = 0.;\n" + computeu0_Cfunction
str = outputC( [source, gmunuDDind[1][0][0]], ["source", "dgmunu"], filename="returnstring")
print(str)
f = open("NRPY+calMomSources.h", "w")
f.write( cFunction + str)
f.close()
###Output
/*
* Original SymPy expressions:
* "[source = 1.0*(-mi1.betaX + mi2.betaX)*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2)/h + 1.0*(-mi1.betaY + mi2.betaY)*(rho*u0**2*vy*(ie + p/rho + 1.0) + mi.betaY*p/mi.alpha**2)/h + 0.5*(-mi1.gamDDxx + mi2.gamDDxx)*(p*((mi.gamDDyy*mi.gamDDzz - mi.gamDDyz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX**2/mi.alpha**2) + rho*u0**2*vx**2*(ie + p/rho + 1.0))/h + 1.0*(-mi1.gamDDxy + mi2.gamDDxy)*(p*((-mi.gamDDxy*mi.gamDDzz + mi.gamDDxz*mi.gamDDyz)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaX*mi.betaY/mi.alpha**2) + rho*u0**2*vx*vy*(ie + p/rho + 1.0))/h + 0.5*(-mi1.gamDDyy + mi2.gamDDyy)*(p*((mi.gamDDxx*mi.gamDDzz - mi.gamDDxz**2)/(mi.gamDDxx*mi.gamDDyy*mi.gamDDzz - mi.gamDDxx*mi.gamDDyz**2 - mi.gamDDxy**2*mi.gamDDzz + 2*mi.gamDDxy*mi.gamDDxz*mi.gamDDyz - mi.gamDDxz**2*mi.gamDDyy) - 1.0*mi.betaY**2/mi.alpha**2) + rho*u0**2*vy**2*(ie + p/rho + 1.0))/h + 0.5*(rho*u0**2*(ie + p/rho + 1.0) - p/mi.alpha**2)*(mi1.alpha**2 - mi1.betaX**2*mi1.gamDDxx - 2*mi1.betaX*mi1.betaY*mi1.gamDDxy - 2*mi1.betaX*mi1.betaZ*mi1.gamDDxz - mi1.betaY**2*mi1.gamDDyy - 2*mi1.betaY*mi1.betaZ*mi1.gamDDyz - mi1.betaZ**2*mi1.gamDDzz - mi2.alpha**2 + mi2.betaX**2*mi2.gamDDxx + 2*mi2.betaX*mi2.betaY*mi2.gamDDxy + 2*mi2.betaX*mi2.betaZ*mi2.gamDDxz + mi2.betaY**2*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaZ**2*mi2.gamDDzz)/h,
* dgmunu = -mi2.alpha**2 + mi2.betaX**2*mi2.gamDDxx + 2*mi2.betaX*mi2.betaY*mi2.gamDDxy + 2*mi2.betaX*mi2.betaZ*mi2.gamDDxz + mi2.betaY**2*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaZ**2*mi2.gamDDzz]"
*/
{
const double tmp0 = pow(mi.alpha, -2);
const double tmp1 = p*tmp0;
const double tmp2 = rho*pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp3 = tmp2*vx;
const double tmp4 = 1.0/h;
const double tmp5 = 1.0*tmp4;
const double tmp6 = 1.0*tmp0;
const double tmp7 = mi.gamDDxz*mi.gamDDyz;
const double tmp8 = mi.gamDDyy*mi.gamDDzz;
const double tmp9 = pow(mi.gamDDyz, 2);
const double tmp10 = pow(mi.gamDDxz, 2);
const double tmp11 = 1.0/(mi.gamDDxx*tmp8 - mi.gamDDxx*tmp9 - pow(mi.gamDDxy, 2)*mi.gamDDzz + 2*mi.gamDDxy*tmp7 - mi.gamDDyy*tmp10);
const double tmp12 = 0.5*tmp4;
const double tmp13 = 2*mi1.betaX;
const double tmp14 = 2*mi2.betaX;
const double tmp15 = -pow(mi2.alpha, 2) + pow(mi2.betaX, 2)*mi2.gamDDxx + pow(mi2.betaY, 2)*mi2.gamDDyy + 2*mi2.betaY*mi2.betaZ*mi2.gamDDyz + mi2.betaY*mi2.gamDDxy*tmp14 + pow(mi2.betaZ, 2)*mi2.gamDDzz + mi2.betaZ*mi2.gamDDxz*tmp14;
source = tmp12*(-mi1.gamDDxx + mi2.gamDDxx)*(p*(-pow(mi.betaX, 2)*tmp6 + tmp11*(tmp8 - tmp9)) + tmp2*pow(vx, 2)) + tmp12*(-mi1.gamDDyy + mi2.gamDDyy)*(p*(-pow(mi.betaY, 2)*tmp6 + tmp11*(mi.gamDDxx*mi.gamDDzz - tmp10)) + tmp2*pow(vy, 2)) + tmp12*(-tmp1 + tmp2)*(pow(mi1.alpha, 2) - pow(mi1.betaX, 2)*mi1.gamDDxx - pow(mi1.betaY, 2)*mi1.gamDDyy - 2*mi1.betaY*mi1.betaZ*mi1.gamDDyz - mi1.betaY*mi1.gamDDxy*tmp13 - pow(mi1.betaZ, 2)*mi1.gamDDzz - mi1.betaZ*mi1.gamDDxz*tmp13 + tmp15) + tmp5*(-mi1.betaX + mi2.betaX)*(mi.betaX*tmp1 + tmp3) + tmp5*(-mi1.betaY + mi2.betaY)*(mi.betaY*tmp1 + tmp2*vy) + tmp5*(-mi1.gamDDxy + mi2.gamDDxy)*(p*(-mi.betaX*mi.betaY*tmp6 + tmp11*(-mi.gamDDxy*mi.gamDDzz + tmp7)) + tmp3*vy);
dgmunu = tmp15;
}
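###Markdown
The metric derivative above is approximated by differencing two metric states, $(g_2-g_1)/(2h)$; centered differencing of this kind is second-order accurate in the sampling interval. A quick standalone illustration of that convergence rate (a sketch, unrelated to the metric data itself):
###Code
import numpy as np

def centered(fn, x, hh):
    # centered first derivative with samples at x-hh and x+hh
    return (fn(x + hh) - fn(x - hh))/(2.0*hh)

for hh in (1e-2, 1e-3, 1e-4):
    err = abs(centered(np.sin, 0.5, hh) - np.cos(0.5))
    print(hh, err)  # error drops ~100x per 10x reduction in hh
###Output
_____no_output_____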
###Markdown
RotationOne of the key ideas behind the moving-mesh formulation is that we must rotate the vector to the appropriate direction such that the Riemann solve is along the x-direction. This is done by computing the normal vector along the "x-direction" and two normal vectors orthogonal to it. In MANGA they are labeled $n_0$, $n_1$, and $n_2$, and the rotation matrix looks like\begin{equation}R = \begin{pmatrix}n_{0,x} & n_{0,y} & n_{0,z} \\n_{1,x} & n_{1,y} & n_{1,z} \\n_{2,x} & n_{2,y} & n_{2,z} \end{pmatrix}. \end{equation}Likewise, we also define an inverse rotation as \begin{equation}R^{-1} = \begin{pmatrix}n'_{0,x} & n'_{0,y} & n'_{0,z} \\n'_{1,x} & n'_{1,y} & n'_{1,z} \\n'_{2,x} & n'_{2,y} & n'_{2,z} \end{pmatrix}. \end{equation}Based on this, how do $g_{\mu\nu}$ and $T^{\mu\nu}$ transform under rotation? We begin by defining an extended rotation matrix:\begin{equation}\mathcal{R} = \begin{pmatrix}1 &0 &0 &0 \\0 &n_{0,x} & n_{0,y} & n_{0,z} \\0 &n_{1,x} & n_{1,y} & n_{1,z} \\0 & n_{2,x} & n_{2,y} & n_{2,z} \end{pmatrix}. \end{equation}Now we note that the term $g_{\mu\nu} x^{\mu} x^{\nu}$ is invariant under rotation. Defining $x' = R x$, we then note\begin{equation}g_{\mu\nu} x^{\mu} x^{\nu} = g_{\mu\nu} \mathcal{R}^{-1\mu}_{\alpha}x'^{\alpha} \mathcal{R}^{-1\nu}_{\beta}x'^{\beta} = g_{\alpha\beta} \mathcal{R}^{-1\alpha}_{\mu}\mathcal{R}^{-1\beta}_{\nu}x'^{\mu} x'^{\nu} \rightarrow g'_{\mu\nu} = g_{\alpha\beta} \mathcal{R}^{-1\alpha}_{\mu}\mathcal{R}^{-1\beta}_{\nu},\end{equation}which gives us the appropriate rotated metric. To get the similar transformation for $T^{\mu\nu}$, we note that $T = T^{\mu}_{\mu} = g_{\mu\nu}T^{\mu\nu}$ transforms as a scalar. So we have\begin{equation}T = g_{\mu\nu}T^{\mu\nu} = g_{\alpha\beta}\mathcal{R}^{-1,\alpha}_{\gamma}\mathcal{R}^{-1,\beta}_{\delta}\mathcal{R}^{\gamma}_{\mu}\mathcal{R}^{\delta}_{\nu} T^{\mu\nu} \rightarrow g'_{\mu\nu} T'^{\mu\nu},\end{equation}which provides the identity\begin{equation}T'^{\mu\nu} = \mathcal{R}^{\mu}_{\alpha}\mathcal{R}^{\nu}_{\beta} T^{\alpha\beta}.\end{equation}
###Code
RotUD = ixp.declarerank2("RotUD", "nosym", DIM=4)
RotInvUD = ixp.declarerank2("RotInvUD", "nosym", DIM=4)
#declare normal primed vectors for rotation
n00, n01, n02 = sp.symbols("n0[0] n0[1] n0[2]")
n10, n11, n12 = sp.symbols("n1[0] n1[1] n1[2]")
n20, n21, n22 = sp.symbols("n2[0] n2[1] n2[2]")
#declare normal primed vectors for inverse rotation
n0p0, n0p1, n0p2 = sp.symbols("n0p[0] n0p[1] n0p[2]")
n1p0, n1p1, n1p2 = sp.symbols("n1p[0] n1p[1] n1p[2]")
n2p0, n2p1, n2p2 = sp.symbols("n2p[0] n2p[1] n2p[2]")
for i in range(4):
RotUD[0][i] = 0.
RotUD[i][0] = 0.
RotInvUD[0][i] = 0.
RotInvUD[i][0] = 0.
RotUD[0][0] = 1.
RotInvUD[0][0] = 1.
RotUD[1][1] = n00
RotUD[1][2] = n01
RotUD[1][3] = n02
RotUD[2][1] = n10
RotUD[2][2] = n11
RotUD[2][3] = n12
RotUD[3][1] = n20
RotUD[3][2] = n21
RotUD[3][3] = n22
RotInvUD[1][1] = n0p0
RotInvUD[1][2] = n0p1
RotInvUD[1][3] = n0p2
RotInvUD[2][1] = n1p0
RotInvUD[2][2] = n1p1
RotInvUD[2][3] = n1p2
RotInvUD[3][1] = n2p0
RotInvUD[3][2] = n2p1
RotInvUD[3][3] = n2p2
gmunuRotDD = ixp.declarerank2("gmunuRotDD", "sym01", DIM=4)
for i in range(4) :
for j in range(4) :
gmunuRotDD[i][j] = 0.
for k in range(4) :
for l in range(4) :
gmunuRotDD[i][j] += gmunuDD[l][k]*RotInvUD[l][i]*RotInvUD[k][j]
outputC([gmunuRotDD[1][1], gmunuRotDD[1][2], gmunuRotDD[1][3],gmunuRotDD[2][2], gmunuRotDD[2][3], gmunuRotDD[3][3]], ["metricInfo.gamDDxx", "metricInfo.gamDDxy","metricInfo.gamDDxz","metricInfo.gamDDyy","metricInfo.gamDDyz","metricInfo.gamDDzz"], filename="NRPY+rotateMetric.h")
# now rotate the stress-energy tensor: T'^{mu nu} = R^mu_alpha R^nu_beta T^{alpha beta}
TmunuRotUU = ixp.declarerank2("TmunuRotUU", "sym01", DIM=4)
for i in range(4) :
for j in range(4) :
TmunuRotUU[i][j] = 0.
for k in range(4) :
for l in range(4) :
TmunuRotUU[i][j] += TmunuUU[l][k]*RotUD[i][l]*RotUD[j][k]
str = outputC([TmunuRotUU[0][0], TmunuRotUU[0][1], TmunuRotUU[1][0]], ["Tmunu00", "Tmunu12", "Tmunu21"], filename="returnstring")
print(str)
###Output
Wrote to file "NRPY+rotateMetric.h"
/*
* Original SymPy expressions:
* "[Tmunu00 = 1.0*rho*u0**2*(ie + p/rho + 1.0) - 1.0*p/mi.alpha**2,
* Tmunu12 = n0[0]*(1.0*rho*u0**2*vx*(ie + p/rho + 1.0) + 1.0*mi.betaX*p/mi.alpha**2) + n0[1]*(1.0*rho*u0**2*vy*(ie + p/rho + 1.0) + 1.0*mi.betaY*p/mi.alpha**2) + n0[2]*(1.0*rho*u0**2*vz*(ie + p/rho + 1.0) + 1.0*mi.betaZ*p/mi.alpha**2),
* Tmunu21 = 1.0*n0[0]*(rho*u0**2*vx*(ie + p/rho + 1.0) + mi.betaX*p/mi.alpha**2) + 1.0*n0[1]*(rho*u0**2*vy*(ie + p/rho + 1.0) + mi.betaY*p/mi.alpha**2) + 1.0*n0[2]*(rho*u0**2*vz*(ie + p/rho + 1.0) + mi.betaZ*p/mi.alpha**2)]"
*/
{
const double tmp0 = p/pow(mi.alpha, 2);
const double tmp1 = 1.0*tmp0;
const double tmp2 = rho*pow(u0, 2)*(ie + p/rho + 1.0);
const double tmp3 = 1.0*tmp2;
Tmunu00 = -tmp1 + tmp3;
Tmunu12 = n0[0]*(mi.betaX*tmp1 + tmp3*vx) + n0[1]*(mi.betaY*tmp1 + tmp3*vy) + n0[2]*(mi.betaZ*tmp1 + tmp3*vz);
Tmunu21 = 1.0*n0[0]*(mi.betaX*tmp0 + tmp2*vx) + 1.0*n0[1]*(mi.betaY*tmp0 + tmp2*vy) + 1.0*n0[2]*(mi.betaZ*tmp0 + tmp2*vz);
}
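###Markdown
A small numerical check of the rotation logic (a sketch): for an orthonormal triad the extended $4\times4$ rotation $\mathcal{R}$ satisfies $\mathcal{R}^{-1}=\mathcal{R}^T$, and it must leave the flat-space metric unchanged.
###Code
import numpy as np

theta = 0.3
R3 = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0,            0.0,           1.0]])
R4 = np.eye(4)
R4[1:, 1:] = R3                       # extended rotation: time row/column untouched
eta = np.diag([-1.0, 1.0, 1.0, 1.0])  # flat-space metric
# g'_{mu nu} = g_{alpha beta} Rinv^alpha_mu Rinv^beta_nu, with Rinv = R4.T
print(np.allclose(R4.T @ eta @ R4, eta))  # -> True
###Output
_____no_output_____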
###Markdown
Conservative to Primitive SolverWe now discuss the reverse mapping from conservative to primitive variables. Given the lapse, shift vector, and $\rtgamma$, the mapping from primitive to conserved variables is straightforward. However, the reverse is not as simple. In GRMHD, the conservative-to-primitive solver is complicated by the inclusion of the magnetic field, leading to rather sophisticated root-finding strategies. The failure rates of these algorithms are low (??), but since this algorithm may be executed several times per timestep for every gridpoint, even a low failure rate can give unacceptable collective failure rates. However, for purely polytropic equations of state, e.g., $P\propto\rho^{\Gamma_1}$, the conservative-to-primitive variable solver is greatly simplified. To construct the conservative-to-primitive variable solver, we restrict ourselves to polytropic equations of state\begin{equation}P = P_0\left(\frac{\rho}{\rho_0}\right)^{\Gamma_1} \quad\textrm{and}\quad \epsilon = \epsilon_0\left(\frac{\rho}{\rho_0}\right)^{\Gamma_1-1},\end{equation}where $P_0$, $\rho_0$, and $\epsilon_0$ are the fiducial pressure, density, and internal energy, and we have used the relation $P = (\Gamma_1 - 1)\rho\epsilon$. For such a polytropic equation of state, the energy equation is redundant and effectively we are only concerned with the continuity and momentum equations. The conservative variables of concern are $\rhostar$ and $\Svectilde$. Noting that the shift, $\alpha$, and $\rtgamma$ are provided by the Einstein field equation solver, we can write\begin{equation}u^0 = \frac{\rhostar}{\alpha\rtgamma\rho} = u^0(\rho) \quad\textrm{and}\quad \uvec = \frac{\Svectilde}{\rhostar h} = \uvec(\rho).\end{equation}Noting that the four-velocity satisfies $g_{\mu\nu}u^{\mu}u^{\nu} = g_{00}u^0u^0 + 2g_{0i}u^0\uvec^i + g_{ij}\uvec^i\uvec^j = -1$, we have\begin{equation} 0 = f(\rho)\equiv \rhostar^2h^2 + \left(-\lapse^2 + \shift\cdot\shift\right)\rhostar^2h^2 (u^0)^2 + 2\rhostar h u^0\shift^i\Svectilde_i + \gamma^{ij}\Svectilde_i\Svectilde_j,\end{equation}which is an implicit equation for either $\rho$ or $u^0$, where $h(\rho) = 1 + \Gamma_1 \epsilon$ with $\rho = \rhostar/(\alpha\rtgamma u^0)$; it can be inverted by standard nonlinear root-finding algorithms, e.g., Newton-Raphson. We put this all together to define a function $f(\rho)$ whose root we will find via Newton-Raphson. Several checks must be performed:1. $\rhostar > 0$ : This check is performed at the very beginning2. $\rho > \rho_{\rm min}$ : This check is performed after the fact3. $u^0 < \alpha^{-1}\Gamma_{\rm max}$ : This check is performed after the fact as well
###Code
DIM = 3
# Declare rank-1 contravariant ("v") vector
vU = ixp.declarerank1("vU")
shiftU = ixp.zerorank1()
rho, gamma1 = sp.symbols("rho gamma")
Sx, Sy, Sz = sp.symbols("con[iSx] con[iSy] con[iSz]")
p0, rho0, rhostar = sp.symbols("p_0 rho_0 rhostar")
# Declare rank-2 covariant gmunu
#gammaDD = ixp.declarerank2("gammaDD","sym01")
StildeD = ixp.declarerank1("StildeD")
lapse, rtgamma, beta_x, beta_y, beta_z = sp.symbols( "mi.alpha mi.rtDetGamma mi.betaX mi.betaY mi.betaZ")
shiftU[0] = beta_x
shiftU[1] = beta_y
shiftU[2] = beta_z
StildeD[0] = Sx
StildeD[1] = Sy
StildeD[2] = Sz
gamma = rtgamma*rtgamma
lapse2 = lapse*lapse
uU0 = rhostar/(lapse*rtgamma*rho)
epsilon = p0/rho0*(rho/rho0)**(gamma1 - 1)/(gamma1 - 1)
h = 1. + gamma1*epsilon
beta2 = 0.
for i in range(DIM) :
for j in range(DIM) :
beta2 += gammaDD[i][j] * shiftU[i]*shiftU[j]
betaDotStilde = 0
for i in range(DIM) :
betaDotStilde += shiftU[i]*StildeD[i]
Stilde2 = 0
for i in range(DIM) :
for j in range(DIM) :
Stilde2 += gammaUU[i][j] * StildeD[i]*StildeD[j]
f = rhostar**2*h**2 + (-lapse2 + beta2)*rhostar**2.*h**2.*uU0**2 + 2.*h*rhostar*betaDotStilde*uU0 + Stilde2
outputC(f,"rootRho",filename="NRPY+rhoRoot.h")
outputC(Stilde2, "Stilde2", filename="NRPY+Stilde2.h")
###Output
Wrote to file "NRPY+rhoRoot.h"
Wrote to file "NRPY+Stilde2.h"
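###Markdown
To make the root find concrete, here is a minimal Newton-Raphson sketch in flat space ($\alpha=1$, $\beta^i=0$, $\rtgamma=1$) with the polytropic $h(\rho)$ above; all inputs and the numerical-derivative step are illustrative, not the production C solver.
###Code
import numpy as np

def rho_from_cons(rhostar, Stilde2, P0=1.0, rho0=1.0, Gamma=5.0/3.0,
                  rho_guess=1.0, tol=1e-12):
    def h_of_rho(rho):
        eps = (P0/rho0)*(rho/rho0)**(Gamma - 1.0)/(Gamma - 1.0)
        return 1.0 + Gamma*eps
    def f(rho):
        u0 = rhostar/rho                 # u^0 = rhostar/(alpha rtgamma rho)
        h = h_of_rho(rho)
        return rhostar**2*h**2*(1.0 - u0**2) + Stilde2  # flat-space f(rho)
    rho = rho_guess
    for _ in range(100):
        # simple centered numerical derivative for the Newton step
        df = (f(rho*(1 + 1e-7)) - f(rho*(1 - 1e-7)))/(2e-7*rho)
        step = f(rho)/df
        rho -= step
        if abs(step) < tol*rho:
            break
    return rho

print(rho_from_cons(rhostar=1.2, Stilde2=0.3))  # root lies between 1.0 and 1.2
###Output
_____no_output_____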
###Markdown
The root solve above finds $\rho$, which then allows us to get \begin{equation}u^0 = \frac{\rhostar}{\alpha\rtgamma\rho}\quad\textrm{and}\quad v^i = \frac{u^i}{u^0} = \frac{\gamma^{ij}\Svectilde_j}{\rhostar h(\rho)\,u^0},\end{equation}and thus we can find the rest of the primitives.
###Code
#rhostar = sp.symbols("rhostar")
#StildeU = ixp.declarerank1("StildeU")
velU = ixp.zerorank1()
#lapse, rtgamma, rho, gamma1, c = sp.symbols("lapse rtgamma rho gamma1 c")
rho, rhostar = sp.symbols("testPrim[iRho] con[iRhoStar]")
u0 = rhostar/(lapse*rtgamma*rho)
epsilon = p0/rho0*(rho/rho0)**(gamma1 - 1)/(gamma1 - 1)
h = 1. + gamma1*epsilon
for i in range(DIM) :
for j in range(DIM) :
velU[i] += gammaUU[i][j]*StildeD[j]/(rhostar * h)/u0
outputC([h,u0,velU[0],velU[1],velU[2]], ["h", "u0","testPrim[ivx]", "testPrim[ivy]", "testPrim[ivz]"],filename="NRPY+getv.h")
###Output
Wrote to file "NRPY+getv.h"
###Markdown
Equations of General Relativistic Magnetohydrodynamics (GRMHD) Author: Zach Etienne This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`**Notebook Status:** Self-Validated; induction equation not yet implemented **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)** IntroductionWe write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf)):\begin{eqnarray}\ \partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,\end{eqnarray}where $$s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij}- \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].$$We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where * $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and * $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations. 
***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py)*** Namely,* The GRMHD conservative variables: * $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)` * $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)` * $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`* The GRMHD fluxes: * $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)` * $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)` * $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`* GRMHD source terms: * $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)` * $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables includes:* Spacetime quantities: * ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$* Hydrodynamical quantities: * Rest-mass density $\rho_0$ * Pressure $P$ * Internal energy $\epsilon$ * 4-velocity $u^\mu$* Electrodynamical quantities * Magnetic field $B^i= \tilde{B}^i / \sqrt{\gamma}$ A Note on NotationAs is standard in NRPy+, * Greek indices refer to four-dimensional quantities where the zeroth component indicates the temporal (time) component.* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:```pythonT4EMUU = ixp.zerorank2(DIM=4)for mu in range(4): for nu in range(4): Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]```When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:```pythonbetaD = ixp.zerorank1(DIM=3)for i in range(3): for j in range(3): betaD[i] += gammaDD[i][j] * betaU[j]```As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:```python \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2for i in range(3): for mu in range(4): for nu in range(4): S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2``` Table of Contents$$\label{toc}$$Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows1. [Step 1](importmodules): Import needed NRPy+ & Python modules1. 
[Step 2](stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$: * **compute_T4UU()**, **compute_T4UD()**: 1. [Step 3](declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.1. [Step 4](code_validation): Code Validation against `GRMHD.equations` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Import needed NRPy+ & Python modules \[Back to [top](toc)\]$$\label{importmodules}$$
###Code
# Step 1: Import needed core NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
###Output
_____no_output_____
###Markdown
Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](toc)\]$$\label{stressenergy}$$Recall from above that$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) functionSince a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,$$T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},$$where* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
###Code
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
global GRHDT4UU
global GRFFET4UU
global T4UU
GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
GRHDT4UU = ixp.zerorank2(DIM=4)
GRFFET4UU = ixp.zerorank2(DIM=4)
T4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
global T4UD
GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
T4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
###Output
_____no_output_____
###Markdown
Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](toc)\]$$\label{declarevarsconstructgrhdeqs}$$
###Code
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
###Output
_____no_output_____
###Markdown
Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation}$$As a code validation check, we verify agreement in the SymPy expressions for the GRMHD equations generated in1. this tutorial versus2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
###Code
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
    global all_passed  # without this, the assignment below would create a local and never flip the module-level flag
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None):
if idx2 is None:
return basename+"["+str(idx1)+"]"
if idx3 is None:
return basename+"["+str(idx1)+"]["+str(idx2)+"]"
return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
import sys
if all_passed:
print("ALL TESTS PASSED!")
else:
print("ERROR: AT LEAST ONE TEST DID NOT PASS")
sys.exit(1)
###Output
ALL TESTS PASSED!
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GRMHD_Equations-Cartesian")
###Output
Created Tutorial-GRMHD_Equations-Cartesian.tex, and compiled LaTeX file to
PDF file Tutorial-GRMHD_Equations-Cartesian.pdf
###Markdown
Equations of General Relativistic Magnetohydrodynamics (GRMHD) Author: Zach Etienne This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`**Notebook Status:** Self-Validated; induction equation not yet implemented **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)** IntroductionWe write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf)):\begin{eqnarray}\ \partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,\end{eqnarray}where $$s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij}- \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].$$We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where * $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and * $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations. 
***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py)*** Namely,* The GRMHD conservative variables: * $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)` * $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)` * $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`* The GRMHD fluxes: * $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)` * $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)` * $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`* GRMHD source terms: * $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)` * $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress-energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables includes:* Spacetime quantities: * ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$* Hydrodynamical quantities: * Rest-mass density $\rho_0$ * Pressure $P$ * Internal energy $\epsilon$ * 4-velocity $u^\mu$* Electrodynamical quantities * Magnetic field $B^i = \tilde{B}^i / \sqrt{\gamma}$ A Note on NotationAs is standard in NRPy+, * Greek indices refer to four-dimensional quantities where the zeroth component indicates the temporal (time) component.* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:```pythonT4EMUU = ixp.zerorank2(DIM=4)for mu in range(4): for nu in range(4): # Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]```When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:```pythonbetaD = ixp.zerorank1(DIM=3)for i in range(3): for j in range(3): betaD[i] += gammaDD[i][j] * betaU[j]```As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:```python# \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2for i in range(3): for mu in range(4): for nu in range(4): S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2``` Table of Contents$$\label{toc}$$Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows: 1. [Step 1](importmodules): Import needed NRPy+ & Python modules1. 
[Step 2](stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$: * **compute_T4UU()**, **compute_T4UD()**: 1. [Step 3](declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.1. [Step 4](code_validation): Code Validation against `GRMHD.equations` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Import needed NRPy+ & Python modules \[Back to [top](toc)\]$$\label{importmodules}$$
###Code
# Step 1: Import needed core NRPy+ modules
from outputC import *      # NRPy+: Core C code output module
import sympy as sp         # SymPy: symbolic algebra (made explicit here; used as sp.symbols below)
import indexedexp as ixp   # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
###Output
_____no_output_____
###Markdown
Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](toc)\]$$\label{stressenergy}$$Recall from above that$$T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},$$where* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function. Since a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,$$T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},$$where* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and* $T^\mu{}_{\nu}{}^{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
###Code
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
global GRHDT4UU
global GRFFET4UU
global T4UU
GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
GRHDT4UU = ixp.zerorank2(DIM=4)
GRFFET4UU = ixp.zerorank2(DIM=4)
T4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
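# (Note, added for clarity) The GRHD and GRFFE pieces are kept as separate globals
# (GRHDT4UU, GRFFET4UU) because compute_GRMHD_T4UD() below lowers each piece with
# its own module's routine before summing.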
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
global T4UD
GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
T4UD = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
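# (Illustrative usage sketch, not part of the original notebook.) With the input
# symbols declared as in Step 3 below, the two functions are called in order,
# since T4UD is built from the GRHD/GRFFE pieces of T4UU:
#   compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared)
#   compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)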
###Output
_____no_output_____
###Markdown
Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](toc)\]$$\label{declarevarsconstructgrhdeqs}$$
###Code
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
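# (Note, added for clarity) sqrt4pi is declared as a *symbol* rather than evaluated
# numerically, so factors of sqrt(4 pi) in the magnetic-field normalization remain
# exact in the symbolic expressions.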
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
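# (Note, added for clarity) each compute_* call above stores its result as a
# module-level SymPy expression (e.g., GRHD.rho_star, reused in the flux
# computations below).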
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
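# (Note, added for clarity) as the function name suggests, this sets the fluid
# 3-velocity vU, i.e. v^i = u^i/u^0, which feeds the flux terms below.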
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
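# (Illustrative check, assuming the cells above have run.) Every constructed
# quantity is a plain SymPy expression and can be inspected directly, e.g.:
# print(GRHD.rho_star_fluxU[0])   # expected to equal rho_star*vU[0] by construction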
###Output
_____no_output_____
###Markdown
Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation}$$As a code validation check, we verify agreement in the SymPy expressions for the GRMHD equations generated in1. this tutorial versus2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
###Code
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
all_passed=True
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
    global all_passed  # needed: without this, the assignment below would only bind a local variable
    # Relies on SymPy's automatic simplification: identical expressions subtract to exactly 0.
    if str(expr1-expr2)!="0":
        print(basename+" - "+prefixname2+basename+" = "+ str(expr1-expr2))
        all_passed=False
def gfnm(basename,idx1,idx2=None,idx3=None):
    # Build a human-readable name like "T4UU[mu][nu]" for the comparison printout.
    if idx2 is None:
        return basename+"["+str(idx1)+"]"
    if idx3 is None:
        return basename+"["+str(idx1)+"]["+str(idx2)+"]"
    return basename+"["+str(idx1)+"]["+str(idx2)+"]["+str(idx3)+"]"
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
for i in range(len(expr_list)):
comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
import sys
if all_passed:
print("ALL TESTS PASSED!")
else:
print("ERROR: AT LEAST ONE TEST DID NOT PASS")
sys.exit(1)
###Output
ALL TESTS PASSED!
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-GRMHD_Equations-Cartesian.ipynb
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!pdflatex -interaction=batchmode Tutorial-GRMHD_Equations-Cartesian.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
docs/examples/1_Introduction_and_Transformations.ipynb | ###Markdown
The TextAttack ecosystem: search, transformations, and constraintsAn attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'. 🍌
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
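# (Quick sanity check, illustrative only.) The hook is plain Python, so it can be
# exercised without running a full attack:
# BananaWordSwap()._get_replacement_words('apple')   # -> ['banana']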
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use BERT trained for news classification on the AG News dataset. We've pretrained several models and uploaded them to the [HuggingFace Model Hub](https://huggingface.co/textattack). TextAttack integrates with any model from HuggingFace's Model Hub and any dataset from HuggingFace's `nlp`!
###Code
# Import the model
import transformers
from textattack.models.tokenizers import AutoTokenizer
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-ag-news")
model.tokenizer = AutoTokenizer("textattack/bert-base-uncased-ag-news")
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model)
# Import the dataset
from textattack.datasets import HuggingFaceNlpDataset
dataset = HuggingFaceNlpDataset("ag_news", None, "test")
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> compatible with model BertForSequenceClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to successfully attack 10 samples.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
num_successes = 0
while num_successes < 10:
result = next(results_iterable)
if isinstance(result, SuccessfulAttackResult):
logger.log_attack_result(result)
num_successes += 1
print(f'{num_successes} of 10 successes complete.')
###Output
1 of 10 successes complete.
2 of 10 successes complete.
3 of 10 successes complete.
4 of 10 successes complete.
5 of 10 successes complete.
6 of 10 successes complete.
7 of 10 successes complete.
8 of 10 successes complete.
9 of 10 successes complete.
10 of 10 successes complete.
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
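# (Optional, illustrative.) Since logger.df is an ordinary pandas DataFrame, the
# results can also be persisted with standard pandas I/O, e.g.:
# logger.df.to_csv('attack_results.csv', index=False)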
###Output
_____no_output_____
###Markdown
ConclusionWe can examine these examples for a good idea of how many words had to be changed to "banana" to change the prediction score from the correct class to another class. The examples without perturbed words were originally misclassified, so they were skipped by the attack. Looks like some examples needed only a couple "banana"s, while others needed up to 17 "banana" substitutions to change the class score. Wow! 🍌 Bonus: Attacking Custom SamplesWe can also attack custom data samples, like these ones I just made up!
###Code
# For AG News, labels are 0: World, 1: Sports, 2: Business, 3: Sci/Tech
custom_dataset = [
('Malaria deaths in Africa fall by 5% from last year', 0),
('Washington Nationals defeat the Houston Astros to win the World Series', 1),
('Exxon Mobil hires a new CEO', 2),
('Microsoft invests $1 billion in OpenAI', 3),
]
results_iterable = attack.attack_dataset(custom_dataset)
logger = CSVLogger(color_method='html')
for result in results_iterable:
logger.log_attack_result(result)
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
textattack: CSVLogger exiting without calling flush().
###Markdown
The TextAttack ecosystem: search, transformations, and constraints [](https://colab.research.google.com/drive/1cBRUj2l0m8o81vJGGFgO-o_zDLj24M5Y?usp=sharing)[](https://github.com/QData/TextAttack/blob/master/docs/examples/1_Introduction_and_Transformations.ipynb) An attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'. 🍌
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use BERT trained for news classification on the AG News dataset. We've pretrained several models and uploaded them to the [HuggingFace Model Hub](https://huggingface.co/textattack). TextAttack integrates with any model from HuggingFace's Model Hub and any dataset from HuggingFace's `nlp`!
###Code
# Import the model
import transformers
from textattack.models.tokenizers import AutoTokenizer
from textattack.models.wrappers import HuggingFaceModelWrapper
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-ag-news")
tokenizer = AutoTokenizer("textattack/bert-base-uncased-ag-news")
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model_wrapper)
# Import the dataset
from textattack.datasets import HuggingFaceNlpDataset
dataset = HuggingFaceNlpDataset("ag_news", None, "test")
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> compatible with model BertForSequenceClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to successfully attack 10 samples.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
num_successes = 0
while num_successes < 10:
result = next(results_iterable)
if isinstance(result, SuccessfulAttackResult):
logger.log_attack_result(result)
num_successes += 1
print(f'{num_successes} of 10 successes complete.')
###Output
1 of 10 successes complete.
2 of 10 successes complete.
3 of 10 successes complete.
4 of 10 successes complete.
5 of 10 successes complete.
6 of 10 successes complete.
7 of 10 successes complete.
8 of 10 successes complete.
9 of 10 successes complete.
10 of 10 successes complete.
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
_____no_output_____
###Markdown
ConclusionWe can examine these examples for a good idea of how many words had to be changed to "banana" to change the prediction score from the correct class to another class. The examples without perturbed words were originally misclassified, so they were skipped by the attack. Looks like some examples needed only a couple "banana"s, while others needed up to 17 "banana" substitutions to change the class score. Wow! 🍌 Bonus: Attacking Custom SamplesWe can also attack custom data samples, like these ones I just made up!
###Code
# For AG News, labels are 0: World, 1: Sports, 2: Business, 3: Sci/Tech
custom_dataset = [
('Malaria deaths in Africa fall by 5% from last year', 0),
('Washington Nationals defeat the Houston Astros to win the World Series', 1),
('Exxon Mobil hires a new CEO', 2),
('Microsoft invests $1 billion in OpenAI', 3),
]
results_iterable = attack.attack_dataset(custom_dataset)
logger = CSVLogger(color_method='html')
for result in results_iterable:
logger.log_attack_result(result)
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
textattack: CSVLogger exiting without calling flush().
###Markdown
The TextAttack ecosystem: search, transformations, and constraints [](https://colab.research.google.com/drive/1cBRUj2l0m8o81vJGGFgO-o_zDLj24M5Y?usp=sharing)[](https://github.com/QData/TextAttack/blob/master/docs/examples/1_Introduction_and_Transformations.ipynb) An attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'. 🍌
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use BERT trained for news classification on the AG News dataset. We've pretrained several models and uploaded them to the [HuggingFace Model Hub](https://huggingface.co/textattack). TextAttack integrates with any model from HuggingFace's Model Hub and any dataset from HuggingFace's `datasets`!
###Code
# Import the model
import transformers
from textattack.models.tokenizers import AutoTokenizer
from textattack.models.wrappers import HuggingFaceModelWrapper
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-ag-news")
tokenizer = AutoTokenizer("textattack/bert-base-uncased-ag-news")
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model_wrapper)
# Import the dataset
from textattack.datasets import HuggingFaceDataset
dataset = HuggingFaceDataset("ag_news", None, "test")
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> compatible with model BertForSequenceClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to successfully attack 10 samples.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
num_successes = 0
while num_successes < 10:
result = next(results_iterable)
if isinstance(result, SuccessfulAttackResult):
logger.log_attack_result(result)
num_successes += 1
print(f'{num_successes} of 10 successes complete.')
###Output
1 of 10 successes complete.
2 of 10 successes complete.
3 of 10 successes complete.
4 of 10 successes complete.
5 of 10 successes complete.
6 of 10 successes complete.
7 of 10 successes complete.
8 of 10 successes complete.
9 of 10 successes complete.
10 of 10 successes complete.
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
_____no_output_____
###Markdown
ConclusionWe can examine these examples for a good idea of how many words had to be changed to "banana" to change the prediction score from the correct class to another class. The examples without perturbed words were originally misclassified, so they were skipped by the attack. Looks like some examples needed only a couple "banana"s, while others needed up to 17 "banana" substitutions to change the class score. Wow! 🍌 Bonus: Attacking Custom SamplesWe can also attack custom data samples, like these ones I just made up!
###Code
# For AG News, labels are 0: World, 1: Sports, 2: Business, 3: Sci/Tech
custom_dataset = [
('Malaria deaths in Africa fall by 5% from last year', 0),
('Washington Nationals defeat the Houston Astros to win the World Series', 1),
('Exxon Mobil hires a new CEO', 2),
('Microsoft invests $1 billion in OpenAI', 3),
]
results_iterable = attack.attack_dataset(custom_dataset)
logger = CSVLogger(color_method='html')
for result in results_iterable:
logger.log_attack_result(result)
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
textattack: CSVLogger exiting without calling flush().
###Markdown
The TextAttack ecosystem: search, transformations, and constraintsAn attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'. 🍌
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use BERT trained for news classification on the AG News dataset. We've pretrained several models and uploaded them to the [HuggingFace Model Hub](https://huggingface.co/textattack). TextAttack integrates with any model from HuggingFace's Model Hub and any dataset from HuggingFace's `nlp`!
###Code
# Import the model
import transformers
from textattack.models.tokenizers import AutoTokenizer
from textattack.models.wrappers import HuggingFaceModelWrapper
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-ag-news")
tokenizer = AutoTokenizer("textattack/bert-base-uncased-ag-news")
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model_wrapper)
# Import the dataset
from textattack.datasets import HuggingFaceNlpDataset
dataset = HuggingFaceNlpDataset("ag_news", None, "test")
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> compatible with model BertForSequenceClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to successfully attack 10 samples.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
results_iterable = attack.attack_dataset(dataset)
logger = CSVLogger(color_method='html')
num_successes = 0
while num_successes < 10:
result = next(results_iterable)
if isinstance(result, SuccessfulAttackResult):
logger.log_attack_result(result)
num_successes += 1
print(f'{num_successes} of 10 successes complete.')
###Output
1 of 10 successes complete.
2 of 10 successes complete.
3 of 10 successes complete.
4 of 10 successes complete.
5 of 10 successes complete.
6 of 10 successes complete.
7 of 10 successes complete.
8 of 10 successes complete.
9 of 10 successes complete.
10 of 10 successes complete.
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
_____no_output_____
###Markdown
ConclusionWe can examine these examples for a good idea of how many words had to be changed to "banana" to change the prediction score from the correct class to another class. The examples without perturbed words were originally misclassified, so they were skipped by the attack. Looks like some examples needed only a couple "banana"s, while others needed up to 17 "banana" substitutions to change the class score. Wow! 🍌 Bonus: Attacking Custom SamplesWe can also attack custom data samples, like these ones I just made up!
###Code
# For AG News, labels are 0: World, 1: Sports, 2: Business, 3: Sci/Tech
custom_dataset = [
('Malaria deaths in Africa fall by 5% from last year', 0),
('Washington Nationals defeat the Houston Astros to win the World Series', 1),
('Exxon Mobil hires a new CEO', 2),
('Microsoft invests $1 billion in OpenAI', 3),
]
results_iterable = attack.attack_dataset(custom_dataset)
logger = CSVLogger(color_method='html')
for result in results_iterable:
logger.log_attack_result(result)
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
textattack: CSVLogger exiting without calling flush().
###Markdown
The TextAttack🐙 ecosystem: search, transformations, and constraintsAn attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap 🍌As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'.
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use an LSTM trained for news classification on the AG News dataset. Luckily, TextAttack comes with 1000 text samples from some popular datasets, as well as pretrained models for those datasets. So we don't have to train our own model, or procure someone else's. We can just use the built-in datasets and models for this.
###Code
# Import the dataset.
from textattack.datasets.classification import AGNews
# Create the model.
from textattack.models.classification.lstm import LSTMForAGNewsClassification
model = LSTMForAGNewsClassification()
# Create the goal function using the model.
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model)
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> matches model LSTMForAGNewsClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to attack 10 samples (by setting `num_examples` to 10). Additionally, we set `attack_n` to `True`, which indicates that we should attack 10 samples, no matter what. If the model mispredicts a sample already, it isn't attacked; since `attack_n` is `True`, if a sample is mispredicted, we'll try the next example in the dataset, and continue until `num_examples` attacks have been completed.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
results_iterable = attack.attack_dataset(AGNews(), num_examples=10, attack_n=True)
results = []
logger = CSVLogger(color_method='html')
for result in tqdm(results_iterable, total=10):
logger.log_attack_result(result)
###Output
12it [00:00, 19.61it/s]
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
_____no_output_____
###Markdown
The TextAttack ecosystem: search, transformations, and constraintsAn attack in TextAttack consists of four parts. Goal functionThe **goal function** determines if the attack is successful or not. One common goal function is **untargeted classification**, where the attack tries to perturb an input to change its classification. Search methodThe **search method** explores the space of potential transformations and tries to locate a successful perturbation. Greedy search, beam search, and brute-force search are all examples of search methods. TransformationA **transformation** takes a text input and transforms it, for example replacing words or phrases with similar ones, while trying not to change the meaning. Paraphrase and synonym substitution are two broad classes of transformations. ConstraintsFinally, **constraints** determine whether or not a given transformation is valid. Transformations don't perfectly preserve syntax or semantics, so additional constraints can increase the probability that these qualities are preserved from the source to adversarial example. There are many types of constraints: overlap constraints that measure edit distance, syntactical constraints check part-of-speech and grammar errors, and semantic constraints like language models and sentence encoders. A custom transformationThis lesson explains how to create a custom transformation. In TextAttack, many transformations involve *word swaps*: they take a word and try and find suitable substitutes. Some attacks focus on replacing characters with neighboring characters to create "typos" (these don't intend to preserve the grammaticality of inputs). Other attacks rely on semantics: they take a word and try to replace it with semantic equivalents. Banana word swap As an introduction to writing transformations for TextAttack, we're going to try a very simple transformation: one that replaces any given word with the word 'banana'. In TextAttack, there's an abstract `WordSwap` class that handles the heavy lifting of breaking sentences into words and avoiding replacement of stopwords. We can extend `WordSwap` and implement a single method, `_get_replacement_words`, to indicate to replace each word with 'banana'. 🍌
###Code
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
###Output
_____no_output_____
###Markdown
Using our transformationNow we have the transformation chosen, but we're missing a few other things. To complete the attack, we need to choose the **search method** and **constraints**. And to use the attack, we need a **goal function**, a **model** and a **dataset**. (The goal function indicates the task our model performs – in this case, classification – and the type of attack – in this case, we'll perform an untargeted attack.) Creating the goal function, model, and datasetWe are performing an untargeted attack on a classification model, so we'll use the `UntargetedClassification` class. For the model, let's use an LSTM trained for news classification on the AG News dataset. Luckily, TextAttack comes with 1000 text samples from some popular datasets, as well as pretrained models for those datasets. So we don't have to train our own model, or procure someone else's. We can just use the built-in datasets and models for this.
###Code
# Import the dataset.
from textattack.datasets.classification import AGNews
# Create the model.
from textattack.models.classification.lstm import LSTMForAGNewsClassification
model = LSTMForAGNewsClassification()
# Create the goal function using the model.
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model)
###Output
textattack: Goal function <class 'textattack.goal_functions.classification.untargeted_classification.UntargetedClassification'> matches model LSTMForAGNewsClassification.
###Markdown
Creating the attackLet's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
###Code
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack.shared import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
###Output
_____no_output_____
###Markdown
Let's print our attack to see all the parameters:
###Code
print(attack)
###Output
Attack(
(search_method): GreedySearch
(goal_function): UntargetedClassification
(transformation): BananaWordSwap
(constraints):
(0): RepeatModification
(1): StopwordModification
(is_black_box): True
)
###Markdown
Using the attackLet's use our attack to attack 10 samples (by setting `num_examples` to 10). Additionally, we set `attack_n` to `True`, which indicates that we should attack 10 samples, no matter what. If the model mispredicts a sample already, it isn't attacked; since `attack_n` is `True`, if a sample is mispredicted, we'll try the next example in the dataset, and continue until `num_examples` attacks have been completed.
###Code
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
results_iterable = attack.attack_dataset(AGNews(), num_examples=10, attack_n=True)
results = []
logger = CSVLogger(color_method='html')
for result in tqdm(results_iterable, total=10):
logger.log_attack_result(result)
###Output
12it [00:00, 19.61it/s]
###Markdown
Visualizing attack resultsWe are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`
###Code
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
###Output
_____no_output_____
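###Markdown
As a quick summary of the run, one can count how many logged samples were actually changed by comparing the two columns displayed above. This is a minimal sketch that relies only on the `original_text` and `perturbed_text` columns used earlier; richer summary columns exist but vary by TextAttack version.
###Code
# Rough proxy for successful perturbations: rows where the perturbed text
# differs from the original (skipped/failed samples stay unchanged).
num_changed = (logger.df['original_text'] != logger.df['perturbed_text']).sum()
print(f'{num_changed} of {len(logger.df)} logged samples were perturbed')
###Output
_____no_output_____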
code/phase1.ipynb | ###Markdown
**Checking GPU availability**
###Code
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ')
print('and then re-execute this cell.')
else:
print(gpu_info)
###Output
Sun Apr 11 09:50:52 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.67 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 35C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
**Checking RAM availability**
###Code
from psutil import virtual_memory
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
if ram_gb < 20:
print('To enable a high-RAM runtime, select the Runtime > "Change runtime type"')
print('menu, and then select High-RAM in the Runtime shape dropdown. Then, ')
print('re-execute this cell.')
else:
print('You are using a high-RAM runtime!')
###Output
Your runtime has 27.4 gigabytes of available RAM
You are using a high-RAM runtime!
###Markdown
**Importing Libraries and Dependencies**
###Code
!pip install torchtext==0.6.0 --quiet
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Field, BucketIterator, TabularDataset
import numpy as np
import pandas as pd
import spacy
import random
from torchtext.data.metrics import bleu_score
from pprint import pprint
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
!python -m spacy download en --quiet
!git clone "https://github.com/anoopkunchukuttan/indic_nlp_library"
!git clone https://github.com/anoopkunchukuttan/indic_nlp_resources.git
# The path to the local git repo for Indic NLP library
INDIC_NLP_LIB_HOME=r"/content/indic_nlp_library"
# The path to the local git repo for Indic NLP Resources
INDIC_NLP_RESOURCES="/content/indic_nlp_resources"
import sys
sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME))
from indicnlp import common
common.set_resources_path(INDIC_NLP_RESOURCES)
from indicnlp import loader
loader.load()
from indicnlp.tokenize import indic_tokenize
indic_string='सुनो, कुछ आवाज़ आ रही है। फोन?'
print('Input String: {}'.format(indic_string))
print('Tokens: ')
for t in indic_tokenize.trivial_tokenize(indic_string):
print(t)
print(indic_tokenize.trivial_tokenize(indic_string))
###Output
Input String: सुनो, कुछ आवाज़ आ रही है। फोन?
Tokens:
सुनो
,
कुछ
आवाज़
आ
रही
है
।
फोन
?
['सुनो', ',', 'कुछ', 'आवाज़', 'आ', 'रही', 'है', '।', 'फोन', '?']
###Markdown
**Mounting Google Drive**
###Code
from google.colab import drive
drive.mount('/content/drive')
spacy_english = spacy.load("en")
###Output
_____no_output_____
###Markdown
**Defining Tokenizers for English (spacy) and Hindi (Indic NLP)**
###Code
def tokenize_english(text): #tokenizer for english using Spacy
return [token.text for token in spacy_english.tokenizer(text)]
sample_text = "I am, going to work"
print(tokenize_english(sample_text))
def tokenize_hindi(text): #tokenizer for hindi using Indic NLP
return indic_tokenize.trivial_tokenize(text)
sample_text = 'सुनो, कुछ आवाज़ आ रही है। फोन?'
print(tokenize_hindi(sample_text))
import pandas as pd
raw_data=pd.read_csv('/content/drive/MyDrive/AssignmentNLP/train/train.csv')
!ls '/content/drive/MyDrive/AssignmentNLP/train/train.csv'
###Output
/content/drive/MyDrive/AssignmentNLP/train/train.csv
###Markdown
**Preprocessing**
###Code
raw_data.head(6)
raw_data=raw_data.iloc[:,1:]
raw_data.head(10)
raw_data.hindi.head(10)
df = raw_data
df['hin_len'] = df['hindi'].str.count(' ')
df['eng_len'] = df['english'].str.count(' ')
df = df.query('hin_len < 80 & eng_len < 80')
df = df.query('hin_len < eng_len * 1.5 & hin_len * 1.5 > eng_len')
from sklearn.model_selection import train_test_split
# create train and validation set
train, val = train_test_split(df, test_size=0.1)
train.to_csv("/content/drive/MyDrive/train.csv", index=False)
val.to_csv("/content/drive/MyDrive/val.csv", index=False)
hindi = Field(tokenize=tokenize_hindi, lower=True,
init_token="<sos>", eos_token="<eos>")
english = Field(tokenize=tokenize_english, lower=True,
init_token="<sos>", eos_token="<eos>")
train.head(10)
from torchtext.data import TabularDataset
data_fields = [('hindi', hindi), ('english', english)]
train,val = TabularDataset.splits(path='/content/drive/MyDrive/', train='train.csv', validation='val.csv', format='csv', fields=data_fields)
###Output
_____no_output_____
###Markdown
**Creating the English and Hindi Vocabularies**
###Code
hindi.build_vocab(train, max_size=10000, min_freq=3)
english.build_vocab(train, max_size=10000, min_freq=3)
print(f"Unique tokens in source (hi) vocabulary: {len(hindi.vocab)}")
print(f"Unique tokens in target (en) vocabulary: {len(english.vocab)}")
hindi.vocab
print(english.vocab.stoi['the'])
print(english.vocab.itos[6])
print(hindi.vocab.itos[6])
train_iter = BucketIterator(train, batch_size=20, sort_key=lambda x: len(x.hindi), shuffle=True)
#batch=next(iter(train_iter))
#print(batch.hindi)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
###Output
_____no_output_____
###Markdown
**Defining the Encoder (LSTM) architecture**
###Code
class EncoderLSTM(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
super(EncoderLSTM, self).__init__()
self.input_size = input_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = nn.Dropout(p)
self.tag = True
self.embedding = nn.Embedding(self.input_size, self.embedding_size)
self.LSTM = nn.LSTM(self.embedding_size, hidden_size, num_layers, dropout = p)
def forward(self, x):
embedding = self.dropout(self.embedding(x))
outputs, (hidden_state, cell_state) = self.LSTM(embedding)
return hidden_state, cell_state
input_size_encoder = len(hindi.vocab)
encoder_embedding_size = 300
hidden_size = 1240
num_layers = 2
encoder_dropout = float(0.5)
encoder_lstm = EncoderLSTM(input_size_encoder, encoder_embedding_size,hidden_size, num_layers, encoder_dropout).to(device)
###Output
_____no_output_____
###Markdown
**Defining the Decoder (LSTM) Architecture**
###Code
class DecoderLSTM(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p, output_size):
super(DecoderLSTM, self).__init__()
self.input_size = input_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.output_size = output_size
self.dropout = nn.Dropout(p)
self.tag = True
self.embedding = nn.Embedding(self.input_size, self.embedding_size)
self.LSTM = nn.LSTM(self.embedding_size, hidden_size, num_layers, dropout = p)
self.fc = nn.Linear(self.hidden_size, self.output_size)
def forward(self, x, hidden_state, cell_state):
x = x.unsqueeze(0)
embedding = self.dropout(self.embedding(x))
outputs, (hidden_state, cell_state) = self.LSTM(embedding, (hidden_state, cell_state))
predictions = self.fc(outputs)
predictions = predictions.squeeze(0)
return predictions, hidden_state, cell_state
input_size_decoder = len(english.vocab)
decoder_embedding_size = 300
hidden_size = 1240
num_layers = 2
decoder_dropout = float(0.5)
output_size = len(english.vocab)
decoder_lstm = DecoderLSTM(input_size_decoder, decoder_embedding_size, hidden_size, num_layers, decoder_dropout, output_size).to(device)
###Output
_____no_output_____
###Markdown
**Defining the Sequence-to-Sequence Model**
###Code
class Seq2Seq(nn.Module):
def __init__(self, Encoder_LSTM, Decoder_LSTM):
super(Seq2Seq, self).__init__()
self.Encoder_LSTM = Encoder_LSTM
self.Decoder_LSTM = Decoder_LSTM
def forward(self, source, target, tfr=0.5):
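        # tfr is the teacher-forcing ratio: at each decoding step, the
        # ground-truth token is fed as the next input with probability tfr;
        # otherwise the model's own best guess is fed back in.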
batch_size = source.shape[1]
target_len = target.shape[0]
target_vocab_size = len(english.vocab)
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)
hidden_state, cell_state = self.Encoder_LSTM(source)
x = target[0]
for i in range(1, target_len):
output, hidden_state, cell_state = self.Decoder_LSTM(x, hidden_state, cell_state)
outputs[i] = output
best_guess = output.argmax(1)
x = target[i] if random.random() < tfr else best_guess
return outputs
###Output
_____no_output_____
###Markdown
**Defining Hyperparameters of the model**
###Code
learning_rate = 0.001
writer = SummaryWriter(f"runs/loss_plot")
step = 0
model = Seq2Seq(encoder_lstm, decoder_lstm).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
len(train_iter)
###Output
_____no_output_____
###Markdown
**Defining Utility functions**
###Code
def translate_sentence(model, sentence, hindi, english, device, max_length=50):
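    # Greedy decoding: encode the source sentence once, then repeatedly feed
    # the most probable token back into the decoder until <eos> or max_length.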
tokens=tokenize_hindi(sentence)
tokens.insert(0, hindi.init_token)
tokens.append(hindi.eos_token)
text_to_indices = [hindi.vocab.stoi[token] for token in tokens]
sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)
with torch.no_grad():
hidden, cell = model.Encoder_LSTM(sentence_tensor)
outputs = [english.vocab.stoi["<sos>"]]
for _ in range(max_length):
previous_word = torch.LongTensor([outputs[-1]]).to(device)
with torch.no_grad():
output, hidden, cell = model.Decoder_LSTM(previous_word, hidden, cell)
best_guess = output.argmax(1).item()
outputs.append(best_guess)
if output.argmax(1).item() == english.vocab.stoi["<eos>"]:
break
translated_sentence = [english.vocab.itos[idx] for idx in outputs]
return translated_sentence[1:]
def checkpoint_and_save(model, best_loss, epoch, optimizer, epoch_loss):
state = {'model': model,'best_loss': best_loss,'epoch': epoch,'rng_state': torch.get_rng_state(), 'optimizer': optimizer.state_dict(),}
torch.save(state, '/content/drive/MyDrive/checkpoint-week1')
torch.save(model.state_dict(),'/content/drive/MyDrive/checkpoint-state-dict-week1')
###Output
_____no_output_____
###Markdown
**Training the Model**
###Code
epoch_loss = 0.0
num_epochs = 100
best_loss = 10000000
best_epoch = -1
sentence1="वे कहते हैं कि जहाज पर आप की जरूरत है।"
ts1 = []
for epoch in range(num_epochs):
print("Epoch - {} / {}".format(epoch+1, num_epochs))
model.eval()
translated_sentence1 = translate_sentence(model, sentence1, hindi, english, device, max_length=50)
print(translated_sentence1)
ts1.append(translated_sentence1)
model.train(True)
for batch_idx, batch in enumerate(train_iter):
input = batch.hindi.to(device)
target = batch.english.to(device)
output = model(input, target)
output = output[1:].reshape(-1, output.shape[2])
target = target[1:].reshape(-1)
optimizer.zero_grad()
loss = criterion(output, target)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
step += 1
epoch_loss += loss.item()
writer.add_scalar("Training loss", loss, global_step=step)
if epoch_loss < best_loss:
best_loss = epoch_loss
best_epoch = epoch
checkpoint_and_save(model, best_loss, epoch, optimizer, epoch_loss)
if ((epoch - best_epoch) >= 10):
print("no improvement in 10 epochs, break")
break
print("Epoch_Loss - {}".format(loss.item()))
print()
print(epoch_loss / len(train_iter))
print('------------done---------')
#while True:pass
#checkpoint = torch.load('/content/drive/MyDrive/checkpoint-week1')
#state=torch.load('/content/drive/MyDrive/checkpoint-state-dict-week1')
#model.load_state_dict(torch.load('/content/drive/MyDrive/checkpoint-state-dict-week1'))
model.eval()
#print(checkpoint['best_loss'])
model.eval()
sentence="वे कहते हैं कि जहाज पर आप की जरूरत है।"
translated_sentence = translate_sentence(model, sentence, hindi, english, device, max_length=50)
print(translated_sentence)
#checkpoint_and_save(model, best_loss, epoch, optimizer, epoch_loss)
###Output
_____no_output_____
###Markdown
**Generating the translated sentences of the development set**
###Code
hs=pd.read_csv('/content/drive/MyDrive/AssignmentNLP/hindistatements.csv')
hs.head(6)
#raw_data=raw_data.iloc[:,1:]
#raw_data.head(10)
hs.hindi[1]
print(len(hs))
###Output
5000
###Markdown
**Defining the English De-tokenizer**
###Code
op=[]
for i in range(0,5000):
sentence=hs.hindi[i]
translated_sentence = translate_sentence(model, sentence, hindi, english, device, max_length=50)
ts=''
for wd in translated_sentence:
if wd=='<eos>':
break
if wd=='<unk>':
continue
ts=ts+wd+' '
op.append(ts[:-1])
from nltk.tokenize.treebank import TreebankWordDetokenizer
op2=[]
for i in range(0,len(hs)):
sentence=hs.hindi[i]
translated_sentence = translate_sentence(model, sentence, hindi, english, device, max_length=50)
ts=TreebankWordDetokenizer().detokenize(translated_sentence)
op2.append(ts)
print(op[0])
print(op2[0])
###Output
get laughing goals flying town .
get laughing goals flying town . <eos>
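###Markdown
Since `bleu_score` was imported from `torchtext.data.metrics` at the top of the notebook but never used, here is a hedged sketch of how the generated translations could be scored. The `reference_sentences` list is hypothetical: the development set here ships without English references, so this only illustrates the call shape.
###Code
# Hypothetical evaluation sketch: candidates are tokenized model outputs,
# references are per-sentence lists of tokenized ground-truth translations.
candidates = [translate_sentence(model, s, hindi, english, device, max_length=50)[:-1]
              for s in hs.hindi[:100]]
# reference_sentences is a hypothetical list of English reference strings.
references = [[tokenize_english(r)] for r in reference_sentences]
print(bleu_score(candidates, references))
###Output
_____no_output_____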
###Markdown
**Saving the outputs**
###Code
ip=[]
for i in range(0,len(hs)):
sentence=hs.hindi[i]
ip.append(sentence)
#with open('/content/drive/MyDrive/AssignmentNLP/hin.txt', 'w') as f2:
# for item in op:
# f2.write("%s\n" % item)
with open('/content/drive/MyDrive/AssignmentNLP/english.txt', 'w') as f:
for item in op:
f.write("%s\n" % item)
!ls '/content/drive/MyDrive/AssignmentNLP'
while True:pass
###Output
_____no_output_____ |
autoker/02_example_2d.ipynb | ###Markdown
2. 2-D example with 1 parameter The following example shows how to construct the kernel automatically from a symbolic expression defining the linear differential operator in **2D**.We consider the following operator, for an unknown *u*$$\mathcal{L}^{\phi} u := \phi u + \partial_x u + \partial_{yy} u$$
###Code
# imports
from mlhiphy.calculus import dx, dy, dz
from mlhiphy.calculus import Constant
from mlhiphy.calculus import Unknown
from mlhiphy.kernels import compute_kernel, generic_kernel
from sympy import expand
from sympy import symbols
from sympy import exp
from sympy import Tuple
x, x_i, x_j = symbols('x x_i x_j')
y, y_i, y_j = symbols('y y_i y_j')
X = Tuple(x,y)
X_i = Tuple(x_i,y_i)
X_j = Tuple(x_j,y_j)
u = Unknown('u')
phi = Constant('phi')
theta_1 = Constant('theta_1')
theta_2 = Constant('theta_2')
expr = phi * u + dx(u) + dy(dy(u))
kuu = generic_kernel(expr, u, (X_i, X_j))
from IPython.display import Math
from sympy import latex
Math(latex(expand(kuu)))
# RBF kernel
kuu = exp(- theta_1 * (x_i - x_j)**2 - theta_2 * (y_i - y_j)**2)
kuf = compute_kernel(expr, kuu, X_i)
kfu = compute_kernel(expr, kuu, X_j)
kff = compute_kernel(expr, kuu, (X_i, X_j))
Math(latex(expand(kuf)))
Math(latex(expand(kfu)))
Math(latex(expand(kff)))
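# A hedged sketch (not part of the original notebook): the derived kernels can
# be turned into numeric callables with sympy.lambdify, assuming mlhiphy's
# Constant behaves like an ordinary sympy Symbol.
from sympy import lambdify
kff_fn = lambdify((x_i, y_i, x_j, y_j, phi, theta_1, theta_2), kff, 'numpy')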
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
notebooks/zarr_benchmarking_dask_plots.ipynb | ###Markdown
Look at refresh and move times as a function of data size
###Code
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_c = df_loc[df_loc['c']==1]
p1 = plt.plot(df_c['N'], df_c['refresh_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_c = df_loc[df_loc['c']==1]
p2 = plt.plot(df_c['N'], df_c['refresh_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
p3 = plt.plot(df_loc['N'], df_loc['refresh_time'], '.-', color='y');
plt.xlabel('Height and width of image in pixels')
plt.ylabel('Run time (s)');
plt.title('Refreshing current slice');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_c = df_loc[df_loc['c']==1]
p1 = plt.plot(df_c['N'], df_c['move_new_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_c = df_loc[df_loc['c']==1]
p2 = plt.plot(df_c['N'], df_c['move_new_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
p3 = plt.plot(df_loc['N'], df_loc['move_new_time'], '.-', color='y');
plt.xlabel('Height and width of image in pixels')
plt.ylabel('Run time (s)');
plt.title('Moving to a new slice in a new chunk');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_c = df_loc[df_loc['c']==1]
p1 = plt.plot(df_c['N'], df_c['move_back_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_c = df_loc[df_loc['c']==1]
p2 = plt.plot(df_c['N'], df_c['move_back_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
p3 = plt.plot(df_loc['N'], df_loc['move_back_time'], '.-', color='y');
plt.xlabel('Height and width of image in pixels')
plt.ylabel('Run time (s)');
plt.title('Moving back to a visited slice');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_c = df_loc[df_loc['c']==2]
p1 = plt.plot(df_c['N'], df_c['move_in_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_c = df_loc[df_loc['c']==2]
p2 = plt.plot(df_c['N'], df_c['move_in_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
p3 = plt.plot(df_loc['N'], df_loc['move_in_time'], '.-', color='y');
plt.xlabel('Height and width of image in pixels')
plt.ylabel('Run time (s)');
plt.title('Moving to a new slice in the same chunk');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
###Output
_____no_output_____
###Markdown
Look at effect of chunk size
###Code
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_N = df_loc[df_loc['N']==2048]
p1 = plt.plot(df_N['c'], df_N['move_new_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_N = df_loc[df_loc['N']==2048]
p2 = plt.plot(df_N['c'], df_N['move_new_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
df_N = df_loc[df_loc['N']==2048]
p3 = plt.plot(df_N['c'], df_N['move_new_time'], '.-', color='y');
plt.xlabel('Chunk size in number of frames')
plt.ylabel('Run time (s)');
plt.title('Moving to a new chunk');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_N = df_loc[df_loc['N']==2048]
p1 = plt.plot(df_N['c'], df_N['refresh_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_N = df_loc[df_loc['N']==2048]
p2 = plt.plot(df_N['c'], df_N['refresh_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
df_N = df_loc[df_loc['N']==2048]
p3 = plt.plot(df_N['c'], df_N['refresh_time'], '.-', color='y');
plt.xlabel('Chunk size in number of frames')
plt.ylabel('Run time (s)');
plt.title('Refreshing current slice');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_N = df_loc[df_loc['N']==2048]
p1 = plt.plot(df_N['c'], df_N['move_back_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_N = df_loc[df_loc['N']==2048]
p2 = plt.plot(df_N['c'], df_N['move_back_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
df_N = df_loc[df_loc['N']==2048]
p3 = plt.plot(df_N['c'], df_N['move_back_time'], '.-', color='y');
plt.xlabel('Chunk size in number of frames')
plt.ylabel('Run time (s)');
plt.title('Moving back to a visited slice');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='on')]
df_N = df_loc[df_loc['N']==2048]
p1 = plt.plot(df_N['c'], df_N['move_in_time'], '.-', color='b');
df_loc = df[np.logical_and(df['location']=='local', df['opp_caching']=='off')]
df_N = df_loc[df_loc['N']==2048]
p2 = plt.plot(df_N['c'], df_N['move_in_time'], '.-', color='r');
df_loc = df[df['location']=='in-memory']
df_N = df_loc[df_loc['N']==2048]
p3 = plt.plot(df_N['c'], df_N['move_in_time'], '.-', color='y');
plt.xlabel('Chunk size in number of frames')
plt.ylabel('Run time (s)');
plt.title('Moving to a new slice in the same chunck');
plt.legend((p1[0], p2[0], p3[0]), ('With caching', 'Without caching', 'In memory'));
###Output
_____no_output_____ |
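###Markdown
The plotting cells above repeat a single pattern with different columns and titles; a small helper could factor that out. A sketch, assuming `df` and `plt` are defined as in the cells above:
###Code
def plot_timing(subsets, column, xcol, xlabel, title):
    """Plot one timing column for several pre-filtered dataframes.
    subsets is a list of (dataframe, color) pairs in legend order."""
    handles = [plt.plot(sub[xcol], sub[column], '.-', color=color)[0]
               for sub, color in subsets]
    plt.xlabel(xlabel)
    plt.ylabel('Run time (s)')
    plt.title(title)
    plt.legend(handles, ('With caching', 'Without caching', 'In memory'))
###Output
_____no_output_____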
32-optimizing.ipynb | ###Markdown
OptimizingRunning faster code. Vectorize [Discrete signal energy](https://en.wikipedia.org/wiki/Energy_%28signal_processing%29):$$ E_{s} \ \ = \ \ \langle x(n), x(n)\rangle \ \ = \sum_{n=-\infty}^{\infty}{|x(n)|^2}$$can be computed as a particular case of the [dot product](https://en.wikipedia.org/wiki/Dot_product):$$ \langle x(n), y(n)\rangle \ \ = \sum_{n=-\infty}^{\infty}{x(n)y(n)}$$where both signals are the same.
###Code
import numpy as np
def non_vectorized_dot_product(x, y):
"""Return the sum of x[i] * y[j] for all pairs of indices i, j.
Example:
>>> my_dot_product(np.arange(20), np.arange(20))
"""
result = 0
for i in range(len(x)):
result += x[i] * y[i]
return result
signal = np.random.random(1000)
print(signal)
%timeit non_vectorized_dot_product(signal, signal)
non_vectorized_dot_product(signal, signal)
###Output
_____no_output_____
###Markdown
Now, using Numpy's array multiplication and sum:
###Code
%timeit np.sum(signal*signal)
np.sum(signal*signal)
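# np.dot delegates the reduction to optimized BLAS routines and avoids the
# temporary array created by signal*signal, so it is typically faster still:
%timeit np.dot(signal, signal)
np.dot(signal, signal)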
###Output
_____no_output_____
###Markdown
Another example to see that vectorization not only involves pure computation:
###Code
# https://softwareengineering.stackexchange.com/questions/254475/how-do-i-move-away-from-the-for-loop-school-of-thought
def cleanup(x, missing=-1, value=0):
"""Return an array that's the same as x, except that where x ==
missing, it has value instead.
>>> cleanup(np.arange(-3, 3), value=10)
... # doctest: +NORMALIZE_WHITESPACE
array([-3, -2, 10, 0, 1, 2])
"""
result = []
for i in range(len(x)):
if x[i] == missing:
result.append(value)
else:
result.append(x[i])
return np.array(result)
array = np.arange(-8,8)
print(array)
print(cleanup(array, value=10, missing=0))
array = np.arange(-1000,1000)
%timeit cleanup(array, value=10, missing=0)
print(array[995:1006])
print(cleanup(array, value=10, missing=0)[995:1006])
# http://www.secnetix.de/olli/Python/list_comprehensions.hawk
# https://docs.python.org/3/library/functions.html#zip
value = [10]*2000
%timeit [xv if c else yv for (c,xv,yv) in zip(array == 0, value, array)]
print([xv if c else yv for (c,xv,yv) in zip(array == 0, value, array)][995:1006])
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.where.html
%timeit np.where(array == 0, 10, array)
print(np.where(array == 0, 10, array)[995:1006])
###Output
_____no_output_____
###Markdown
Use in-place operations
###Code
a = np.random.random(500000)
print(a[0:10])
b = np.copy(a)
%timeit global a; a = 10*a
a = 10*a
print(a[0:10])
a = np.copy(b)
print(a[0:10])
%timeit global a ; a *= 10
a *= 10
print(a[0:10])
###Output
_____no_output_____
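###Markdown
NumPy ufuncs also accept an `out=` argument, which writes the result into an existing array and avoids allocating a temporary; a brief sketch using the same arrays as above:
###Code
a = np.copy(b)
# Write the product directly into a instead of allocating a new array.
%timeit global a; np.multiply(a, 10, out=a)
print(a[0:10])
###Output
_____no_output_____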
###Markdown
Maximize locality in memory access
###Code
a = np.random.rand(100,50)
b = np.copy(a)
def mult(x, val):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x[i][j] /= val
%timeit -n 1 -r 1 mult(a, 10)
a = np.copy(b)
def mult2(x, val):
for j in range(x.shape[1]):
for i in range(x.shape[0]):
x[i][j] /= val
%timeit -n 1 -r 1 mult2(a, 10)
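# mult's inner loop runs over the trailing index, which matches NumPy's
# default row-major (C-order) layout, so it reads contiguous memory;
# mult2's inner loop strides across rows and is slower.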
# http://www.scipy-lectures.org/advanced/optimizing/
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html
c = np.zeros((1000, 1000), order='C')
%timeit c.sum(axis=0)
c.sum(axis=0).shape
%timeit c.sum(axis=1)
c.sum(axis=1).shape
###Output
_____no_output_____
###Markdown
Delegating in CWhen you want to speed up your code or simply when you need to reuse C code, it is possible to use it from Python. There are several alternatives:1. [Cython](http://cython.org/): A superset of Python that allows you to call C functions and load Python variables with C ones. 2. [SWIG (Simplified Wrapper Interface Generator)](http://www.swig.org/): A software development tool to connect C/C++ programs with other languages (including Python).3. [Ctypes](http://python.net/crew/theller/ctypes/): A Python package that can be used to call shared libraries (`.dll`/`.so`/`.dylib`) from Python.4. [Python-C-API](https://docs.python.org/3.6/c-api/index.html): A low-level interface between (compiled) C code and Python.We will show how to use the Python-C-API because it is the most flexible and efficient alternative. However, it is also the hardest to code. The C code to reuse in Python
###Code
!cat sum_array_lib.c
!cat sum_array.c
!gcc -O3 sum_array.c -o sum_array
!./sum_array
###Output
_____no_output_____
###Markdown
The module
###Code
!cat sum_array_module.c
###Output
_____no_output_____
###Markdown
Module compilation
###Code
!cat setup.py
!python setup.py build_ext --inplace
import sum_array_module
import numpy as np
a = np.arange(100000)
%timeit sum_array_module.sumArray(a)
###Output
_____no_output_____
###Markdown
However, remember: vectorize when possible!
###Code
%timeit np.sum(a)
###Output
_____no_output_____ |
linked_lists/add_reverse/add_reverse_challenge.ipynb | ###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Add two numbers whose digits are stored in a linked list in reverse order.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Can we assume this is a non-circular, singly linked list? * Yes* Do we expect the return to be in reverse order too? * Yes* What if one of the inputs is None? * Return None for an invalid operation* How large are these numbers--can they fit in memory? * Yes* Can we assume we already have a linked list class that can be used for this problem? * Yes* Can we assume this fits in memory? * Yes Test Cases* Empty list(s) -> None* Add values of different lengths * Input 1: 6->5->None * Input 2: 9->8->7 * Result: 5->4->8* Add values of same lengths * Exercised from values of different lengths * Done here for completeness AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
%run ../linked_list/linked_list.py
%load ../linked_list/linked_list.py
class MyLinkedList(LinkedList):
def add_reverse(self, first_list, second_list):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test **The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_add_reverse.py
from nose.tools import assert_equal
class TestAddReverse(object):
def test_add_reverse(self):
print('Test: Empty list(s)')
assert_equal(MyLinkedList().add_reverse(None, None), None)
assert_equal(MyLinkedList().add_reverse(Node(5), None), None)
assert_equal(MyLinkedList().add_reverse(None, Node(10)), None)
print('Test: Add values of different lengths')
# Input 1: 6->5->None
# Input 2: 9->8->7
# Result: 5->4->8
first_list = MyLinkedList(Node(6))
first_list.append(5)
second_list = MyLinkedList(Node(9))
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 8])
print('Test: Add values of same lengths')
# Input 1: 6->5->4
# Input 2: 9->8->7
# Result: 5->4->2->1
first_head = Node(6)
first_list = MyLinkedList(first_head)
first_list.append(5)
first_list.append(4)
second_head = Node(9)
second_list = MyLinkedList(second_head)
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 2, 1])
print('Success: test_add_reverse')
def main():
test = TestAddReverse()
test.test_add_reverse()
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Add two numbers whose digits are stored in a linked list in reverse order.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Can we assume this is a non-circular, singly linked list? * Yes* Do we expect the return to be in reverse order too? * Yes* What if one of the inputs is None? * Return None for an invalid operation* How large are these numbers--can they fit in memory? * Yes* Can we assume we already have a linked list class that can be used for this problem? * Yes* Can we assume this fits in memory? * Yes Test Cases* Empty list(s) -> None* Add values of different lengths * Input 1: 6->5->None * Input 2: 9->8->7 * Result: 5->4->8* Add values of same lengths * Exercised from values of different lengths * Done here for completeness AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
# %load ../linked_list/linked_list.py
class Node(object):
def __init__(self, data, next=None):
self.next = next
self.data = data
def __str__(self):
return self.data
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def __len__(self):
curr = self.head
counter = 0
while curr is not None:
counter += 1
curr = curr.next
return counter
def insert_to_front(self, data):
if data is None:
return None
node = Node(data, self.head)
self.head = node
return node
def append(self, data):
if data is None:
return None
node = Node(data)
if self.head is None:
self.head = node
return node
curr_node = self.head
while curr_node.next is not None:
curr_node = curr_node.next
curr_node.next = node
return node
def find(self, data):
if data is None:
return None
curr_node = self.head
while curr_node is not None:
if curr_node.data == data:
return curr_node
curr_node = curr_node.next
return None
def delete(self, data):
if data is None:
return
if self.head is None:
return
if self.head.data == data:
self.head = self.head.next
return
prev_node = self.head
curr_node = self.head.next
while curr_node is not None:
if curr_node.data == data:
prev_node.next = curr_node.next
return
prev_node = curr_node
curr_node = curr_node.next
def delete_alt(self, data):
if data is None:
return
if self.head is None:
return
curr_node = self.head
if curr_node.data == data:
curr_node = curr_node.next
return
while curr_node.next is not None:
if curr_node.next.data == data:
curr_node.next = curr_node.next.next
return
curr_node = curr_node.next
def print_list(self):
curr_node = self.head
while curr_node is not None:
print(curr_node.data)
curr_node = curr_node.next
def get_all_data(self):
data = []
curr_node = self.head
while curr_node is not None:
data.append(curr_node.data)
curr_node = curr_node.next
return data
class MyLinkedList(LinkedList):
def add_reverse(self, first_list, second_list):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test **The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_add_reverse.py
from nose.tools import assert_equal
class TestAddReverse(object):
def test_add_reverse(self):
print('Test: Empty list(s)')
assert_equal(MyLinkedList().add_reverse(None, None), None)
assert_equal(MyLinkedList().add_reverse(Node(5), None), None)
assert_equal(MyLinkedList().add_reverse(None, Node(10)), None)
print('Test: Add values of different lengths')
# Input 1: 6->5->None
# Input 2: 9->8->7
# Result: 5->4->8
first_list = MyLinkedList(Node(6))
first_list.append(5)
second_list = MyLinkedList(Node(9))
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 8])
print('Test: Add values of same lengths')
# Input 1: 6->5->4
# Input 2: 9->8->7
# Result: 5->4->2->1
first_head = Node(6)
first_list = MyLinkedList(first_head)
first_list.append(5)
first_list.append(4)
second_head = Node(9)
second_list = MyLinkedList(second_head)
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 2, 1])
print('Success: test_add_reverse')
def main():
test = TestAddReverse()
test.test_add_reverse()
if __name__ == '__main__':
main()
###Output
Test: Empty list(s)
Test: Add values of different lengths
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Add two numbers whose digits are stored in a linked list in reverse order.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Do you expect the return to be in reverse order too? * Yes* What if one of the inputs is None? * Return None for an invalid operation* How large are these numbers--can they fit in memory? * Yes* Can we assume we already have a linked list class that can be used for this problem? * Yes Test Cases* Empty list(s) -> None* Add values of different lengths * Input 1: 6->5->None * Input 2: 9->8->7 * Result: 5->4->8* Add values of same lengths * Exercised from values of different lengths * Done here for completeness AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
%run ../linked_list/linked_list.py
%load ../linked_list/linked_list.py
class MyLinkedList(LinkedList):
def add_reverse(self, first_list, second_list):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test **The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_add_reverse.py
from nose.tools import assert_equal
class TestAddReverse(object):
def test_add_reverse(self):
print('Test: Empty list(s)')
assert_equal(MyLinkedList().add_reverse(None, None), None)
assert_equal(MyLinkedList().add_reverse(Node(5), None), None)
assert_equal(MyLinkedList().add_reverse(None, Node(10)), None)
print('Test: Add values of different lengths')
# Input 1: 6->5->None
# Input 2: 9->8->7
# Result: 5->4->8
first_list = MyLinkedList(Node(6))
first_list.append(5)
second_list = MyLinkedList(Node(9))
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 8])
print('Test: Add values of same lengths')
# Input 1: 6->5->4
# Input 2: 9->8->7
# Result: 5->4->2->1
first_head = Node(6)
first_list = MyLinkedList(first_head)
first_list.append(5)
first_list.append(4)
second_head = Node(9)
second_list = MyLinkedList(second_head)
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
assert_equal(result.get_all_data(), [5, 4, 2, 1])
print('Success: test_add_reverse')
def main():
test = TestAddReverse()
test.test_add_reverse()
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Add two numbers whose digits are stored in a linked list in reverse order.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Can we assume this is a non-circular, singly linked list? * Yes* Do we expect the return to be in reverse order too? * Yes* What if one of the inputs is None? * Return None for an invalid operation* How large are these numbers--can they fit in memory? * Yes* Can we assume we already have a linked list class that can be used for this problem? * Yes* Can we assume this fits in memory? * Yes Test Cases* Empty list(s) -> None* Add values of different lengths * Input 1: 6->5->None * Input 2: 9->8->7 * Result: 5->4->8* Add values of same lengths * Exercised from values of different lengths * Done here for completeness AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
%run ../linked_list/linked_list.py
%load ../linked_list/linked_list.py
class MyLinkedList(LinkedList):
def add_reverse(self, first_list, second_list):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test **The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_add_reverse.py
import unittest
class TestAddReverse(unittest.TestCase):
def test_add_reverse(self):
print('Test: Empty list(s)')
self.assertEqual(MyLinkedList().add_reverse(None, None), None)
self.assertEqual(MyLinkedList().add_reverse(Node(5), None), None)
self.assertEqual(MyLinkedList().add_reverse(None, Node(10)), None)
print('Test: Add values of different lengths')
# Input 1: 6->5->None
# Input 2: 9->8->7
# Result: 5->4->8
first_list = MyLinkedList(Node(6))
first_list.append(5)
second_list = MyLinkedList(Node(9))
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
self.assertEqual(result.get_all_data(), [5, 4, 8])
print('Test: Add values of same lengths')
# Input 1: 6->5->4
# Input 2: 9->8->7
# Result: 5->4->2->1
first_head = Node(6)
first_list = MyLinkedList(first_head)
first_list.append(5)
first_list.append(4)
second_head = Node(9)
second_list = MyLinkedList(second_head)
second_list.append(8)
second_list.append(7)
result = MyLinkedList().add_reverse(first_list, second_list)
self.assertEqual(result.get_all_data(), [5, 4, 2, 1])
print('Success: test_add_reverse')
def main():
test = TestAddReverse()
test.test_add_reverse()
if __name__ == '__main__':
main()
###Output
_____no_output_____
examples/1.1 - Introductions to PyDP.ipynb | ###Markdown
1.1 Introduction to PyDPThe PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 0.1 version of the package, which has the following limitations:* Supports Linux (Windows coming soon)* Supports Python 3.6 only (more support coming soon)* Currently implements an algorithm for computing a private mean using a Laplace noise generation technique.* Supports only integer and floating point values
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
import pandas as pd
import statistics # for calculating mean without applying differential privacy
# get carrots data from our public github repo
url = 'https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/animals_and_carrots.csv'
df = pd.read_csv(url,sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records.
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses Google's Differential Privacy library to calculate the mean. To preserve privacy, the Laplace mechanism is used. The function takes the argument privacy_budget as input. It is a number between 0 and 1, denoting the privacy threshold: it measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `dp.BoundedMean.result()` takes a list of integers/floats as input and returns the differentially private mean.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = dp.BoundedMean(privacy_budget)
return x.result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private mean varies compared to the mean calculated using non-private statistical methods.This difference in value corresponds to the privacy that is actually preserved for individual records in it.
###Code
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
Mean: 53.01648351648352
Private Mean: 71.27272727272728
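###Markdown
A small illustrative sweep (added here as a sketch): re-running the private mean with different privacy budgets shows that smaller budgets (stronger privacy) generally give noisier results. The exact numbers change on every run because the Laplace noise is random.
###Code
# Illustrative only: the chosen budgets are arbitrary
for budget in [0.1, 0.5, 1.0]:
    print("privacy_budget =", budget, "-> private mean =", private_mean(budget))
###Output
_____no_output_____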
###Markdown
Counts the number of animals that ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses Google's Differential Privacy library to calculate the number of rows with a value above the limit. To preserve privacy, the Laplace mechanism is used. The function takes the argument privacy_budget as input. It is a number between 0 and 1, denoting the privacy threshold: it measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). The function also takes the limit as an argument. `dp.CountInt.result()` takes a list of integers/floats as input and returns the differentially private count.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = dp.CountInt(privacy_budget)
return x.result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private count varies compared to the count calculated using non-private statistical methods. This difference in value corresponds to the privacy that is actually preserved for individual records.
###Code
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
Above 70: 65
private count above: 64
###Markdown
Taking the max of all the entries in a normal fashion without applying the DP library. This is the actual maximum number of carrots eaten across all the records.
###Code
# Function to return the maximum number of carrots eaten by any one animal without applying differential privacy.
# Note: this shadows Python's built-in max() for the rest of the notebook.
def max() -> int:
return df.max()[1]
###Output
_____no_output_____
###Markdown
Private Max uses Google's Differential Privacy library to calculate the maximum of all the values. To preserve privacy, the Laplace mechanism is used. The function takes the argument privacy_budget as input. It is a number between 0 and 1, denoting the privacy threshold: it measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `dp.Max.result()` takes a list of integers/floats as input and returns the differentially private maximum.
###Code
# Function to return the maximum number of carrots eaten by any one animal applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 150 are the lower and upper limits for the search bound.
x = dp.Max(privacy_budget, 0, 150)
return x.result(list(df["carrots_eaten"]), privacy_budget)
###Output
_____no_output_____
###Markdown
As you can see, the value of the private max varies compared to the max calculated using non-private statistical methods. This difference in value corresponds to the privacy that is actually preserved for individual records.
###Code
print("Max:\t" + str(max()))
print("private max:\t" + str(private_max(1)))
###Output
Max: 100
private max: 78.0
###Markdown
Taking the sum of all the entries in a normal fashion without applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses Google's Differential Privacy library to calculate the sum of all the values. To preserve privacy, the Laplace mechanism is used. The function takes the argument privacy_budget as input. It is a number between 0 and 1, denoting the privacy threshold: it measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `dp.BoundedSum.result()` takes a list of integers/floats as input and returns the differentially private sum.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = dp.BoundedSum(privacy_budget)
return x.result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
Sum: 9649
Private Sum: 9472.0
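###Markdown
A recap cell (added as a sketch): the actual and differentially private statistics side by side. The private values are re-drawn with fresh noise on every call, so they will not exactly match the outputs printed above.
###Code
# Illustrative recap; the budgets mirror the ones used earlier in this notebook
recap = pd.DataFrame({
    "actual": [mean_carrots(), count_above(70), max(), sum_carrots()],
    "private": [private_mean(0.8), private_count_above(1, 70), private_max(1), private_sum(1)],
}, index=["mean", "count above 70", "max", "sum"])
recap
###Output
_____no_output_____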
|
experiments/baseline_ptn/oracle.run2.framed/trials/1/trial.ipynb | ###Markdown
PTN Template. This notebook serves as a template for single-dataset PTN experiments. It can be run on its own by setting STANDALONE to True (do a find for "STANDALONE" to see where), but it is intended to be executed as part of a *papermill.py script. See any of the experiments with a papermill script to get started with that workflow.
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
###Output
_____no_output_____
###Markdown
Required Parameters. These are allowed parameters, not defaults. Each of these values needs to be present in the injected parameters (the notebook will raise an exception if they are not present). Papermill uses the cell tag "parameters" to inject the real parameters below this cell. Enable tags to see what I mean.
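###Markdown
For reference, a hypothetical papermill invocation is sketched below; the file names and the parameter subset shown are illustrative assumptions, not taken from the repository's actual driver script.
###Code
# Hypothetical sketch of how a *papermill.py driver injects parameters:
# import papermill as pm
# pm.execute_notebook(
#     "trial.ipynb",      # this template
#     "trial_out.ipynb",  # executed copy with parameters injected
#     parameters=dict(lr=0.001, seed=1337, n_epoch=50),  # subset shown; every name in required_parameters must be supplied
# )
###Output
_____no_output_____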
###Code
required_parameters = {
"experiment_name",
"lr",
"device",
"seed",
"dataset_seed",
"labels_source",
"labels_target",
"domains_source",
"domains_target",
"num_examples_per_domain_per_label_source",
"num_examples_per_domain_per_label_target",
"n_shot",
"n_way",
"n_query",
"train_k_factor",
"val_k_factor",
"test_k_factor",
"n_epoch",
"patience",
"criteria_for_best",
"x_transforms_source",
"x_transforms_target",
"episode_transforms_source",
"episode_transforms_target",
"pickle_name",
"x_net",
"NUM_LOGS_PER_EPOCH",
"BEST_MODEL_PATH",
"torch_default_dtype"
}
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.0001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["num_examples_per_domain_per_label_source"]=100
standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 100
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "target_accuracy"
standalone_parameters["x_transforms_source"] = ["unit_power"]
standalone_parameters["x_transforms_target"] = ["unit_power"]
standalone_parameters["episode_transforms_source"] = []
standalone_parameters["episode_transforms_target"] = []
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# uncomment for CORES dataset
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
standalone_parameters["labels_source"] = ALL_NODES
standalone_parameters["labels_target"] = ALL_NODES
standalone_parameters["domains_source"] = [1]
standalone_parameters["domains_target"] = [2,3,4,5]
standalone_parameters["pickle_name"] = "cores.stratified_ds.2022A.pkl"
# Uncomment these for ORACLE dataset
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# standalone_parameters["labels_source"] = ALL_SERIAL_NUMBERS
# standalone_parameters["labels_target"] = ALL_SERIAL_NUMBERS
# standalone_parameters["domains_source"] = [8,20, 38,50]
# standalone_parameters["domains_target"] = [14, 26, 32, 44, 56]
# standalone_parameters["pickle_name"] = "oracle.frame_indexed.stratified_ds.2022A.pkl"
# standalone_parameters["num_examples_per_domain_per_label_source"]=1000
# standalone_parameters["num_examples_per_domain_per_label_target"]=1000
# Uncomment these for Metahan dataset
# standalone_parameters["labels_source"] = list(range(19))
# standalone_parameters["labels_target"] = list(range(19))
# standalone_parameters["domains_source"] = [0]
# standalone_parameters["domains_target"] = [1]
# standalone_parameters["pickle_name"] = "metehan.stratified_ds.2022A.pkl"
# standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# standalone_parameters["num_examples_per_domain_per_label_source"]=200
# standalone_parameters["num_examples_per_domain_per_label_target"]=100
standalone_parameters["n_way"] = len(standalone_parameters["labels_source"])
# Parameters
parameters = {
"experiment_name": "baseline_ptn_oracle.run2.framed",
"lr": 0.001,
"device": "cuda",
"seed": 1337,
"dataset_seed": 1337,
"labels_source": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"labels_target": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"x_transforms_source": [],
"x_transforms_target": [],
"episode_transforms_source": [],
"episode_transforms_target": [],
"num_examples_per_domain_per_label_source": 1000,
"num_examples_per_domain_per_label_target": 1000,
"n_shot": 3,
"n_way": 16,
"n_query": 2,
"train_k_factor": 1,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float64",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_loss",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"pickle_name": "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl",
"domains_source": [8, 32, 50],
"domains_target": [14, 20, 26, 38, 44],
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
print("parameters not injected, running with standalone_parameters")
parameters = standalone_parameters
if not 'parameters' in locals() and not 'parameters' in globals():
raise Exception("Parameter injection failed")
#Use an easy dict for all the parameters
p = EasyDict(parameters)
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
print("Parameters are incorrect")
if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
# (This is due to the randomized initial weights)
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
###################################
# Build the dataset
###################################
if p.x_transforms_source == []: x_transform_source = None
else: x_transform_source = get_chained_transform(p.x_transforms_source)
if p.x_transforms_target == []: x_transform_target = None
else: x_transform_target = get_chained_transform(p.x_transforms_target)
if p.episode_transforms_source == []: episode_transform_source = None
else: raise Exception("episode_transform_source not implemented")
if p.episode_transforms_target == []: episode_transform_target = None
else: raise Exception("episode_transform_target not implemented")
eaf_source = Episodic_Accessor_Factory(
labels=p.labels_source,
domains=p.domains_source,
num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
x_transform_func=x_transform_source,
example_transform_func=episode_transform_source,
)
train_original_source, val_original_source, test_original_source = eaf_source.get_train(), eaf_source.get_val(), eaf_source.get_test()
eaf_target = Episodic_Accessor_Factory(
labels=p.labels_target,
domains=p.domains_target,
num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_target,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name),
x_transform_func=x_transform_target,
example_transform_func=episode_transform_target,
)
train_original_target, val_original_target, test_original_target = eaf_target.get_train(), eaf_target.get_val(), eaf_target.get_test()
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
datasets = EasyDict({
"source": {
"original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
"processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
},
"target": {
"original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
"processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
},
})
# Some quick unit tests on the data
from steves_utils.transforms import get_average_power, get_average_magnitude
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_source))
assert q_x.dtype == eval(p.torch_default_dtype)
assert s_x.dtype == eval(p.torch_default_dtype)
print("Visually inspect these to see if they line up with expected values given the transforms")
print('x_transforms_source', p.x_transforms_source)
print('x_transforms_target', p.x_transforms_target)
print("Average magnitude, source:", get_average_magnitude(q_x[0].numpy()))
print("Average power, source:", get_average_power(q_x[0].numpy()))
q_x, q_y, s_x, s_y, truth = next(iter(train_processed_target))
print("Average magnitude, target:", get_average_magnitude(q_x[0].numpy()))
print("Average power, target:", get_average_power(q_x[0].numpy()))
###################################
# Build the model
###################################
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=(2,256))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
train_iterable=datasets.source.processed.train,
source_val_iterable=datasets.source.processed.val,
target_val_iterable=datasets.target.processed.val,
num_epochs=p.n_epoch,
num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
patience=p.patience,
optimizer=optimizer,
criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
per_domain_accuracy[domain] = {
"accuracy": accuracy,
"source?": domain in p.domains_source
}
# Do an independent accuracy assessment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
experiment = {
"experiment_name": p.experiment_name,
"parameters": dict(p),
"results": {
"source_test_label_accuracy": source_test_label_accuracy,
"source_test_label_loss": source_test_label_loss,
"target_test_label_accuracy": target_test_label_accuracy,
"target_test_label_loss": target_test_label_loss,
"source_val_label_accuracy": source_val_label_accuracy,
"source_val_label_loss": source_val_label_loss,
"target_val_label_accuracy": target_val_label_accuracy,
"target_val_label_loss": target_val_label_loss,
"total_epochs_trained": total_epochs_trained,
"total_experiment_time_secs": total_experiment_time_secs,
"confusion": confusion,
"per_domain_accuracy": per_domain_accuracy,
},
"history": history,
"dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
json.dumps(experiment)
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/BodySegmentParameters-checkpoint.ipynb | ###Markdown
Body segment parameters> Marcos Duarte > Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/)) > Federal University of ABC, Brazil "Le proporzioni del corpo umano secondo Vitruvio", also known as the Vitruvian Man, drawing by Leonardo da Vinci circa 1490 based on the work of Marcus Vitruvius Pollio (1st century BC), depicting a man in supposedly ideal human proportions (image from Wikipedia). Body segment parameters (BSP) of the human body, such as length, area, volume, mass, density, center of mass, moment of inertia, and center of volume, are fundamental for the application of mechanics to the understanding of human movement. Anthropometry is the field concerned with the study of such measurements of the human body. Frequently, one cannot measure most of these parameters for each segment of an individual, and these quantities are estimated by indirect methods. The main indirect methods are based on data from cadavers (e.g., Dempster's model), body image scanning of living subjects (e.g., Zatsiorsky-Seluyanov's model), and geometric measurements (e.g., Hanavan's model). See [http://kwon3d.com/theory/bsp.html](http://kwon3d.com/theory/bsp.html) and [Human Body Properties Database: Body Segment Parameters](https://www.dh.aist.go.jp/database/properties/index-e.html) for more information on these models. For a review of the different methods employed in the determination of BSP, see Drillis et al. (1964), Contini (1972), Bjørnstrup (1995), Zatsiorsky (2002) and Nigg and Herzog (2006). There is at least one Python library for the calculation of human body segment parameters; see Dembia et al. (2014), which implements the Yeadon human inertia geometric model. Estimation of body segment parameters. Next, let's look at how to estimate some of the BSP using the anthropometric model of Dempster (1955) with some parameters adapted by Winter (2009) and the model of Zatsiorsky and Seluyanov (Zatsiorsky, 2002), from now on Zatsiorsky, with parameters adjusted by de Leva (1996). For a table with BSP values, also referred to as an anthropometric table, typically: + The mass of each segment is given as a fraction of the total body mass. + The center of mass (CM) position in the sagittal plane of each segment is given as a fraction of the segment length with respect to the proximal or distal joint position. + The radius of gyration (Rg) around the transverse axis (rotation in the sagittal plane) and around other axes of each segment is given as a fraction of the segment length with respect to (w.r.t.) the center of mass or w.r.t. the proximal or w.r.t. the distal joint position. First, let's look at Dempster's and Zatsiorsky's anthropometric tables before presenting mathematical definitions for these parameters.
###Code
# Import the necessary libraries
from IPython.display import display, Math, Latex
import numpy as np
%matplotlib notebook
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_colwidth', 100)  # full option name works across pandas versions
###Output
_____no_output_____
###Markdown
Dempster's model adapted by Winter
###Code
BSP_Dmarks = pd.read_csv('./../data/BSPlandmarks_Dempster.txt', sep='\t')
display(Latex('BSP landmarks from Dempster\'s model adapted by Winter (2009):'))
display(BSP_Dmarks)
bsp_D = pd.read_csv('./../data/BSP_DempsterWinter.txt', index_col=0, sep='\t')
display(Latex('BSP values from Dempster\'s model adapted by Winter (2009):'))
display(bsp_D)
###Output
_____no_output_____
###Markdown
Zatsiorsky's model adjusted by de LevaThe segments defined in the Zatsiorsky's model (Zatsiorsky, 2002) adjusted by de Leva (1996) are illustrated in the next figure. Figure. Segment definition employed in the anthropometric model of Zatsiorsky and Seluyanov (Zatsiorsky, 2002) adjusted by de Leva (1996).Image from a Motion Analysis Corporation manual.
###Code
BSP_Zmarks = pd.read_csv('./../data/BSPlandmarks_ZdeLeva.txt', sep='\t')
display(Latex('BSP landmarks from Zatsiorsky\'s model' +
' adjusted by de Leva (1996):'))
display(BSP_Zmarks)
bsp_Zf = pd.read_csv('./../data/BSPfemale_ZdeLeva.txt', index_col=0, sep='\t')
display(Latex('BSP female values from Zatsiorsky\'s model' +
' adjusted by de Leva (1996):'))
display(bsp_Zf)
bsp_Zm = pd.read_csv('./../data/BSPmale_ZdeLeva.txt', index_col=0, sep='\t')
display(Latex('BSP male values from Zatsiorsky\'s model' +
' adjusted by de Leva (1996):'))
display(bsp_Zm)
###Output
_____no_output_____
###Markdown
Differences between the anthropometric models from Dempster and ZatsiorskyThe anthropometric models from Dempster and Zatsiorsky are different in many aspects: Dempster's model is based on the data of 8 cadavers of older male individuals (but two of the individuals were of unknown age) analyzed in the United States. Zatsiorsky's model is based on image scanning of 100 young men and 15 young women, at the time all students of a military school in the former Soviet Union. The difference between models for some segments is huge (see table below): the mass fraction of the thigh segment for Zatsiorsky's model is more than 40% larger than for Dempster's model; inversely, the trunk segment has about a 15% lower mass fraction for Zatsiorsky's model. Also, note that some of the segments don't have the same definition in the two models.
###Code
m_D = bsp_D.loc[['Foot', 'Leg', 'Thigh', 'Pelvis', 'Abdomen', 'Thorax', 'Trunk',
'Upper arm', 'Forearm', 'Hand', 'Head neck'], 'Mass']
m_Zf = bsp_Zf.loc[['Foot', 'Shank', 'Thigh', 'Lower trunk', 'Middle trunk', 'Upper trunk',
'Trunk', 'Upper arm', 'Forearm', 'Hand', 'Head'], 'Mass']
m_Zm = bsp_Zm.loc[['Foot', 'Shank', 'Thigh', 'Lower trunk', 'Middle trunk', 'Upper trunk',
'Trunk', 'Upper arm', 'Forearm', 'Hand', 'Head'], 'Mass']
m_D.index = m_Zf.index # because of different names for some segments
display(Latex("Mass fraction difference (in %) of Zatsiorsky's model w.r.t. Dempster's model"))
d = pd.DataFrame({'Females': np.around(100 * (m_Zf - m_D) / m_D), \
'Males': np.around(100 * (m_Zm - m_D) / m_D)})
display(d)
###Output
_____no_output_____
###Markdown
Center of mass[Center of mass](https://en.wikipedia.org/wiki/Center_of_mass) is the average position of the masses of a system. More formally, the center of mass of an object is the unique point at the center of a distribution of mass in space that has the property that the weighted position vectors relative to this point sum to zero. By [mass](https://en.wikipedia.org/wiki/Mass) we mean the inertial mass, a quantitative measure of an object's resistance to acceleration. The mathematical definition for the position of the center of mass, $\mathbf{r}_{cm}$, of a system with N objects, each with mass $m_i$ and position $\mathbf{r}_i$, is:$$ \sum_{i=1}^N m_{i}(\mathbf{r}_i - \mathbf{r}_{cm}) = 0 $$Solving this equation for $\mathbf{r}_{cm}$, we obtain:$$ \mathbf{r}_{cm} = \frac{1}{M}\sum_{i=1}^N m_{i}\mathbf{r}_i \quad \text{where} \quad M = \sum_{i=1}^N m_{i} $$ If we rearrange the equation above, we can see that the center of mass is the unique point that when multiplied by the total mass is equal to the sum of each mass multiplied by its corresponding position:$$ M\mathbf{r}_{cm} = \sum_{i=1}^N m_{i}\mathbf{r}_i $$Using this property, several problems in mechanics can be solved considering only the total mass and the center of mass instead of each particle of a system. A related quantity is the [center of gravity](https://en.wikipedia.org/wiki/Center_of_mass#Center_of_gravity), which is the point in an object around which the resultant torque due to gravity forces vanishes. Near the Earth's surface, where the gravity force acts downward as a parallel force field, for an object with dimensions much smaller than the Earth's radius, the positions of the center of gravity and of the center of mass are the same for practical purposes in biomechanics. Using the data of the body segment parameters table, the center of mass of a single segment $i$ is (see figure below):$$r_{i,cm} = r_{i,p} + \text{bsp[i,cmp]} * (r_{i,d}-r_{i,p})$$where $r_{i,p}$ and $r_{i,d}$ are the positions of the proximal and distal landmarks used to define segment $i$. Note that $r$ is a vector and may have more than one dimension. The equation for the center of mass is valid in each direction and the calculations are performed independently in each direction. In addition, there is no need to include the mass of the segment in the equation above; the mass of the segment is used only when there is more than one segment. For example, given the following coordinates ($x, y$) for the MT2, ankle, knee and hip joints:
###Code
r = np.array([[101.1, 1.3], [84.9, 11.0], [86.4, 54.9], [72.1, 92.8]])/100
display(np.around(r, 3))
###Output
_____no_output_____
###Markdown
The position of the center of mass of each segment and of the lower limb are:
###Code
M = bsp_D.loc[['Foot', 'Leg', 'Thigh'], 'Mass'].sum()
rcm_foot = r[1] + bsp_D.loc['Foot', 'CM prox']*(r[0]-r[1])
rcm_leg = r[2] + bsp_D.loc['Leg', 'CM prox']*(r[1]-r[2])
rcm_thigh = r[3] + bsp_D.loc['Thigh','CM prox']*(r[2]-r[3])
rcm = (bsp_D.loc['Foot','Mass']*rcm_foot + bsp_D.loc['Leg','Mass']*rcm_leg + \
bsp_D.loc['Thigh','Mass']*rcm_thigh)/M
print('Foot CM: ', np.around(rcm_foot, 3), 'm')
print('Leg CM: ', np.around(rcm_leg, 3), 'm')
print('Thigh CM: ', np.around(rcm_thigh, 3), 'm')
print('Lower limb CM: ', np.around(rcm, 3), 'm')
###Output
Foot CM: [0.93 0.062] m
Leg CM: [0.858 0.359] m
Thigh CM: [0.783 0.764] m
Lower limb CM: [0.818 0.584] m
###Markdown
And here is a geometric representation of part of these calculations:
###Code
plt.rc('axes', labelsize=14, linewidth=1.5)
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('lines', markersize=8)
hfig, hax = plt.subplots(1, 1, figsize=(9, 5))
# bones and joints
plt.plot(r[:,0], r[:,1], 'b-')
plt.plot(r[:,0], r[:,1], 'ko', label='joint')
# center of mass of each segment
plt.plot(rcm_foot[0], rcm_foot[1], 'go', label='segment center of mass')
plt.plot(rcm_leg[0], rcm_leg[1], 'go', rcm_thigh[0], rcm_thigh[1], 'go')
# total center of mass
plt.plot(rcm[0], rcm[1], 'ro', label='total center of mass')
hax.legend(frameon=False, loc='upper left', fontsize=12, numpoints=1)
plt.arrow(0, 0, r[3,0], r[3,1], 'b', head_width=0.02, overhang=.5, fc="k", ec="k", lw=2,
length_includes_head=True)
plt.arrow(r[3,0], r[3,1], rcm_thigh[0] - r [3,0], rcm_thigh[1] - r[3,1], head_width=0.02,
overhang=.5, fc="b", ec="b", lw=2, length_includes_head=True)
plt.arrow(0, 0, rcm_thigh[0], rcm_thigh[1], head_width=0.02, overhang=.5, fc="g", ec="g",
lw=2, length_includes_head=True)
plt.text(0.30, .57, '$\mathbf{r}_{thigh,p}$', rotation=38, fontsize=16)
plt.text(0.77, .85, '$bsp_{thigh,cmp}*(\mathbf{r}_{i,d}-\mathbf{r}_{i,p})$',
fontsize=16, color='b')
plt.text(0.1, .55,
'$\mathbf{r}_{thigh,cm}=\mathbf{r}_{i,p}+bsp_{i,cmp}*' +
'(\mathbf{r}_{i,d}-\mathbf{r}_{i,p})$',
rotation=29, fontsize=16, color='g')
hax.set_xlim(0,1.1)
hax.set_ylim(0,1.05)
hax.set_xlabel('x [m]')
hax.set_ylabel('y [m]')
hax.set_title('Determination of center of mass', fontsize=16)
hax.grid()
###Output
_____no_output_____
###Markdown
Center of mass of a body with continuous distribution of massIf the mass distribution of the body is continuous over a volume and each element of mass has [density](https://en.wikipedia.org/wiki/Density), mass per unit volume, $\rho(r),\:$ the position of the center of mass is given by:$$ \mathbf{r}_{cm} \;=\; \frac{1}{M}\int_0^M \mathbf{r}\:dm \;=\; \frac{1}{M}\int_0^V \mathbf{r}\rho(r)\:dV $$For example, the center of mass of a uniform rod with total mass M and length L along its main axis x is:$$ \mathbf{r}_{cm} \;=\; \frac{1}{M}\int_0^L x\frac{M}{L}\:dx = \frac{1}{M}\frac{M}{L}\left.\frac{x^2}{2}\right|_0^L = \frac{L}{2} $$For a rod with M = 1 kg and L = 1 m, $\mathbf{r}_{cm}$ = 0.5 m. Center of volume Analogous to the center of mass, the center of volume is the unique point at the center of a distribution of volume in space that has the property that the weighted position vectors relative to this point sum to zero. If the density of the object doesn't vary, the center of volume and the center of mass are in the same position. Center of buoyancy [Center of buoyancy](https://en.wikipedia.org/wiki/Buoyancy) is the center of the volume of water which the submerged part of an object displaces. Center of buoyancy is to center of volume as center of gravity is to center of mass. Moment of inertia[Moment of inertia](https://en.wikipedia.org/wiki/Moment_of_inertia) (or rotational inertia), analogous to mass, is a quantitative measure of the resistance to rotational acceleration about an axis of a distribution of mass in space. The mathematical definition for the moment of inertia, $I$, of a system with N objects rotating around an axis, each with mass $m_i$ and distance $r_i$ to this axis, is:$$ I = \sum_{i=1}^N m_{i}r_{i}^2 $$This equation for the moment of inertia can be intuitively deduced if we consider the kinetic energy of a system with $N$ particles, each at a distance $r_i$, all rotating around a fixed axis with the same angular speed $\omega$:$$ E_k = \frac{1}{2}\sum_{i=1}^N m_{i}v_{i}^2 = \frac{1}{2}\sum_{i=1}^N m_{i}(\omega r_{i})^2 = \frac{1}{2}\omega^2\sum_{i=1}^N m_{i}r_{i}^2 $$The term multiplying $\omega^2/2$ in the rightmost expression is the system's moment of inertia for rotation. The value of the moment of inertia is a single scalar for a two-dimensional object or a tensor (a symmetric 3×3 inertia matrix) for a three-dimensional object (we will see that later). Radius of gyration[Radius of gyration](https://en.wikipedia.org/wiki/Radius_of_gyration) is the distance from the axis at which all the mass could be concentrated to obtain the same moment of inertia of the object. The mathematical definition for the radius of gyration, $R_g$, is:$$ I = M R_g^2 \quad \Rightarrow \quad R_g = \sqrt{\frac{I}{M}} = \sqrt{\frac{\sum_{i=1}^N m_{i}r_{i}^2}{\sum_{i=1}^N m_{i}}} $$The radius of gyration (as a fraction of the segment length) is the quantity that is given in the table of body segment parameters.
Because of that, we don't need to sum each element of mass of the segment to calculate its moment of inertia; we just need to take the mass of the segment times the radius of gyration squared. Using the body segment parameters, the moment of inertia of a single segment $i$ rotating around its own center of mass is (see figure below):$$ I_{i,cm} = M * \text{bsp[i,mass]} * \left(\text{bsp[i,rgcm]} * ||r_{i,d}-r_{i,p}||\right)\:^2 $$where $M$ is the total body mass of the subject and $||r_{i,d}-r_{i,p}||$ is the length of segment $i$. For example, the moments of inertia of each segment of the lower limb around each corresponding segment center of mass, considering the coordinates (x, y) for the MT2, ankle, knee and hip joints given above, are:
###Code
norm = np.linalg.norm
M = 100 # body mass
Icm_foot = M*bsp_D.loc['Foot', 'Mass']*((bsp_D.loc['Foot', 'Rg CM']*norm(r[0]-r[1]))**2)
Icm_leg = M*bsp_D.loc['Leg', 'Mass']*((bsp_D.loc['Leg', 'Rg CM']*norm(r[1]-r[2]))**2)
Icm_thigh = M*bsp_D.loc['Thigh','Mass']*((bsp_D.loc['Thigh','Rg CM']*norm(r[2]-r[3]))**2)
print('Icm foot: ', np.around(Icm_foot, 3), 'kgm2')
print('Icm leg: ', np.around(Icm_leg, 3), 'kgm2')
print('Icm thigh: ', np.around(Icm_thigh, 3), 'kgm2')
###Output
Icm foot: 0.012 kgm2
Icm leg: 0.082 kgm2
Icm thigh: 0.171 kgm2
###Markdown
Parallel axis theoremThe value of the moment of inertia is by definition dependent on the distance of the mass to the axis of rotation. This is the reason the radius of gyration in the table above is given for three different axes (center of mass and proximal and distal joint positions). In case we want the moment of inertia of an object around a parallel axis other than the axis passing through the center of mass, we can use the [parallel axis theorem](https://en.wikipedia.org/wiki/Parallel_axis_theorem), which states that the moment of inertia about a parallel axis is the moment of inertia around the center of mass plus the moment of inertia of the body treated as a point mass at this new axis location:$$ I_{new\:axis}\;=\;I_{cm}\;+\;md_{new\:axis\:to\:cm}^2 $$The sum at the right side involves two positive terms; this means that the smallest possible moment of inertia is around the center of mass. For example, using the parallel axis theorem, the moment of inertia of the lower limb around its center of mass is:
###Code
Icmll = (Icm_foot + M*bsp_D.loc['Foot', 'Mass']*norm(rcm-rcm_foot )**2 + \
Icm_leg + M*bsp_D.loc['Leg', 'Mass']*norm(rcm-rcm_leg )**2 + \
Icm_thigh + M*bsp_D.loc['Thigh','Mass']*norm(rcm-rcm_thigh)**2)
print('Icm lower limb: ', np.around(Icmll, 3), 'kgm2')
###Output
Icm lower limb: 1.257 kgm2
###Markdown
To calculate the moment of inertia of the lower limb around the hip, we use again the parallel axis theorem:
###Code
Ihipll = (Icm_foot + M*bsp_D.loc['Foot', 'Mass']*norm(r[3]-rcm_foot )**2 + \
Icm_leg + M*bsp_D.loc['Leg', 'Mass']*norm(r[3]-rcm_leg )**2 + \
Icm_thigh + M*bsp_D.loc['Thigh','Mass']*norm(r[3]-rcm_thigh)**2)
print('Ihip lower limb: ', np.around(Ihipll, 3), 'kgm2')
###Output
Ihip lower limb: 3.317 kgm2
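###Markdown
As a consistency check (a sketch added here): since $I_{cm}$ of the whole lower limb was computed about the limb's own center of mass, transferring it to the hip with the limb's total mass and the hip-to-CM distance must reproduce the segment-by-segment value above.
###Code
# Parallel axis theorem applied to the composite lower limb:
# I_hip = I_cm(lower limb) + M_lowerlimb * d(hip, limb cm)^2
M_ll = M*bsp_D.loc[['Foot', 'Leg', 'Thigh'], 'Mass'].sum()  # lower limb mass [kg]
Ihipll_check = Icmll + M_ll*norm(r[3] - rcm)**2
print('Ihip lower limb (check): ', np.around(Ihipll_check, 3), 'kgm2')
###Output
_____no_output_____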
###Markdown
Note that for the correct use of the parallel axis theorem we have to input the moment of inertia of each body around its own center of mass. For example, we CAN NOT calculate the moment of inertia around the hip by pairing each segment's moment of inertia with the distance from the hip to the center of mass of the entire lower limb:
###Code
# THIS IS WRONG:
I = (Icm_foot + M*bsp_D.loc['Foot', 'Mass']*norm(r[3]-rcm)**2 + \
Icm_leg + M*bsp_D.loc['Leg', 'Mass']*norm(r[3]-rcm)**2 + \
Icm_thigh + M*bsp_D.loc['Thigh','Mass']*norm(r[3]-rcm)**2)
print('Icm lower limb: ', np.around(I, 3), 'kgm2. THIS IS WRONG!')
###Output
Icm lower limb: 2.324 kgm2. THIS IS WRONG!
|
notebooks/01-saxs-theory-p1/04-drawing-3d-images.ipynb | ###Markdown
4. Drawing a Sphere as a 3D Image 4.1 Using matplotlib only* You can draw 3D images with the ax.voxels() method.* However, using ax.voxels() can be problematic because it is slow.* One solution to this problem is given in the next section below.
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib widget
###Output
_____no_output_____
###Markdown
* Note that we have chosen a smaller number of grid points, i.e. N = 32, to reduce the total 3D data volume.
###Code
N = 32
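# compare a 100x100 2D image (presumably the size used in the earlier notebooks) with the N^3 voxel volume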
100**2, N**3
###Output
_____no_output_____
###Markdown
* Note that we are using %%timeit to measure how long each cell takes to run.* Be careful: %%timeit restricts the scope of the variables defined in the cell to the inside of the cell.* That is, variables newly defined in the cell cannot be used in later cells without redefining them.
###Code
%%timeit -n 1 -r 1
canvas = np.ones((N,N,N))
fig, ax = plt.subplots(figsize=(4,4), subplot_kw={"projection":"3d"})
ax.voxels(canvas);
def plot_a_shape(shape):
canvas = np.zeros((N,N,N))
canvas[shape] = 1
fig, ax = plt.subplots(figsize=(4,4), subplot_kw={"projection":"3d"})
ax.voxels(canvas)
x = y = z = np.arange(N)
xx, yy, zz = np.meshgrid(x, y, z)
%%timeit -n 1 -r 1
plot_a_shape((xx - 16)**2 + (yy - 16)**2 + (zz - 16)**2 < 4**2)
###Output
_____no_output_____
###Markdown
4.2 How to draw faster* To draw faster, use the draw_voxles_as_dots() function from the "learnsaxs" package instead.* As its name suggests, draw_voxles_as_dots() draws the data as dots with the ax.scatter() method, which results in faster rendering.* Of course the resulting image is less accurate, but it suffices for our purposes; we are taking advantage of the drawing speed.
###Code
def plot_a_shape_as_dots(shape):
from learnsaxs import draw_voxles_as_dots
canvas = np.zeros((N,N,N))
canvas[shape] = 1
fig, ax = plt.subplots(figsize=(4,4), subplot_kw={"projection":"3d"})
draw_voxles_as_dots(ax, canvas) # faster than ax.voxels(...)
ax.set_xlim(0, N)
ax.set_ylim(0, N)
ax.set_zlim(0, N)
%%timeit -n 1 -r 1
plot_a_shape_as_dots((xx - 16)**2 + (yy - 16)**2 + (zz - 16)**2 < 4**2)
###Output
_____no_output_____ |
DATA_EDA.ipynb | ###Markdown
import osfor dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
###Code
# pandas and sklearn are needed below; the transactions and customers data
# are assumed to have been loaded earlier in the session, e.g.:
# df_trans = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
# df_cus = pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/customers.csv')
import pandas as pd
from sklearn.impute import SimpleImputer

df_trans.head()
df_cus.head()
print(df_cus['customer_id'].nunique())
print(df_trans['customer_id'].nunique())
df_trans.t_dat.max()
###Output
_____no_output_____
###Markdown
Task 1 - Use age, R, F, M values for basic customer clustering; can include FN, Active, club_member_status, fashion_news_frequency. EDA of FN, Active, club_member_status, fashion_news_frequency - do we need to take them into account? Produce a list of articles bought by each customer. Create the user-product matrix. Use ALS. Calculate the RFM values for each customer from transaction data.
###Code
df_trans['Frequency']=df_trans.groupby('customer_id')['article_id'].transform('count')
df_trans['Monetary']=df_trans.groupby('customer_id')['price'].transform('sum')
order_dt = df_trans.groupby('customer_id')['t_dat'].max().rename('max_order_dt').reset_index()
df_trans_2=pd.merge(df_trans,order_dt,how='left',on='customer_id')
df_RFM = df_trans_2[['customer_id','Frequency','Monetary','max_order_dt']].drop_duplicates().reset_index()
df_RFM['Recency']= (pd.to_datetime(df_RFM['max_order_dt'].max())-pd.to_datetime(df_RFM['max_order_dt'])).dt.days
df_RFM.dtypes
print(df_RFM.shape[0])
print(df_trans['customer_id'].nunique())
###Output
_____no_output_____
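###Markdown
A minimal clustering sketch for Task 1 above (an illustrative addition): standardize age + RFM and run k-means. It assumes df_cus (customers.csv) is already in memory; the cluster count and the median imputation are arbitrary choices, not tuned values.
###Code
# Illustrative sketch: cluster customers on Recency/Frequency/Monetary + age
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

rfm_age = df_RFM.merge(df_cus[['customer_id', 'age']], on='customer_id', how='left')
features = rfm_age[['Recency', 'Frequency', 'Monetary', 'age']].copy()
features['age'] = features['age'].fillna(features['age'].median())

X = StandardScaler().fit_transform(features)
rfm_age['cluster'] = KMeans(n_clusters=4, random_state=0).fit_predict(X)
rfm_age.groupby('cluster')[['Recency', 'Frequency', 'Monetary', 'age']].mean()
###Output
_____no_output_____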
###Markdown
EDA for FN, Active, club_member_status and fashion_news_frequency. FN indicates whether a customer gets the Fashion News newsletter, Active indicates whether the customer is active for communication, and sales_channel_id is 2 for online and 1 for store.
###Code
df_cus.head()
print((df_cus.isna().sum()/df_cus.shape[0])*100)
print(df_cus.shape[0])
df_cus['FN'].fillna(0,inplace=True)
df_cus['Active'].fillna(0,inplace=True)
df_cus['club_member_status'].fillna('NONE',inplace=True)
df_cus['fashion_news_frequency'].fillna('NONE',inplace=True)
df_cus.loc[df_cus['fashion_news_frequency']=='None','fashion_news_frequency']='NONE'
imp = SimpleImputer()
df_cus['age']= imp.fit_transform(df_cus['age'].values.reshape(-1,1))
for col in df_cus.columns[1:-1] :
print('-'*50)
print(col.upper())
print(df_cus[col].value_counts())
###Output
_____no_output_____
###Markdown
35% of the customers are open to the fashion newsletter and 34% of the customers are active for communication. Create dummies for these variables and use them in clustering (a sketch follows below).
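A minimal sketch of the dummy encoding (illustrative; the exact column choice is an assumption):

```python
# one-hot encode the categorical communication variables for clustering
dummies = pd.get_dummies(df_cus[['club_member_status', 'fashion_news_frequency']])
cluster_features = pd.concat([df_cus[['FN', 'Active', 'age']], dummies], axis=1)
```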
###Code
df_cus['fashion_news_frequency'].value_counts()
df_cus['Active'].value_counts()
df_cus['club_member_status'].value_counts()/df_cus.shape[0]
df_cus['FN'].value_counts()
###Output
_____no_output_____
###Markdown
Subset the data for FN = 1 and check the distribution of fashion_news_frequency and Active. 92% of the customers have club_member_status as Active, so we should not use this column for clustering: most customers exhibit the same behaviour.
###Code
QC = df_cus[df_cus['FN']==1].copy()  # .copy() avoids pandas' SettingWithCopyWarning below
QC['Active'].fillna(0,inplace=True)
QC.head()
QC.shape
cols = ['Active','fashion_news_frequency']
for col in cols:
print(col.upper())
print(QC[col].value_counts())
print('missing values are',round((QC[col].isna().sum()/QC.shape[0])*100,2),'%')
###Output
_____no_output_____
###Markdown
FN indicates whether a customer receives the Fashion News newsletter; Active indicates whether the customer is active for communication; for sales_channel_id, 2 is online and 1 is in-store (answered by the competition host).
###Code
print(df_trans['sales_channel_id'].value_counts())
print('no of unique customers are', df_trans['customer_id'].nunique())
print('no of unique products:',df_trans['article_id'].nunique())
#print('We have',(pd.to_datetime(df_trans['t_dat']).max()-pd.to_datetime(df_trans['t_dat']).min()).days,'of data ranging from',df_trans['t_dat'].min(),'to',df_trans['t_dat'].max())
#data_range = pd.to_datetime(df_trans['t_dat']).max()-pd.to_datetime(df_trans['t_dat']).min()).days
data_min = pd.to_datetime(df_trans['t_dat']).min()
data_max = pd.to_datetime(df_trans['t_dat']).max()
print(f"The data has {(data_max-data_min).days} days and ranges from {data_min} to {data_max}")
print("The data has %d days and ranges from %s to %s"%((data_max-data_min).days,data_min,data_max))
###Output
_____no_output_____
###Markdown
Monthly Sales figure for customers (Time Series)
###Code
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
plt.figure(figsize = (18,16))
df_trans.groupby('t_dat')['article_id'].count().plot(kind='line',xlabel='Date',ylabel="count of purchases", title="Count of Products purchased with time")
df= pd.DataFrame(df_trans.groupby('sales_channel_id')['article_id'].count().reset_index())
plt.bar(df['sales_channel_id'],df['article_id'])
x_labels =['sales_chnl_1','sales_chnl_2']
plt.xticks(df['sales_channel_id'],x_labels)
#plt.ticklabel_format.ScalarFormatter(style='plain')
plt.xlabel('sales_channel_id')
plt.ylabel('count of articles purchased')
plt.margins(0.2)
plt.show()
df_trans.columns
df_trans.groupby(['customer_id',pd.to_datetime(df_trans['t_dat']).dt.year])['article_id'].count().sort_values(ascending=False).groupby('t_dat').sum().reset_index()
###Output
_____no_output_____
###Markdown
Customers data
###Code
import pandas as pd
df_cus = pd.read_csv("../input/h-and-m-personalized-fashion-recommendations/customers.csv")
###Output
_____no_output_____
###Markdown
"FN" is Fashion News and "Active" = 1 when the customer is active for communication
###Code
df_cus.head(5)
df_cus['club_member_status'].value_counts()
df_cus['age'].hist()
import pandas as pd
df_sku= pd.read_csv('../input/h-and-m-personalized-fashion-recommendations/articles.csv')
df_cus.head()
print(df_sku['article_id'].nunique())
print(df_sku['product_code'].nunique())
df_sku.nunique()
###Output
_____no_output_____ |
Normalization_testing.ipynb | ###Markdown
Preprocessing Normalization Mode Experiments (Keras Pre-trained Models)

- caffe: convert RGB to BGR, then zero-center each color channel with respect to the ImageNet dataset, without scaling.
- tf: scale pixels between -1 and 1, sample-wise.
- torch: scale pixels between 0 and 1, then normalize each channel with respect to the ImageNet dataset.

References: [keras/applications](https://github.com/tensorflow/tensorflow/tree/v2.4.1/tensorflow/python/keras/applications), [keras/applications/imagenet_utils](https://github.com/tensorflow/tensorflow/blob/85c8b2a817f95a3e979ecd1ed95bff1dc1335cff/tensorflow/python/keras/applications/imagenet_utils.py#L104)

Conclusion
- tf & torch (both work): **InceptionResNetV2**, **MobileNetV1**, **MobileNetV2**, **NASNet**, **DenseNet**
- tf: **InceptionV3**, **ResNetV2**, **Xception**
- original (0~255) & caffe (both work): **EfficientNet**, **MobileNetV3**, **ResNetV1**, **VGG**

Upload Image
###Code
from google.colab import files
uploaded = files.upload()
###Output
_____no_output_____
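As a quick numeric sketch of what the three modes compute per pixel (constants taken from the Keras imagenet_utils source linked above; treat this as an illustrative approximation, not the exact library code path):

```python
import numpy as np

x = np.array([[[255.0, 128.0, 0.0]]])  # a single RGB pixel with values in 0~255

x_tf = x / 127.5 - 1.0  # 'tf': scale sample-wise to [-1, 1]
x_torch = (x / 255.0 - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]  # 'torch': 0~1 then ImageNet mean/std
x_caffe = x[..., ::-1] - [103.939, 116.779, 123.68]  # 'caffe': RGB->BGR, subtract ImageNet BGR means
```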
###Markdown
Inference
###Code
from skimage.io import imread
from skimage.transform import resize
from skimage import img_as_ubyte
import tensorflow as tf
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.keras.applications.imagenet_utils import decode_predictions
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# Input image read
image = imread('./panda.jpg')
# Pre-train model
model = tf.keras.applications.InceptionV3()
# Normalization
# NOTE: the variable 'tf' below shadows the tensorflow module; this is fine for a
# single top-to-bottom run of this cell, but any later tf.* call would then fail
image_size = model.input_shape[1]
x = img_as_ubyte(resize(image, (image_size,image_size),anti_aliasing=False))
caffe = imagenet_utils.preprocess_input(x, data_format=None, mode='caffe')
torch = imagenet_utils.preprocess_input(x, data_format=None, mode='torch')
tf = imagenet_utils.preprocess_input(x, data_format=None, mode='tf')
# Visualization
plt.subplot(1,4,1)
plt.title('0~255')
plt.imshow(image)
plt.subplot(1,4,2)
plt.title("caffe")
plt.imshow(caffe)
plt.subplot(1,4,3)
plt.title("torch")
plt.imshow(torch)
plt.subplot(1,4,4)
plt.title("tf")
plt.imshow(tf)
# Inference
for i in [x,caffe,torch,tf]:
i = np.expand_dims(i, 0)
y = model.predict(i)
print(decode_predictions(y))
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
|
Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/Course_1_Part_2_Lesson_2_Notebook.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
The Hello World of Deep Learning with Neural Networks Like every first app you should start with something super simple that shows the overall scaffolding for how your code works. In the case of creating neural networks, the sample I like to use is one where it learns the relationship between two numbers. So, for example, if you were writing code for a function like this, you already know the 'rules' — ```float hw_function(float x){ float y = (2 * x) - 1; return y;}``` So how would you train a neural network to do the equivalent task? Using data! By feeding it with a set of Xs, and a set of Ys, it should be able to figure out the relationship between them. This is obviously a very different paradigm than what you might be used to, so let's step through it piece by piece. Imports: Let's start with our imports. Here we are importing TensorFlow and calling it tf for ease of use. We then import a library called numpy, which helps us to represent our data as lists easily and quickly. The framework for defining a neural network as a set of Sequential layers is called keras, so we import that too.
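For reference, a plain-Python version of the rule the network will have to learn (a trivial sketch, not part of the original lesson):

```python
def hw_function(x):
    # the 'rule' the neural network will have to discover from data
    return 2 * x - 1
```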
###Code
import tensorflow as tf
import numpy as np
from tensorflow import keras
###Output
_____no_output_____
###Markdown
Define and Compile the Neural NetworkNext we will create the simplest possible neural network. It has 1 layer, and that layer has 1 neuron, and the input shape to it is just 1 value.
###Code
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
###Output
_____no_output_____
###Markdown
Now we compile our Neural Network. When we do so, we have to specify 2 functions, a loss and an optimizer. If you've seen lots of math for machine learning, here's where it's usually used, but in this case it's nicely encapsulated in functions for you. But what happens here — let's explain... We know that in our function, the relationship between the numbers is y=2x-1. When the computer is trying to 'learn' that, it makes a guess... maybe y=10x+10. The LOSS function measures the guessed answers against the known correct answers and measures how well or how badly it did. It then uses the OPTIMIZER function to make another guess. Based on how the loss function went, it will try to minimize the loss. At that point maybe it will come up with something like y=5x+5, which, while still pretty bad, is closer to the correct result (i.e. the loss is lower). It will repeat this for the number of EPOCHS which you will see shortly. But first, here's how we tell it to use 'MEAN SQUARED ERROR' for the loss and 'STOCHASTIC GRADIENT DESCENT' for the optimizer. You don't need to understand the math for these yet, but you can see that they work! :) Over time you will learn the different and appropriate loss and optimizer functions for different scenarios.
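To make the loss idea concrete, here is a small illustration (not part of the original lesson) of the mean squared error for that first bad guess:

```python
import numpy as np

xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
ys = 2 * xs - 1          # the true relationship
guess = 10 * xs + 10     # a bad first guess, y = 10x + 10
mse = np.mean((guess - ys) ** 2)  # the quantity the optimizer tries to shrink
print(mse)
```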
###Code
model.compile(optimizer='sgd', loss='mean_squared_error')
###Output
_____no_output_____
###Markdown
Providing the Data: Next up we'll feed in some data. In this case we are taking 6 Xs and 6 Ys. You can see that the relationship between these is that y=2x-1, so where x = -1, y=-3 etc. etc. A Python library called 'Numpy' provides lots of array type data structures that are a de facto standard way of doing it. We declare that we want to use these by specifying the values as an np.array().
###Code
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
###Output
_____no_output_____
###Markdown
Training the Neural Network The process of training the neural network, where it 'learns' the relationship between the Xs and Ys, is in the **model.fit** call. This is where it will go through the loop we spoke about above, making a guess, measuring how good or bad it is (aka the loss), using the optimizer to make another guess etc. It will do it for the number of epochs you specify. When you run this code, you'll see the loss on the right hand side.
###Code
model.fit(xs, ys, epochs=500)
###Output
Epoch 1/500
1/1 [==============================] - 0s 411ms/step - loss: 29.4101
Epoch 2/500
1/1 [==============================] - 0s 4ms/step - loss: 23.4487
Epoch 3/500
1/1 [==============================] - 0s 4ms/step - loss: 18.7522
Epoch 4/500
1/1 [==============================] - 0s 3ms/step - loss: 15.0510
Epoch 5/500
1/1 [==============================] - 0s 3ms/step - loss: 12.1330
Epoch 6/500
1/1 [==============================] - 0s 3ms/step - loss: 9.8312
Epoch 7/500
1/1 [==============================] - 0s 4ms/step - loss: 8.0144
Epoch 8/500
1/1 [==============================] - 0s 4ms/step - loss: 6.5793
Epoch 9/500
1/1 [==============================] - 0s 3ms/step - loss: 5.4446
Epoch 10/500
1/1 [==============================] - 0s 3ms/step - loss: 4.5464
Epoch 11/500
1/1 [==============================] - 0s 3ms/step - loss: 3.8343
Epoch 12/500
1/1 [==============================] - 0s 4ms/step - loss: 3.2688
Epoch 13/500
1/1 [==============================] - 0s 3ms/step - loss: 2.8187
Epoch 14/500
1/1 [==============================] - 0s 4ms/step - loss: 2.4595
Epoch 15/500
1/1 [==============================] - 0s 4ms/step - loss: 2.1719
Epoch 16/500
1/1 [==============================] - 0s 4ms/step - loss: 1.9408
Epoch 17/500
1/1 [==============================] - 0s 4ms/step - loss: 1.7542
Epoch 18/500
1/1 [==============================] - 0s 3ms/step - loss: 1.6027
Epoch 19/500
1/1 [==============================] - 0s 3ms/step - loss: 1.4790
Epoch 20/500
1/1 [==============================] - 0s 3ms/step - loss: 1.3772
Epoch 21/500
1/1 [==============================] - 0s 4ms/step - loss: 1.2927
Epoch 22/500
1/1 [==============================] - 0s 4ms/step - loss: 1.2219
Epoch 23/500
1/1 [==============================] - 0s 3ms/step - loss: 1.1620
Epoch 24/500
1/1 [==============================] - 0s 4ms/step - loss: 1.1107
Epoch 25/500
1/1 [==============================] - 0s 4ms/step - loss: 1.0664
Epoch 26/500
1/1 [==============================] - 0s 4ms/step - loss: 1.0275
Epoch 27/500
1/1 [==============================] - 0s 4ms/step - loss: 0.9931
Epoch 28/500
1/1 [==============================] - 0s 4ms/step - loss: 0.9622
Epoch 29/500
1/1 [==============================] - 0s 4ms/step - loss: 0.9342
Epoch 30/500
1/1 [==============================] - 0s 4ms/step - loss: 0.9085
Epoch 31/500
1/1 [==============================] - 0s 4ms/step - loss: 0.8847
Epoch 32/500
1/1 [==============================] - 0s 4ms/step - loss: 0.8626
Epoch 33/500
1/1 [==============================] - 0s 5ms/step - loss: 0.8417
Epoch 34/500
1/1 [==============================] - 0s 12ms/step - loss: 0.8219
Epoch 35/500
1/1 [==============================] - 0s 5ms/step - loss: 0.8031
Epoch 36/500
1/1 [==============================] - 0s 5ms/step - loss: 0.7850
Epoch 37/500
1/1 [==============================] - 0s 5ms/step - loss: 0.7677
Epoch 38/500
1/1 [==============================] - 0s 4ms/step - loss: 0.7510
Epoch 39/500
1/1 [==============================] - 0s 5ms/step - loss: 0.7348
Epoch 40/500
1/1 [==============================] - 0s 5ms/step - loss: 0.7191
Epoch 41/500
1/1 [==============================] - 0s 5ms/step - loss: 0.7039
Epoch 42/500
1/1 [==============================] - 0s 5ms/step - loss: 0.6890
Epoch 43/500
1/1 [==============================] - 0s 4ms/step - loss: 0.6746
Epoch 44/500
1/1 [==============================] - 0s 4ms/step - loss: 0.6605
Epoch 45/500
1/1 [==============================] - 0s 4ms/step - loss: 0.6468
Epoch 46/500
1/1 [==============================] - 0s 4ms/step - loss: 0.6334
Epoch 47/500
1/1 [==============================] - 0s 4ms/step - loss: 0.6202
Epoch 48/500
1/1 [==============================] - 0s 5ms/step - loss: 0.6074
Epoch 49/500
1/1 [==============================] - 0s 4ms/step - loss: 0.5949
Epoch 50/500
1/1 [==============================] - 0s 5ms/step - loss: 0.5826
Epoch 51/500
1/1 [==============================] - 0s 5ms/step - loss: 0.5706
Epoch 52/500
1/1 [==============================] - 0s 7ms/step - loss: 0.5588
Epoch 53/500
1/1 [==============================] - 0s 4ms/step - loss: 0.5473
Epoch 54/500
1/1 [==============================] - 0s 5ms/step - loss: 0.5361
Epoch 55/500
1/1 [==============================] - 0s 4ms/step - loss: 0.5250
Epoch 56/500
1/1 [==============================] - 0s 5ms/step - loss: 0.5142
Epoch 57/500
1/1 [==============================] - 0s 4ms/step - loss: 0.5037
Epoch 58/500
1/1 [==============================] - 0s 5ms/step - loss: 0.4933
Epoch 59/500
1/1 [==============================] - 0s 4ms/step - loss: 0.4832
Epoch 60/500
1/1 [==============================] - 0s 5ms/step - loss: 0.4732
Epoch 61/500
1/1 [==============================] - 0s 4ms/step - loss: 0.4635
Epoch 62/500
1/1 [==============================] - 0s 4ms/step - loss: 0.4540
Epoch 63/500
1/1 [==============================] - 0s 5ms/step - loss: 0.4447
Epoch 64/500
1/1 [==============================] - 0s 5ms/step - loss: 0.4355
Epoch 65/500
1/1 [==============================] - 0s 4ms/step - loss: 0.4266
Epoch 66/500
1/1 [==============================] - 0s 6ms/step - loss: 0.4178
Epoch 67/500
1/1 [==============================] - 0s 6ms/step - loss: 0.4092
Epoch 68/500
1/1 [==============================] - 0s 9ms/step - loss: 0.4008
Epoch 69/500
1/1 [==============================] - 0s 3ms/step - loss: 0.3926
Epoch 70/500
1/1 [==============================] - 0s 4ms/step - loss: 0.3845
Epoch 71/500
1/1 [==============================] - 0s 5ms/step - loss: 0.3766
Epoch 72/500
1/1 [==============================] - 0s 4ms/step - loss: 0.3689
Epoch 73/500
1/1 [==============================] - 0s 6ms/step - loss: 0.3613
Epoch 74/500
1/1 [==============================] - 0s 3ms/step - loss: 0.3539
Epoch 75/500
1/1 [==============================] - 0s 3ms/step - loss: 0.3466
Epoch 76/500
1/1 [==============================] - 0s 5ms/step - loss: 0.3395
Epoch 77/500
1/1 [==============================] - 0s 7ms/step - loss: 0.3325
Epoch 78/500
1/1 [==============================] - 0s 3ms/step - loss: 0.3257
Epoch 79/500
1/1 [==============================] - 0s 6ms/step - loss: 0.3190
Epoch 80/500
1/1 [==============================] - 0s 7ms/step - loss: 0.3125
Epoch 81/500
1/1 [==============================] - 0s 3ms/step - loss: 0.3060
Epoch 82/500
1/1 [==============================] - 0s 5ms/step - loss: 0.2998
Epoch 83/500
1/1 [==============================] - 0s 7ms/step - loss: 0.2936
Epoch 84/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2876
Epoch 85/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2817
Epoch 86/500
1/1 [==============================] - 0s 7ms/step - loss: 0.2759
Epoch 87/500
1/1 [==============================] - 0s 5ms/step - loss: 0.2702
Epoch 88/500
1/1 [==============================] - 0s 4ms/step - loss: 0.2647
Epoch 89/500
1/1 [==============================] - 0s 5ms/step - loss: 0.2592
Epoch 90/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2539
Epoch 91/500
1/1 [==============================] - 0s 6ms/step - loss: 0.2487
Epoch 92/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2436
Epoch 93/500
1/1 [==============================] - 0s 4ms/step - loss: 0.2386
Epoch 94/500
1/1 [==============================] - 0s 6ms/step - loss: 0.2337
Epoch 95/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2289
Epoch 96/500
1/1 [==============================] - 0s 4ms/step - loss: 0.2242
Epoch 97/500
1/1 [==============================] - 0s 14ms/step - loss: 0.2196
Epoch 98/500
1/1 [==============================] - 0s 28ms/step - loss: 0.2151
Epoch 99/500
1/1 [==============================] - 0s 3ms/step - loss: 0.2106
Epoch 100/500
1/1 [==============================] - 0s 6ms/step - loss: 0.2063
Epoch 101/500
1/1 [==============================] - 0s 4ms/step - loss: 0.2021
Epoch 102/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1979
Epoch 103/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1939
Epoch 104/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1899
Epoch 105/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1860
Epoch 106/500
1/1 [==============================] - 0s 7ms/step - loss: 0.1822
Epoch 107/500
1/1 [==============================] - 0s 6ms/step - loss: 0.1784
Epoch 108/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1747
Epoch 109/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1712
Epoch 110/500
1/1 [==============================] - 0s 7ms/step - loss: 0.1676
Epoch 111/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1642
Epoch 112/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1608
Epoch 113/500
1/1 [==============================] - 0s 7ms/step - loss: 0.1575
Epoch 114/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1543
Epoch 115/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1511
Epoch 116/500
1/1 [==============================] - 0s 10ms/step - loss: 0.1480
Epoch 117/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1450
Epoch 118/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1420
Epoch 119/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1391
Epoch 120/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1362
Epoch 121/500
1/1 [==============================] - 0s 7ms/step - loss: 0.1334
Epoch 122/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1307
Epoch 123/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1280
Epoch 124/500
1/1 [==============================] - 0s 7ms/step - loss: 0.1254
Epoch 125/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1228
Epoch 126/500
1/1 [==============================] - 0s 6ms/step - loss: 0.1203
Epoch 127/500
1/1 [==============================] - 0s 5ms/step - loss: 0.1178
Epoch 128/500
1/1 [==============================] - 0s 16ms/step - loss: 0.1154
Epoch 129/500
1/1 [==============================] - 0s 5ms/step - loss: 0.1130
Epoch 130/500
1/1 [==============================] - 0s 3ms/step - loss: 0.1107
Epoch 131/500
1/1 [==============================] - 0s 5ms/step - loss: 0.1084
Epoch 132/500
1/1 [==============================] - 0s 5ms/step - loss: 0.1062
Epoch 133/500
1/1 [==============================] - 0s 4ms/step - loss: 0.1040
Epoch 134/500
1/1 [==============================] - 0s 5ms/step - loss: 0.1019
Epoch 135/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0998
Epoch 136/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0977
Epoch 137/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0957
Epoch 138/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0938
Epoch 139/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0918
Epoch 140/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0899
Epoch 141/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0881
Epoch 142/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0863
Epoch 143/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0845
Epoch 144/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0828
Epoch 145/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0811
Epoch 146/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0794
Epoch 147/500
1/1 [==============================] - 0s 9ms/step - loss: 0.0778
Epoch 148/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0762
Epoch 149/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0746
Epoch 150/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0731
Epoch 151/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0716
Epoch 152/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0701
Epoch 153/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0687
Epoch 154/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0673
Epoch 155/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0659
Epoch 156/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0645
Epoch 157/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0632
Epoch 158/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0619
Epoch 159/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0606
Epoch 160/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0594
Epoch 161/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0582
Epoch 162/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0570
Epoch 163/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0558
Epoch 164/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0547
Epoch 165/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0535
Epoch 166/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0524
Epoch 167/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0514
Epoch 168/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0503
Epoch 169/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0493
Epoch 170/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0483
Epoch 171/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0473
Epoch 172/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0463
Epoch 173/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0453
Epoch 174/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0444
Epoch 175/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0435
Epoch 176/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0426
Epoch 177/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0417
Epoch 178/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0409
Epoch 179/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0400
Epoch 180/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0392
Epoch 181/500
1/1 [==============================] - 0s 8ms/step - loss: 0.0384
Epoch 182/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0376
Epoch 183/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0368
Epoch 184/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0361
Epoch 185/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0353
Epoch 186/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0346
Epoch 187/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0339
Epoch 188/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0332
Epoch 189/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0325
Epoch 190/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0319
Epoch 191/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0312
Epoch 192/500
1/1 [==============================] - 0s 33ms/step - loss: 0.0306
Epoch 193/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0299
Epoch 194/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0293
Epoch 195/500
1/1 [==============================] - 0s 8ms/step - loss: 0.0287
Epoch 196/500
1/1 [==============================] - 0s 18ms/step - loss: 0.0281
Epoch 197/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0276
Epoch 198/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0270
Epoch 199/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0264
Epoch 200/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0259
Epoch 201/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0254
Epoch 202/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0248
Epoch 203/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0243
Epoch 204/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0238
Epoch 205/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0233
Epoch 206/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0229
Epoch 207/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0224
Epoch 208/500
1/1 [==============================] - 0s 8ms/step - loss: 0.0219
Epoch 209/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0215
Epoch 210/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0210
Epoch 211/500
1/1 [==============================] - 0s 9ms/step - loss: 0.0206
Epoch 212/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0202
Epoch 213/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0198
Epoch 214/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0194
Epoch 215/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0190
Epoch 216/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0186
Epoch 217/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0182
Epoch 218/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0178
Epoch 219/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0175
Epoch 220/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0171
Epoch 221/500
1/1 [==============================] - 0s 11ms/step - loss: 0.0167
Epoch 222/500
1/1 [==============================] - 0s 12ms/step - loss: 0.0164
Epoch 223/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0161
Epoch 224/500
1/1 [==============================] - 0s 10ms/step - loss: 0.0157
Epoch 225/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0154
Epoch 226/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0151
Epoch 227/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0148
Epoch 228/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0145
Epoch 229/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0142
Epoch 230/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0139
Epoch 231/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0136
Epoch 232/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0133
Epoch 233/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0131
Epoch 234/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0128
Epoch 235/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0125
Epoch 236/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0123
Epoch 237/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0120
Epoch 238/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0118
Epoch 239/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0115
Epoch 240/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0113
Epoch 241/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0111
Epoch 242/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0108
Epoch 243/500
1/1 [==============================] - 0s 9ms/step - loss: 0.0106
Epoch 244/500
1/1 [==============================] - 0s 9ms/step - loss: 0.0104
Epoch 245/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0102
Epoch 246/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0100
Epoch 247/500
1/1 [==============================] - 0s 9ms/step - loss: 0.0098
Epoch 248/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0096
Epoch 249/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0094
Epoch 250/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0092
Epoch 251/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0090
Epoch 252/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0088
Epoch 253/500
1/1 [==============================] - 0s 10ms/step - loss: 0.0086
Epoch 254/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0084
Epoch 255/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0083
Epoch 256/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0081
Epoch 257/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0079
Epoch 258/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0078
Epoch 259/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0076
Epoch 260/500
1/1 [==============================] - 0s 10ms/step - loss: 0.0075
Epoch 261/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0073
Epoch 262/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0072
Epoch 263/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0070
Epoch 264/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0069
Epoch 265/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0067
Epoch 266/500
1/1 [==============================] - 0s 14ms/step - loss: 0.0066
Epoch 267/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0064
Epoch 268/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0063
Epoch 269/500
1/1 [==============================] - 0s 10ms/step - loss: 0.0062
Epoch 270/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0061
Epoch 271/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0059
Epoch 272/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0058
Epoch 273/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0057
Epoch 274/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0056
Epoch 275/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0055
Epoch 276/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0053
Epoch 277/500
1/1 [==============================] - 0s 13ms/step - loss: 0.0052
Epoch 278/500
1/1 [==============================] - 0s 12ms/step - loss: 0.0051
Epoch 279/500
1/1 [==============================] - 0s 12ms/step - loss: 0.0050
Epoch 280/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0049
Epoch 281/500
1/1 [==============================] - 0s 10ms/step - loss: 0.0048
Epoch 282/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0047
Epoch 283/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0046
Epoch 284/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0045
Epoch 285/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0044
Epoch 286/500
1/1 [==============================] - 0s 24ms/step - loss: 0.0043
Epoch 287/500
1/1 [==============================] - 0s 12ms/step - loss: 0.0043
Epoch 288/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0042
Epoch 289/500
1/1 [==============================] - 0s 13ms/step - loss: 0.0041
Epoch 290/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0040
Epoch 291/500
1/1 [==============================] - 0s 21ms/step - loss: 0.0039
Epoch 292/500
1/1 [==============================] - 0s 13ms/step - loss: 0.0038
Epoch 293/500
1/1 [==============================] - 0s 17ms/step - loss: 0.0038
Epoch 294/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0037
Epoch 295/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0036
Epoch 296/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0035
Epoch 297/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0035
Epoch 298/500
1/1 [==============================] - 0s 11ms/step - loss: 0.0034
Epoch 299/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0033
Epoch 300/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0032
Epoch 301/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0032
Epoch 302/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0031
Epoch 303/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0031
Epoch 304/500
1/1 [==============================] - 0s 2ms/step - loss: 0.0030
Epoch 305/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0029
Epoch 306/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0029
Epoch 307/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0028
Epoch 308/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0028
Epoch 309/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0027
Epoch 310/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0026
Epoch 311/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0026
Epoch 312/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0025
Epoch 313/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0025
Epoch 314/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0024
Epoch 315/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0024
Epoch 316/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0023
Epoch 317/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0023
Epoch 318/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0022
Epoch 319/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0022
Epoch 320/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0021
Epoch 321/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0021
Epoch 322/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0021
Epoch 323/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0020
Epoch 324/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0020
Epoch 325/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0019
Epoch 326/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0019
Epoch 327/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0019
Epoch 328/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0018
Epoch 329/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0018
Epoch 330/500
1/1 [==============================] - 0s 11ms/step - loss: 0.0017
Epoch 331/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0017
Epoch 332/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0017
Epoch 333/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0016
Epoch 334/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0016
Epoch 335/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0016
Epoch 336/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0015
Epoch 337/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0015
Epoch 338/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0015
Epoch 339/500
1/1 [==============================] - 0s 8ms/step - loss: 0.0014
Epoch 340/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0014
Epoch 341/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0014
Epoch 342/500
1/1 [==============================] - 0s 3ms/step - loss: 0.0014
Epoch 343/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0013
Epoch 344/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0013
Epoch 345/500
1/1 [==============================] - 0s 8ms/step - loss: 0.0013
Epoch 346/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0013
Epoch 347/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0012
Epoch 348/500
1/1 [==============================] - 0s 6ms/step - loss: 0.0012
Epoch 349/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0012
Epoch 350/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0012
Epoch 351/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0011
Epoch 352/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0011
Epoch 353/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0011
Epoch 354/500
1/1 [==============================] - 0s 7ms/step - loss: 0.0011
Epoch 355/500
1/1 [==============================] - 0s 4ms/step - loss: 0.0010
Epoch 356/500
1/1 [==============================] - 0s 5ms/step - loss: 0.0010
Epoch 357/500
1/1 [==============================] - 0s 6ms/step - loss: 9.9549e-04
Epoch 358/500
1/1 [==============================] - 0s 3ms/step - loss: 9.7505e-04
Epoch 359/500
1/1 [==============================] - 0s 3ms/step - loss: 9.5502e-04
Epoch 360/500
1/1 [==============================] - 0s 6ms/step - loss: 9.3540e-04
Epoch 361/500
1/1 [==============================] - 0s 3ms/step - loss: 9.1618e-04
Epoch 362/500
1/1 [==============================] - 0s 9ms/step - loss: 8.9737e-04
Epoch 363/500
1/1 [==============================] - 0s 7ms/step - loss: 8.7894e-04
Epoch 364/500
1/1 [==============================] - 0s 4ms/step - loss: 8.6088e-04
Epoch 365/500
1/1 [==============================] - 0s 5ms/step - loss: 8.4320e-04
Epoch 366/500
1/1 [==============================] - 0s 6ms/step - loss: 8.2588e-04
Epoch 367/500
1/1 [==============================] - 0s 5ms/step - loss: 8.0891e-04
Epoch 368/500
1/1 [==============================] - 0s 4ms/step - loss: 7.9230e-04
Epoch 369/500
1/1 [==============================] - 0s 6ms/step - loss: 7.7603e-04
Epoch 370/500
1/1 [==============================] - 0s 4ms/step - loss: 7.6009e-04
Epoch 371/500
1/1 [==============================] - 0s 6ms/step - loss: 7.4447e-04
Epoch 372/500
1/1 [==============================] - 0s 5ms/step - loss: 7.2918e-04
Epoch 373/500
1/1 [==============================] - 0s 5ms/step - loss: 7.1421e-04
Epoch 374/500
1/1 [==============================] - 0s 6ms/step - loss: 6.9954e-04
Epoch 375/500
1/1 [==============================] - 0s 5ms/step - loss: 6.8517e-04
Epoch 376/500
1/1 [==============================] - 0s 5ms/step - loss: 6.7109e-04
Epoch 377/500
1/1 [==============================] - 0s 5ms/step - loss: 6.5731e-04
Epoch 378/500
1/1 [==============================] - 0s 5ms/step - loss: 6.4380e-04
Epoch 379/500
1/1 [==============================] - 0s 5ms/step - loss: 6.3058e-04
Epoch 380/500
1/1 [==============================] - 0s 16ms/step - loss: 6.1763e-04
Epoch 381/500
1/1 [==============================] - 0s 6ms/step - loss: 6.0494e-04
Epoch 382/500
1/1 [==============================] - 0s 5ms/step - loss: 5.9252e-04
Epoch 383/500
1/1 [==============================] - 0s 6ms/step - loss: 5.8034e-04
Epoch 384/500
1/1 [==============================] - 0s 6ms/step - loss: 5.6842e-04
Epoch 385/500
1/1 [==============================] - 0s 6ms/step - loss: 5.5675e-04
Epoch 386/500
1/1 [==============================] - 0s 3ms/step - loss: 5.4531e-04
Epoch 387/500
1/1 [==============================] - 0s 11ms/step - loss: 5.3411e-04
Epoch 388/500
1/1 [==============================] - 0s 6ms/step - loss: 5.2314e-04
Epoch 389/500
1/1 [==============================] - 0s 8ms/step - loss: 5.1239e-04
Epoch 390/500
1/1 [==============================] - 0s 4ms/step - loss: 5.0187e-04
Epoch 391/500
1/1 [==============================] - 0s 6ms/step - loss: 4.9156e-04
Epoch 392/500
1/1 [==============================] - 0s 5ms/step - loss: 4.8146e-04
Epoch 393/500
1/1 [==============================] - 0s 6ms/step - loss: 4.7157e-04
Epoch 394/500
1/1 [==============================] - 0s 5ms/step - loss: 4.6189e-04
Epoch 395/500
1/1 [==============================] - 0s 7ms/step - loss: 4.5240e-04
Epoch 396/500
1/1 [==============================] - 0s 4ms/step - loss: 4.4311e-04
Epoch 397/500
1/1 [==============================] - 0s 7ms/step - loss: 4.3400e-04
Epoch 398/500
1/1 [==============================] - 0s 16ms/step - loss: 4.2509e-04
Epoch 399/500
1/1 [==============================] - 0s 27ms/step - loss: 4.1636e-04
Epoch 400/500
1/1 [==============================] - 0s 7ms/step - loss: 4.0781e-04
Epoch 401/500
1/1 [==============================] - 0s 7ms/step - loss: 3.9943e-04
Epoch 402/500
1/1 [==============================] - 0s 8ms/step - loss: 3.9122e-04
Epoch 403/500
1/1 [==============================] - 0s 6ms/step - loss: 3.8319e-04
Epoch 404/500
1/1 [==============================] - 0s 5ms/step - loss: 3.7532e-04
Epoch 405/500
1/1 [==============================] - 0s 9ms/step - loss: 3.6761e-04
Epoch 406/500
1/1 [==============================] - 0s 9ms/step - loss: 3.6006e-04
Epoch 407/500
1/1 [==============================] - 0s 9ms/step - loss: 3.5266e-04
Epoch 408/500
1/1 [==============================] - 0s 3ms/step - loss: 3.4542e-04
Epoch 409/500
1/1 [==============================] - 0s 3ms/step - loss: 3.3832e-04
Epoch 410/500
1/1 [==============================] - 0s 7ms/step - loss: 3.3137e-04
Epoch 411/500
1/1 [==============================] - 0s 3ms/step - loss: 3.2457e-04
Epoch 412/500
1/1 [==============================] - 0s 5ms/step - loss: 3.1790e-04
Epoch 413/500
1/1 [==============================] - 0s 11ms/step - loss: 3.1137e-04
Epoch 414/500
1/1 [==============================] - 0s 4ms/step - loss: 3.0497e-04
Epoch 415/500
1/1 [==============================] - 0s 9ms/step - loss: 2.9871e-04
Epoch 416/500
1/1 [==============================] - 0s 8ms/step - loss: 2.9257e-04
Epoch 417/500
1/1 [==============================] - 0s 4ms/step - loss: 2.8657e-04
Epoch 418/500
1/1 [==============================] - 0s 4ms/step - loss: 2.8068e-04
Epoch 419/500
1/1 [==============================] - 0s 10ms/step - loss: 2.7491e-04
Epoch 420/500
1/1 [==============================] - 0s 7ms/step - loss: 2.6927e-04
Epoch 421/500
1/1 [==============================] - 0s 7ms/step - loss: 2.6374e-04
Epoch 422/500
1/1 [==============================] - 0s 7ms/step - loss: 2.5832e-04
Epoch 423/500
1/1 [==============================] - 0s 5ms/step - loss: 2.5301e-04
Epoch 424/500
1/1 [==============================] - 0s 6ms/step - loss: 2.4782e-04
Epoch 425/500
1/1 [==============================] - 0s 7ms/step - loss: 2.4273e-04
Epoch 426/500
1/1 [==============================] - 0s 5ms/step - loss: 2.3774e-04
Epoch 427/500
1/1 [==============================] - 0s 7ms/step - loss: 2.3285e-04
Epoch 428/500
1/1 [==============================] - 0s 7ms/step - loss: 2.2807e-04
Epoch 429/500
1/1 [==============================] - 0s 5ms/step - loss: 2.2339e-04
Epoch 430/500
1/1 [==============================] - 0s 7ms/step - loss: 2.1880e-04
Epoch 431/500
1/1 [==============================] - 0s 4ms/step - loss: 2.1430e-04
Epoch 432/500
1/1 [==============================] - 0s 4ms/step - loss: 2.0990e-04
Epoch 433/500
1/1 [==============================] - 0s 5ms/step - loss: 2.0559e-04
Epoch 434/500
1/1 [==============================] - 0s 5ms/step - loss: 2.0137e-04
Epoch 435/500
1/1 [==============================] - 0s 4ms/step - loss: 1.9723e-04
Epoch 436/500
1/1 [==============================] - 0s 4ms/step - loss: 1.9318e-04
Epoch 437/500
1/1 [==============================] - 0s 6ms/step - loss: 1.8921e-04
Epoch 438/500
1/1 [==============================] - 0s 4ms/step - loss: 1.8533e-04
Epoch 439/500
1/1 [==============================] - 0s 5ms/step - loss: 1.8152e-04
Epoch 440/500
1/1 [==============================] - 0s 4ms/step - loss: 1.7779e-04
Epoch 441/500
1/1 [==============================] - 0s 4ms/step - loss: 1.7414e-04
Epoch 442/500
1/1 [==============================] - 0s 5ms/step - loss: 1.7056e-04
Epoch 443/500
1/1 [==============================] - 0s 4ms/step - loss: 1.6706e-04
Epoch 444/500
1/1 [==============================] - 0s 6ms/step - loss: 1.6363e-04
Epoch 445/500
1/1 [==============================] - 0s 3ms/step - loss: 1.6027e-04
Epoch 446/500
1/1 [==============================] - 0s 3ms/step - loss: 1.5697e-04
Epoch 447/500
1/1 [==============================] - 0s 3ms/step - loss: 1.5375e-04
Epoch 448/500
1/1 [==============================] - 0s 6ms/step - loss: 1.5059e-04
Epoch 449/500
1/1 [==============================] - 0s 3ms/step - loss: 1.4750e-04
Epoch 450/500
1/1 [==============================] - 0s 5ms/step - loss: 1.4447e-04
Epoch 451/500
1/1 [==============================] - 0s 4ms/step - loss: 1.4150e-04
Epoch 452/500
1/1 [==============================] - 0s 11ms/step - loss: 1.3859e-04
Epoch 453/500
1/1 [==============================] - 0s 3ms/step - loss: 1.3575e-04
Epoch 454/500
1/1 [==============================] - 0s 7ms/step - loss: 1.3296e-04
Epoch 455/500
1/1 [==============================] - 0s 11ms/step - loss: 1.3023e-04
Epoch 456/500
1/1 [==============================] - 0s 3ms/step - loss: 1.2755e-04
[... epochs 457-498 omitted: the loss keeps decreasing smoothly from 1.2493e-04 to 5.3346e-05 ...]
Epoch 499/500
1/1 [==============================] - 0s 12ms/step - loss: 5.2250e-05
Epoch 500/500
1/1 [==============================] - 0s 21ms/step - loss: 5.1176e-05
###Markdown
Ok, now you have a model that has been trained to learn the relationship between X and Y. You can use the **model.predict** method to have it figure out the Y for a previously unknown X. So, for example, if X = 10, what do you think Y will be? Take a guess before you run this code:
###Code
print(model.predict([10.0]))
###Output
[[18.97913]]
|
material_acompanhamento/lecture03.ipynb | ###Markdown
Lecture 3: Python - a bit more advanced

**What will we see today?**
- how to declare functions and import packages in Python
- a package for numerical data manipulation
- a package for (1-D) data visualization

**Supporting material**
- NumPy user guide: https://numpy.org/doc/stable/numpy-user.pdf
- NumPy: https://ncar-hackathons.github.io/scientific-computing/numpy/intro.html
- NumPy + SciPy: https://github.com/gustavo-marques/hands-on-examples/blob/master/scientific-computing/numpy_scipy.ipynb
- Matplotlib documentation: https://matplotlib.org/3.3.2/contents.html

Python - a bit more advanced

When we design an algorithm to solve a problem, it is common in programming to set up small subproblems that can be solved one at a time: the step-by-step path to a solution we have already talked about.

In Python we can code these subproblems as small (sometimes not so small) blocks, applying the concepts of functions and methods. Functions follow the procedural programming style, that is, several functions being called whenever needed, while methods belong to object-oriented programming, a more abstract programming technique. For lack of time, this course covers only functions.

Functions

**Definition**

A function is nothing more than an independent block of code that performs one or more specific actions, returning (or not) a value/variable when called. Functions have a name, used to call them, and we can pass information to them in the form of ```arguments```.

**Syntax**

Let's briefly dissect a function.

We use the keyword ```def``` to tell Python we are starting to define a new function and, right after it, we state the function's name. Inside the parentheses we place the arguments, which can have any name. Think of arguments as the variables we hand over for the function to work with. This is necessary because, being independent blocks of code, functions do not have access to the general variables of a program. Finally, after the function name and its arguments, we add ```:``` to indicate a new code block.

Right below, between triple quotes, we place a ```docstring```, which is simply the function's documentation: what it is for, the arguments it needs to work properly, and other useful information. This is very important, because it will save us precious time in the future, when we have many functions and no longer remember what they all do.

Finally, functions may or may not return values when called. It can be the result of a computation, a message saying whether the code ran well, or simply nothing (for example, a function that plots and saves a figure).

By convention, we define functions right after importing packages, that is, at the beginning of the code, but we will see more advanced and complex examples throughout the lectures.
###Code
def K_para_C(T_em_K):
    # indented block of code
    print('We are inside a function!')
    # conversion
    T_em_C = T_em_K - 273.15
    return T_em_C
temperatura = K_para_C(273.15)
print(temperatura)
###Output
We are inside a function!
0.0
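###Markdown
A quick aside (a hedged extra example, not from the original lesson): arguments can also have default values, which become optional when the function is called. The name K_para_C_v2 below is made up for illustration.
###Code
def K_para_C_v2(T_em_K, casas_decimais=2):
    """Converts Kelvin to Celsius, rounding to `casas_decimais` decimal places."""
    return round(T_em_K - 273.15, casas_decimais)

print(K_para_C_v2(300.0))     # uses the default -> 26.85
print(K_para_C_v2(300.0, 0))  # overrides the default -> 27.0
###Output
_____no_output_____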
###Markdown
We can make a function more complete by adding information about how it works. For that, we use ```docstrings```:
###Code
def K_para_C(T_em_K):
    """
    This block of text between triple quotes is called a docstring. It is used to record
    important information about the function, such as: the values it receives and returns,
    what it does, whether it is based on a scientific paper, and much more. When in doubt
    about a function, we can look up its docstring with the command: K_para_C?
    In our case, this is a function to convert a temperature from Kelvin to Celsius.

    parameters
    ----------
    T_em_K: float
        Temperature in Kelvin to be converted.

    returns
    -------
    T_em_C: float
        Temperature in degrees Celsius.
    """
    # indented block of code
    print('We are inside a function!')
    # conversion
    T_em_C = T_em_K - 273.15
    return T_em_C
###Output
_____no_output_____
###Markdown
Packages (libraries)

Packages are collections of specific functions built to solve a problem or perform some data analysis. In fact, we use several packages when programming in Python, as we will see ahead. We can create our own package, generating installers and so on, but the usual route is to install packages through a package-management system such as ```pip``` or ```conda/anaconda``` itself.

---------------

Numerical package: NumPy
- the backbone of virtually every scientific package today
- used for numerical operations (matrix or scalar)
- high performance

**Installation**

```bash
pip install numpy
```
or
```bash
conda install numpy
```

**Importing** the package:

To use the NumPy package, we need to import it into our code. We do that with the ```import``` statement and, in the case below, we also give NumPy a nickname, which makes it easier to use the package's functions.

```python
import numpy as np
```

In plain words, the statement above reads: import numpy as np.

**Concepts**

Recalling some mathematical concepts:

- Vectors (N,):

$V = \begin{bmatrix}1 & 2 & 3\end{bmatrix}$

- Matrices (NxM, rows x columns):

$M = \begin{bmatrix}1 & 2 & 3\\ 4 & 5 & 6\end{bmatrix}$

Let's look at some examples and understand in practice what NumPy offers.
###Code
# first we import it; this only needs to be done once per script
import numpy as np
V = np.array([1, 2, 3])
M = np.array([[1, 2, 3], [4, 5, 6]])
M
###Output
_____no_output_____
###Markdown
Although we speak of vectors and matrices, to NumPy it is all the same thing; what changes is only the number of dimensions. Note: ```ndarray``` stands for n-dimensional array.
###Code
# type of each object
type(M), type(V)
###Output
_____no_output_____
###Markdown
How do we find out the number of dimensions?
###Code
V.ndim, M.ndim
###Output
_____no_output_____
###Markdown
We can check the shape of the array we are working with through the .shape attribute, or its total number of elements through .size:
###Code
V.shape, V.size
M.shape, M.size
###Output
_____no_output_____
###Markdown
**Usefulness**

The concepts above look a lot like the lists we saw earlier. Using NumPy, however, allows us to work with matrix operations and performs much better in terms of memory; a rough, hedged comparison follows this cell.

Besides that, once we create an array with NumPy, we cannot assign to it any value whose type differs from the one the array was created with:
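Below is an illustrative sketch of that memory claim (exact byte counts vary with platform and Python version; on 64-bit builds each array element typically takes 8 bytes):
###Code
import sys
import numpy as np

lista = list(range(1000))
arr = np.arange(1000)

# approximate size of the list: the list object plus each int object it holds
tamanho_lista = sys.getsizeof(lista) + sum(sys.getsizeof(i) for i in lista)
print('list : ~{} bytes'.format(tamanho_lista))

# the array keeps its 1000 values in a single contiguous buffer
print('array: {} bytes'.format(arr.nbytes))
###Output
_____no_output_____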
###Code
V[0] = 'teste'  # raises a ValueError: the string cannot be cast to the array's int dtype

# but why does this one work?
# np.array(['teste', 0]) creates an array of strings (dtype '<U5'),
# so the 0 assigned below is silently converted to the string '0'
A = np.array(['teste', 0])
A[0] = 0
###Output
_____no_output_____
###Markdown
We can also indicate the type of array we want at creation time, using the ```dtype``` argument with any of the options we already know (int, float, bool and complex):
###Code
c = np.array([1, 2, 3], dtype=complex)
c
###Output
_____no_output_____
###Markdown
**Main functions available in NumPy**
###Code
# create an ordered (ascending) vector with a step of 1
x = np.arange(0, 100, 1)
x
# we can create the same vector, but in descending order
x_inverso = np.arange(100, 0, -1)
x_inverso
# create a vector from 0 to 100 with 10 evenly spaced points
y = np.linspace(0, 100, 10)
y
# create a grid
x,y = np.mgrid[0:5, 0:5]
x
# working with random numbers
np.random.rand(3,3)
###Output
_____no_output_____
###Markdown
Other methods: cumsum, dot, sort, max, min, argmax, argmin, and more (note that sqrt is a NumPy function, np.sqrt, and det lives in np.linalg rather than on the array itself).

Try out what these methods do; a short demo follows this cell. Remember that to use them you do:

```python
x = np.arange(0,10,1)
soma = x.sum()
```

---------------

Mixing NumPy and data visualization

Visualization package: Matplotlib
- the foundation of any data visualization in Python
- a pain to import, but it gets less painful with time
- very powerful in terms of control over the plot elements and the figure structure itself, that is:
    - complex layouts of plotting axes can be handled by this package

**Importing**

```python
import matplotlib.pyplot as plt
```

Source: Prof Storopoli [https://github.com/storopoli/ciencia-de-dados]
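A short demo of the methods above (a hedged sketch; the variable names here are made up for illustration):
###Code
import numpy as np

x = np.arange(0, 10, 1)
print(x.sum())                 # 45: sum of all elements
print(x.cumsum())              # running (cumulative) sum
print(x.max(), x.min())        # 9 0
print(x.argmax(), x.argmin())  # positions of the max/min: 9 0
print(x.dot(x))                # dot product with itself: 285
print(np.sqrt(x))              # sqrt is a NumPy function, not an array method

m = np.array([[1., 2.], [3., 4.]])
print(np.linalg.det(m))        # determinant lives in np.linalg: -2.0

bagunca = np.array([3, 1, 2])
bagunca.sort()                 # sort works in place
print(bagunca)                 # [1 2 3]
###Output
_____no_output_____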
###Code
import matplotlib.pyplot as plt
# when working in a notebook, use the command below to display figures inside the notebook itself:
%matplotlib inline
###Output
_____no_output_____
###Markdown
We can draw a single plot in an image using:
###Code
# start (instantiate) a new figure
fig = plt.figure()
# note that, since nothing has actually been plotted, nothing shows up below.
###Output
_____no_output_____
###Markdown
Repeating the command, now inserting a simple 1-D plot:
###Code
fig = plt.figure()
plt.plot([1,2,3])
###Output
_____no_output_____
###Markdown
We also have the option of drawing several plots in the same figure. For that, we use another matplotlib function:
###Code
fig,axes = plt.subplots(nrows=1, ncols=2)
###Output
_____no_output_____
###Markdown
Unlike the first approach, this second one already gives us two axes on which to plot whatever we want. Note that the variable ```axes``` in this case is a 1-D array with shape ```(2,)``` holding those axes. So, to actually put some information on a figure with subplots, we do:
###Code
fig,axes = plt.subplots(1,2)
# showing the type of axes
print(type(axes))
# plotting on the left (1st) subplot
ax = axes[0]
ax.plot([1,2,3])
# plotting on the right (2nd) subplot
axes[1].plot([3,2,1])
###Output
<class 'numpy.ndarray'>
###Markdown
We can make the figure even tidier. Note that both subplots share the same y axis (ordinates), with the same value limits. We can tell them to share this axis with the ```sharey``` argument. The same holds for the abscissas (x axis), but with ```sharex```. See:
###Code
fig,axes = plt.subplots(nrows=1, ncols=2, sharey=True)
# showing the type of axes
print(type(axes))
# plotting on the left (1st) subplot
ax = axes[0]
ax.plot([1,2,3])
# plotting on the right (2nd) subplot
axes[1].plot([3,2,1])
###Output
<class 'numpy.ndarray'>
###Markdown
And we can also specify the size of our figure with the ```figsize``` argument. This argument works in ```plt.figure()``` as well.
###Code
fig,axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,5))
# plotting on the left (1st) subplot
ax = axes[0]
ax.plot([1,2,3])
# plotting on the right (2nd) subplot
axes[1].plot([3,2,1])
###Output
_____no_output_____
###Markdown
Customizing the plots

We can also change the colors and the markers within a figure using simple commands, such as:
###Code
fig,axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,5))
# plotting on the left (1st) subplot
ax = axes[0]
ax.plot([1,2,3], 'r-')
# plotting on the right (2nd) subplot
axes[1].plot([3,2,1], 'g-o')
###Output
_____no_output_____
###Markdown
Taking advantage of our earlier discussion of functions, let's build one that reproduces the figures made above:
###Code
def plot_simples():
    fig,axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(15,5))
    # plotting on the left (1st) subplot
    ax = axes[0]
    ax.plot([1,2,3], 'r-')
    # plotting on the right (2nd) subplot
    axes[1].plot([3,2,1], 'g-o')
    # we return fig and axes because we will need them later
    return fig,axes
###Output
_____no_output_____
###Markdown
Now, using our function, let's make further customizations to our figure:
###Code
import numpy as np
# using our function; note that we assign the function's return to two variables
fig,axes = plot_simples()
# adding labels to the abscissa (x) and ordinate (y) axes:
axes[0].set_xlabel('Abscissa')
axes[0].set_ylabel('Ordenada')
axes[1].set_xlabel('Abscissa')
# adding a title to each subplot
axes[0].set_title('Figura da esquerda')
axes[1].set_title('Figura da direita')
# we can set the discrete tick positions of the abscissas and ordinates
axes[0].set_xticks(np.arange(0,3,1))
# and we can even replace the label of each tick
axes[0].set_xticklabels(['primeiro', 'segundo', 'terceiro'])
# setting the limits
axes[1].set_xlim([0,10])
# adding other elements (linestyle keyword, so the gridlines come out dashed):
axes[0].grid(linestyle='--', alpha=.3)
axes[1].grid(linestyle='--', alpha=.3)
###Output
_____no_output_____
###Markdown
**Plot types for 1-D data**
- line: ```plt.plot()```
- bar: ```plt.barh()```
- histogram: ```plt.hist()```

In a similar fashion, we can draw bar charts or histograms:
###Code
fig,axes = plt.subplots(1,2,figsize=(15,5))
# x and y were overwritten by np.mgrid above, so we re-create 1-D arrays
# of matching length for the bar chart to work
x = np.arange(0, 100, 1)
y = np.linspace(0, 100, 100)
# horizontal bar chart
axes[0].barh(x,y)
# histogram of x with 5 bins
_,_,_ = axes[1].hist(x,5)
# bonus: customizing the subplots in a single line with a list comprehension
_ = [ax.grid(linestyle='--', alpha=.3) for ax in axes]
###Output
_____no_output_____
###Markdown
If you are unsure how to use a function, you can check the matplotlib documentation or, right here, ask for help with:
###Code
# need help?
help(plt.plot)
###Output
Help on function plot in module matplotlib.pyplot:
plot(*args, scalex=True, scaley=True, data=None, **kwargs)
Plot y versus x as lines and/or markers.
Call signatures::
plot([x], y, [fmt], *, data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
The coordinates of the points or line nodes are given by *x*, *y*.
The optional parameter *fmt* is a convenient way for defining basic
formatting like color, marker and linestyle. It's a shortcut string
notation described in the *Notes* section below.
>>> plot(x, y) # plot x and y using default line style and color
>>> plot(x, y, 'bo') # plot x and y using blue circle markers
>>> plot(y) # plot y using x as index array 0..N-1
>>> plot(y, 'r+') # ditto, but with red plusses
You can use `.Line2D` properties as keyword arguments for more
control on the appearance. Line properties and *fmt* can be mixed.
The following two calls yield identical results:
>>> plot(x, y, 'go--', linewidth=2, markersize=12)
>>> plot(x, y, color='green', marker='o', linestyle='dashed',
... linewidth=2, markersize=12)
When conflicting with *fmt*, keyword arguments take precedence.
**Plotting labelled data**
There's a convenient way for plotting objects with labelled data (i.e.
data that can be accessed by index ``obj['y']``). Instead of giving
the data in *x* and *y*, you can provide the object in the *data*
parameter and just give the labels for *x* and *y*::
>>> plot('xlabel', 'ylabel', data=obj)
All indexable objects are supported. This could e.g. be a `dict`, a
`pandas.DataFrame` or a structured numpy array.
**Plotting multiple sets of data**
There are various ways to plot multiple sets of data.
- The most straight forward way is just to call `plot` multiple times.
Example:
>>> plot(x1, y1, 'bo')
>>> plot(x2, y2, 'go')
- Alternatively, if your data is already a 2d array, you can pass it
directly to *x*, *y*. A separate data set will be drawn for every
column.
Example: an array ``a`` where the first column represents the *x*
values and the other columns are the *y* columns::
>>> plot(a[0], a[1:])
- The third way is to specify multiple sets of *[x]*, *y*, *[fmt]*
groups::
>>> plot(x1, y1, 'g^', x2, y2, 'g-')
In this case, any additional keyword argument applies to all
datasets. Also this syntax cannot be combined with the *data*
parameter.
By default, each line is assigned a different style specified by a
'style cycle'. The *fmt* and line property parameters are only
necessary if you want explicit deviations from these defaults.
Alternatively, you can also change the style cycle using
:rc:`axes.prop_cycle`.
Parameters
----------
x, y : array-like or scalar
The horizontal / vertical coordinates of the data points.
*x* values are optional and default to ``range(len(y))``.
Commonly, these parameters are 1D arrays.
They can also be scalars, or two-dimensional (in that case, the
columns represent separate data sets).
These arguments cannot be passed as keywords.
fmt : str, optional
A format string, e.g. 'ro' for red circles. See the *Notes*
section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting
basic line properties. All of these and more can also be
controlled by keyword arguments.
This argument cannot be passed as keyword.
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*.
.. note::
Technically there's a slight ambiguity in calls where the
second label is a valid *fmt*. ``plot('n', 'o', data=obj)``
could be ``plt(x, y)`` or ``plt(y, fmt)``. In such cases,
the former interpretation is chosen, but a warning is issued.
You may suppress the warning by adding an empty format string
``plot('n', 'o', '', data=obj)``.
Returns
-------
list of `.Line2D`
A list of lines representing the plotted data.
Other Parameters
----------------
scalex, scaley : bool, default: True
These parameters determine if the view limits are adapted to the
data limits. The values are passed on to `autoscale_view`.
**kwargs : `.Line2D` properties, optional
*kwargs* are used to specify properties like a line label (for
auto legends), linewidth, antialiasing, marker face color.
Example::
>>> plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
>>> plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
If you make multiple lines with one plot call, the kwargs
apply to all those lines.
Here is a list of available `.Line2D` properties:
Properties:
agg_filter: a filter function, which takes a (m, n, 3) float array and a dpi value, and returns a (m, n, 3) array
alpha: float or None
animated: bool
antialiased or aa: bool
clip_box: `.Bbox`
clip_on: bool
clip_path: Patch or (Path, Transform) or None
color or c: color
contains: unknown
dash_capstyle: {'butt', 'round', 'projecting'}
dash_joinstyle: {'miter', 'round', 'bevel'}
dashes: sequence of floats (on/off ink in points) or (None, None)
data: (2, N) array or two 1D arrays
drawstyle or ds: {'default', 'steps', 'steps-pre', 'steps-mid', 'steps-post'}, default: 'default'
figure: `.Figure`
fillstyle: {'full', 'left', 'right', 'bottom', 'top', 'none'}
gid: str
in_layout: bool
label: object
linestyle or ls: {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
linewidth or lw: float
marker: marker style string, `~.path.Path` or `~.markers.MarkerStyle`
markeredgecolor or mec: color
markeredgewidth or mew: float
markerfacecolor or mfc: color
markerfacecoloralt or mfcalt: color
markersize or ms: float
markevery: None or int or (int, int) or slice or List[int] or float or (float, float) or List[bool]
path_effects: `.AbstractPathEffect`
picker: unknown
pickradius: float
rasterized: bool or None
sketch_params: (scale: float, length: float, randomness: float)
snap: bool or None
solid_capstyle: {'butt', 'round', 'projecting'}
solid_joinstyle: {'miter', 'round', 'bevel'}
transform: `matplotlib.transforms.Transform`
url: str
visible: bool
xdata: 1D array
ydata: 1D array
zorder: float
See Also
--------
scatter : XY scatter plot with markers of varying size and/or color (
sometimes also called bubble chart).
Notes
-----
**Format Strings**
A format string consists of a part for color, marker and line::
fmt = '[marker][line][color]'
Each of them is optional. If not provided, the value from the style
cycle is used. Exception: If ``line`` is given, but no ``marker``,
the data will be a line without markers.
Other combinations such as ``[color][marker][line]`` are also
supported, but note that their parsing may be ambiguous.
**Markers**
============= ===============================
character description
============= ===============================
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
============= ===============================
**Line Styles**
============= ===============================
character description
============= ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
============= ===============================
Example format strings::
'b' # blue markers with default shape
'or' # red circles
'-g' # green solid line
'--' # dashed line with default color
'^k:' # black triangle_up markers connected by a dotted line
**Colors**
The supported color abbreviations are the single letter codes
============= ===============================
character color
============= ===============================
``'b'`` blue
``'g'`` green
``'r'`` red
``'c'`` cyan
``'m'`` magenta
``'y'`` yellow
``'k'`` black
``'w'`` white
============= ===============================
and the ``'CN'`` colors that index into the default property cycle.
If the color is the only part of the format string, you can
additionally use any `matplotlib.colors` spec, e.g. full names
(``'green'``) or hex strings (``'#008000'``).
###Markdown
Finally, to **save** a figure we use the ```plt.savefig()``` method:
- available formats: pdf, png, jpg, tif, svg
- dpi: quality (resolution) of the saved figure
- bbox_inches: use ```tight``` to trim the surrounding whitespace
###Code
# using our function
fig,ax = plot_simples()
# saving the figure (note the argument name: bbox_inches)
plt.savefig('nome_da_figure.png', dpi=150, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Exercise:

Using the dictionary created from the species list, plot a horizontal bar chart with ```plt.barh()```.

**hint**: use ```list()``` to convert the dictionary's keys and values into lists. A commented-out sketch of one possible solution is left in the cell below.
###Code
# space reserved for you to try solving the exercise
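# a hedged sketch of one possible solution; `especies` below is a
# hypothetical stand-in for the dictionary built in the earlier lesson:
# especies = {'dourado': 10, 'tilapia': 25, 'lambari': 7}
# fig, ax = plt.subplots(figsize=(8, 4))
# ax.barh(list(especies.keys()), list(especies.values()))
# ax.set_xlabel('contagem')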
###Output
_____no_output_____
###Markdown
Of course, there are many other ways to visualize one-dimensional data in Python. We presented two quite basic types that are nonetheless heavily used in a scientist's day-to-day work. For more examples, browse the matplotlib documentation. Throughout the course we will explore several visualization formats and explain each case as we use it.

Homework: vertical profiles

Files with climatological temperature and salinity from the World Ocean Atlas (WOA) are provided for a specific region of the northern coast of São Paulo state: Ubatuba.

1. load the data with NumPy using genfromtxt or loadtxt (with delimiter=','), one variable per property:

```python
temperatura = np.loadtxt('../dados/temperatura_woa2018_ubatuba_60m.csv', delimiter=',')
```

2. explore the structure of the resulting array. For example, identify:
    - what is each column? And each row?
    - how do you access them through array indexing?

Be familiar with the provided array before moving on to visualization; this will avoid errors that might otherwise scare you. A commented sketch is left in the exploration cell below.
###Code
# code to download the files, in case you are running this notebook on Google Colab
!wget --directory-prefix=../dados/ https://raw.githubusercontent.com/nilodna/python-basico/feature_iojr-shortcourse/dados/temperatura_woa2018_ubatuba_60m.csv
!wget --directory-prefix=../dados/ https://raw.githubusercontent.com/nilodna/python-basico/feature_iojr-shortcourse/dados/salinidade_woa2018_ubatuba_60m.csv
# exploring the temperature and salinity arrays
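# a hedged sketch to get started (assumes the downloads above succeeded):
# temperatura = np.loadtxt('../dados/temperatura_woa2018_ubatuba_60m.csv', delimiter=',')
# salinidade = np.loadtxt('../dados/salinidade_woa2018_ubatuba_60m.csv', delimiter=',')
# print(temperatura.shape)   # (rows, columns): what does each axis represent?
# print(temperatura[:, 0])   # first column
# print(temperatura[0, :])   # first row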
###Output
_____no_output_____ |
experiments/5_seg_reg_net/vgg_cells/experiments_stats.ipynb | ###Markdown
1. Setup
###Code
import sys
sys.path.append('../../..')
import matplotlib.pyplot as plt
import warnings
from experiments.experiment_utils import *
%matplotlib inline
%load_ext autoreload
%autoreload 2
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
2. Experiment stats
- Architecture: SegRegNet;
- Train size: 64;
- Batch size: 16;
- 5 runs with random train images.

Only density_maps
###Code
res_list = load_experiments_results('.', ['n_64', 'loss_logcosh', 'patch_4_128x128'], csv_name='results_density')
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_logcosh', 'patch_4_128x128']))
res_df.head()
###Output
n_64_sigma_5_randseed_641_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_642_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_643_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_644_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_645_loss_logcosh_patch_4_128x128
###Markdown
Density_map * (seg_map > pred_seg_thr)
###Code
res_list = load_experiments_results('.', ['n_64', 'loss_logcosh', 'patch_4_128x128'], csv_name='results_density_seg')
res_df = get_mean_and_std_of_experiments(res_list, ','.join(['vgg_cells', 'n_64', 'loss_logcosh', 'patch_4_128x128']))
res_df.head()
###Output
n_64_sigma_5_randseed_641_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_642_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_643_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_644_loss_logcosh_patch_4_128x128
n_64_sigma_5_randseed_645_loss_logcosh_patch_4_128x128
|