wk08/quiz_wk08_soln.ipynb | ###Markdown
Week 08 Quiz: Functions basics - solution. The following code calls a function "poly3" for which the code is given:
###Code
import numpy as np # not needed in example below
def poly3(x):
""" POLY3: Evaluates the cubic polynomial y = a0 + a1*x+a2*x**2+a3*x**3
using Horner's method, where a0=1, a1=-1, a2=2, a3=1.
"""
a0 = 1
a1 = -1
a2 = 2
a3 = 1
y = a0 + x*(a1 + x*(a2+x*a3))
return y
myx = -2
new = poly3(myx)
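# Hand check of Horner's evaluation for myx = -2:
#   2 + (-2)*1 = 0;  -1 + (-2)*0 = -1;  1 + (-2)*(-1) = 3, so new == 3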
###Output
_____no_output_____ |
notebooks/elastic_lsh/notebooks/04 - finding correlated groups of features.ipynb | ###Markdown
load features
###Code
import os
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.cluster import KMeans

feature_vector_dir = "/Users/pimh/Desktop/feature_vectors/"
# feature_vector_ids = np.random.choice(os.listdir(feature_vector_dir), 25_000)
feature_vector_ids = os.listdir(feature_vector_dir)
feature_vector_paths = [
os.path.join(feature_vector_dir, id) for id in feature_vector_ids
]
feature_vectors = []
for path in feature_vector_paths:
with open(path, "rb") as f:  # binary mode: the files hold raw float32 bytes
feature_vector = np.fromfile(f, dtype=np.float32)
feature_vectors.append(feature_vector)
feature_vectors = np.stack(feature_vectors)
feature_vectors.shape
###Output
_____no_output_____
###Markdown
Correlation matrix. Take the absolute value because we just care about the _strength_ of the correlation, not its sign; strong clusters should show up.
###Code
corr_matrix = np.abs(np.corrcoef(feature_vectors.T))
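# np.corrcoef treats each row as a variable, hence the transpose: the result
# is an (n_features, n_features) matrix of pairwise feature correlations.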
corr_matrix.shape
sns.heatmap(corr_matrix[:500, :500])
###Output
_____no_output_____
###Markdown
cluster correlated columns
###Code
clusters = KMeans(n_clusters=256).fit(corr_matrix)
clusters.labels_
pd.Series(clusters.labels_).value_counts().reset_index(drop=True).plot();
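# Hypothetical extra check (not part of the original run): reorder the
# correlation matrix by cluster label so the block structure becomes visible.
order = np.argsort(clusters.labels_)
sns.heatmap(corr_matrix[order][:, order][:500, :500]);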
###Output
_____no_output_____
###Markdown
save them
###Code
with open("data/column_labels.npy", "wb") as f:
np.save(f, clusters.labels_)
###Output
_____no_output_____ |
SWELL-KW/SWELL-KW_FastGRNN.ipynb | ###Markdown
SWELL-KW FastGRNN. Adapted from Microsoft's notebooks (available at https://github.com/microsoft/EdgeML), authored by Dennis et al.
###Code
import pandas as pd
import numpy as np
from tabulate import tabulate
import os
import datetime as datetime
import pickle as pkl
from sklearn.model_selection import train_test_split
import pathlib
from os import mkdir
def loadData(dirname):
x_train = np.load(dirname + '/' + 'x_train.npy')
y_train = np.load(dirname + '/' + 'y_train.npy')
x_test = np.load(dirname + '/' + 'x_test.npy')
y_test = np.load(dirname + '/' + 'y_test.npy')
x_val = np.load(dirname + '/' + 'x_val.npy')
y_val = np.load(dirname + '/' + 'y_val.npy')
return x_train, y_train, x_test, y_test, x_val, y_val
def makeEMIData(subinstanceLen, subinstanceStride, sourceDir, outDir):
x_train, y_train, x_test, y_test, x_val, y_val = loadData(sourceDir)
x, y = bagData(x_train, y_train, subinstanceLen, subinstanceStride)
np.save(outDir + '/x_train.npy', x)
np.save(outDir + '/y_train.npy', y)
print('Num train %d' % len(x))
x, y = bagData(x_test, y_test, subinstanceLen, subinstanceStride)
np.save(outDir + '/x_test.npy', x)
np.save(outDir + '/y_test.npy', y)
print('Num test %d' % len(x))
x, y = bagData(x_val, y_val, subinstanceLen, subinstanceStride)
np.save(outDir + '/x_val.npy', x)
np.save(outDir + '/y_val.npy', y)
print('Num val %d' % len(x))
def bagData(X, Y, subinstanceLen, subinstanceStride):
numClass = 2
numSteps = 20
numFeats = 22
assert X.ndim == 3
print(X.shape)
assert X.shape[1] == numSteps
assert X.shape[2] == numFeats
assert subinstanceLen <= numSteps
assert subinstanceLen > 0
assert subinstanceStride <= numSteps
assert subinstanceStride >= 0
assert len(X) == len(Y)
assert Y.ndim == 2
assert Y.shape[1] == numClass
x_bagged = []
y_bagged = []
for i, point in enumerate(X[:, :, :]):
instanceList = []
start = 0
end = subinstanceLen
while True:
x = point[start:end, :]
if len(x) < subinstanceLen:
x_ = np.zeros([subinstanceLen, x.shape[1]])
x_[:len(x), :] = x[:, :]
x = x_
instanceList.append(x)
if end >= numSteps:
break
start += subinstanceStride
end += subinstanceStride
bag = np.array(instanceList)
numSubinstance = bag.shape[0]
label = Y[i]
label = np.argmax(label)
labelBag = np.zeros([numSubinstance, numClass])
labelBag[:, label] = 1
x_bagged.append(bag)
label = np.array(labelBag)
y_bagged.append(label)
return np.array(x_bagged), np.array(y_bagged)
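# Sanity check of the bagging arithmetic: with numSteps=20, subinstanceLen=8 and
# subinstanceStride=3, windows start at 0, 3, 6, 9, 12 (the loop breaks once
# end >= numSteps), i.e. 5 subinstances per bag -- matching the (3679, 5, 8, 22)
# x_train shape printed later.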
subinstanceLen=8
subinstanceStride=3
extractedDir = '/home/sf/data/SWELL-KW/'
rawDir = extractedDir + '/RAW'
sourceDir = rawDir
outDir = extractedDir + '/FG_%d_%d/' % (subinstanceLen, subinstanceStride)
os.makedirs(outDir, exist_ok=True)  # create the output directory if it doesn't already exist
makeEMIData(subinstanceLen, subinstanceStride, sourceDir, outDir)
from __future__ import print_function
import os
import sys
import tensorflow as tf
import numpy as np
os.environ['CUDA_VISIBLE_DEVICES'] ='0'
# FastGRNN and FastRNN imports
from edgeml.graph.rnn import EMI_DataPipeline
from edgeml.graph.rnn import EMI_FastGRNN
from edgeml.graph.rnn import EMI_FastRNN
from edgeml.trainer.emirnnTrainer import EMI_Trainer, EMI_Driver
import edgeml.utils
# Network parameters for our FastGRNN + FC Layer
NUM_HIDDEN = 128
NUM_TIMESTEPS = 8
NUM_FEATS = 22
FORGET_BIAS = 1.0
NUM_OUTPUT = 2
USE_DROPOUT = False
KEEP_PROB = 0.9
# Non-linearities can be chosen among "tanh, sigmoid, relu, quantTanh, quantSigm"
UPDATE_NL = "quantTanh"
GATE_NL = "quantSigm"
# Ranks of Parameter matrices for low-rank parameterisation to compress models.
WRANK = 5
URANK = 6
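# e.g. with this factorization the input matrix W becomes a product of a
# (NUM_FEATS x WRANK) and a (WRANK x NUM_HIDDEN) matrix:
# 22*5 + 5*128 = 750 parameters instead of 22*128 = 2816 for a full-rank W.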
# For dataset API
PREFETCH_NUM = 5
BATCH_SIZE = 32
# Number of epochs in *one iteration*
NUM_EPOCHS = 3
# Number of iterations in *one round*. After each iteration,
# the model is dumped to disk. At the end of the current
# round, the best model among all the dumped models in the
# current round is picked up.
NUM_ITER = 4
# A round consists of multiple training iterations and a belief
# update step using the best model from all of these iterations
NUM_ROUNDS = 30
# A staging directory to store models
MODEL_PREFIX = '/home/sf/data/SWELL-KW/FG_8_13/model-fgrnn'
###Output
_____no_output_____
###Markdown
Loading Data
###Code
# Loading the data
path='/home/sf/data/SWELL-KW/FG_8_3/'
x_train, y_train = np.load(path + 'x_train.npy'), np.load(path + 'y_train.npy')
x_test, y_test = np.load(path + 'x_test.npy'), np.load(path + 'y_test.npy')
x_val, y_val = np.load(path + 'x_val.npy'), np.load(path + 'y_val.npy')
# BAG_TEST, BAG_TRAIN, BAG_VAL represent bag_level labels. These are used for the label update
# step of EMI/MI RNN
BAG_TEST = np.argmax(y_test[:, 0, :], axis=1)
BAG_TRAIN = np.argmax(y_train[:, 0, :], axis=1)
BAG_VAL = np.argmax(y_val[:, 0, :], axis=1)
NUM_SUBINSTANCE = x_train.shape[1]
print("x_train shape is:", x_train.shape)
print("y_train shape is:", y_train.shape)
print("x_test shape is:", x_val.shape)
print("y_test shape is:", y_val.shape)
###Output
x_train shape is: (3679, 5, 8, 22)
y_train shape is: (3679, 5, 2)
x_val shape is: (409, 5, 8, 22)
y_val shape is: (409, 5, 2)
###Markdown
Computation Graph
###Code
# Define the linear secondary classifier
def createExtendedGraph(self, baseOutput, *args, **kwargs):
W1 = tf.Variable(np.random.normal(size=[NUM_HIDDEN, NUM_OUTPUT]).astype('float32'), name='W1')
B1 = tf.Variable(np.random.normal(size=[NUM_OUTPUT]).astype('float32'), name='B1')
y_cap = tf.add(tf.tensordot(baseOutput, W1, axes=1), B1, name='y_cap_tata')
self.output = y_cap
self.graphCreated = True
def restoreExtendedGraph(self, graph, *args, **kwargs):
y_cap = graph.get_tensor_by_name('y_cap_tata:0')
self.output = y_cap
self.graphCreated = True
def feedDictFunc(self, keep_prob=None, inference=False, **kwargs):
if inference is False:
feedDict = {self._emiGraph.keep_prob: keep_prob}
else:
feedDict = {self._emiGraph.keep_prob: 1.0}
return feedDict
EMI_FastGRNN._createExtendedGraph = createExtendedGraph
EMI_FastGRNN._restoreExtendedGraph = restoreExtendedGraph
if USE_DROPOUT is True:
EMI_FastGRNN.feedDictFunc = feedDictFunc
inputPipeline = EMI_DataPipeline(NUM_SUBINSTANCE, NUM_TIMESTEPS, NUM_FEATS, NUM_OUTPUT)
emiFastGRNN = EMI_FastGRNN(NUM_SUBINSTANCE, NUM_HIDDEN, NUM_TIMESTEPS, NUM_FEATS, wRank=WRANK, uRank=URANK,
gate_non_linearity=GATE_NL, update_non_linearity=UPDATE_NL, useDropout=USE_DROPOUT)
emiTrainer = EMI_Trainer(NUM_TIMESTEPS, NUM_OUTPUT, lossType='xentropy')
print("x_train shape is:", x_train.shape)
print("y_train shape is:", y_train.shape)
print("x_test shape is:", x_val.shape)
print("y_test shape is:", y_val.shape)
tf.reset_default_graph()
g1 = tf.Graph()
with g1.as_default():
# Obtain the iterators to each batch of the data
x_batch, y_batch = inputPipeline()
# Create the forward computation graph based on the iterators
y_cap = emiFastGRNN(x_batch)
# Create loss graphs and training routines
emiTrainer(y_cap, y_batch)
###Output
_____no_output_____
###Markdown
EMI Driver
###Code
with g1.as_default():
emiDriver = EMI_Driver(inputPipeline, emiFastGRNN, emiTrainer)
emiDriver.initializeSession(g1)
y_updated, modelStats = emiDriver.run(numClasses=NUM_OUTPUT, x_train=x_train,
y_train=y_train, bag_train=BAG_TRAIN,
x_val=x_val, y_val=y_val, bag_val=BAG_VAL,
numIter=NUM_ITER, keep_prob=KEEP_PROB,
numRounds=NUM_ROUNDS, batchSize=BATCH_SIZE,
numEpochs=NUM_EPOCHS, modelPrefix=MODEL_PREFIX,
fracEMI=0.5, updatePolicy='top-k', k=1)
###Output
Update policy: top-k
Training with MI-RNN loss for 15 rounds
Round: 0
Epoch 2 Batch 100 ( 330) Loss 0.09010 Acc 0.49375 | Val acc 0.65037 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1000
Epoch 2 Batch 100 ( 330) Loss 0.08737 Acc 0.53750 | Val acc 0.66748 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1001
Epoch 2 Batch 100 ( 330) Loss 0.08466 Acc 0.50625 | Val acc 0.68215 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1002
Epoch 2 Batch 100 ( 330) Loss 0.08016 Acc 0.57500 | Val acc 0.69193 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1003
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1003
Round: 1
Epoch 2 Batch 100 ( 330) Loss 0.07575 Acc 0.64375 | Val acc 0.70416 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1004
Epoch 2 Batch 100 ( 330) Loss 0.07176 Acc 0.71875 | Val acc 0.71883 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1005
Epoch 2 Batch 100 ( 330) Loss 0.06881 Acc 0.72500 | Val acc 0.71883 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1006
Epoch 2 Batch 100 ( 330) Loss 0.06622 Acc 0.72500 | Val acc 0.72372 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1007
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1007
Round: 2
Epoch 2 Batch 100 ( 330) Loss 0.06392 Acc 0.72500 | Val acc 0.72616 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1008
Epoch 2 Batch 100 ( 330) Loss 0.06182 Acc 0.73125 | Val acc 0.74083 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1009
Epoch 2 Batch 100 ( 330) Loss 0.06007 Acc 0.74375 | Val acc 0.75061 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1010
Epoch 2 Batch 100 ( 330) Loss 0.05848 Acc 0.76250 | Val acc 0.76528 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1011
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1011
Round: 3
Epoch 2 Batch 100 ( 330) Loss 0.05694 Acc 0.75000 | Val acc 0.76039 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1012
Epoch 2 Batch 100 ( 330) Loss 0.05462 Acc 0.73125 | Val acc 0.76528 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1013
Epoch 2 Batch 100 ( 330) Loss 0.05265 Acc 0.78750 | Val acc 0.77751 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1014
Epoch 2 Batch 100 ( 330) Loss 0.05050 Acc 0.78750 | Val acc 0.77262 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1015
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1014
Round: 4
Epoch 2 Batch 100 ( 330) Loss 0.05050 Acc 0.78750 | Val acc 0.77262 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1016
Epoch 2 Batch 100 ( 330) Loss 0.04971 Acc 0.77500 | Val acc 0.78240 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1017
Epoch 2 Batch 100 ( 330) Loss 0.04890 Acc 0.76250 | Val acc 0.77751 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1018
Epoch 2 Batch 100 ( 330) Loss 0.04847 Acc 0.76250 | Val acc 0.78484 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1019
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1019
Round: 5
Epoch 2 Batch 100 ( 330) Loss 0.04872 Acc 0.79375 | Val acc 0.78973 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1020
Epoch 2 Batch 100 ( 330) Loss 0.04798 Acc 0.79375 | Val acc 0.78973 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1021
Epoch 2 Batch 100 ( 330) Loss 0.04730 Acc 0.79375 | Val acc 0.79462 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1022
Epoch 2 Batch 100 ( 330) Loss 0.04622 Acc 0.80000 | Val acc 0.79707 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1023
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1023
Round: 6
Epoch 2 Batch 100 ( 330) Loss 0.04515 Acc 0.80000 | Val acc 0.80685 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1024
Epoch 2 Batch 100 ( 330) Loss 0.04360 Acc 0.80625 | Val acc 0.80929 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1025
Epoch 2 Batch 100 ( 330) Loss 0.04234 Acc 0.81875 | Val acc 0.80440 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1026
Epoch 2 Batch 100 ( 330) Loss 0.04157 Acc 0.83750 | Val acc 0.81907 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1027
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1027
Round: 7
Epoch 2 Batch 100 ( 330) Loss 0.04077 Acc 0.85000 | Val acc 0.81174 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1028
Epoch 2 Batch 100 ( 330) Loss 0.04000 Acc 0.85000 | Val acc 0.81418 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1029
Epoch 2 Batch 100 ( 330) Loss 0.03823 Acc 0.85000 | Val acc 0.81418 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1030
Epoch 2 Batch 100 ( 330) Loss 0.03676 Acc 0.85625 | Val acc 0.83619 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1031
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1031
Round: 8
Epoch 2 Batch 100 ( 330) Loss 0.03610 Acc 0.85625 | Val acc 0.83863 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1032
Epoch 2 Batch 100 ( 330) Loss 0.03498 Acc 0.86250 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1033
Epoch 2 Batch 100 ( 330) Loss 0.03424 Acc 0.87500 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1034
Epoch 2 Batch 100 ( 330) Loss 0.03375 Acc 0.88125 | Val acc 0.83863 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1035
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1033
Round: 9
Epoch 2 Batch 100 ( 330) Loss 0.03424 Acc 0.87500 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1036
Epoch 2 Batch 100 ( 330) Loss 0.03375 Acc 0.88125 | Val acc 0.83863 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1037
Epoch 2 Batch 100 ( 330) Loss 0.03238 Acc 0.87500 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1038
Epoch 2 Batch 100 ( 330) Loss 0.02963 Acc 0.90000 | Val acc 0.83619 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1039
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1036
Round: 10
Epoch 2 Batch 100 ( 330) Loss 0.03375 Acc 0.88125 | Val acc 0.83863 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1040
Epoch 2 Batch 100 ( 330) Loss 0.03238 Acc 0.87500 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1041
Epoch 2 Batch 100 ( 330) Loss 0.02963 Acc 0.90000 | Val acc 0.83619 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1042
Epoch 2 Batch 100 ( 330) Loss 0.03045 Acc 0.89375 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1043
INFO:tensorflow:Restoring parameters from /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn-1041
Round: 11
Epoch 2 Batch 100 ( 330) Loss 0.02963 Acc 0.90000 | Val acc 0.83619 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1044
Epoch 2 Batch 100 ( 330) Loss 0.03045 Acc 0.89375 | Val acc 0.84108 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1045
Epoch 2 Batch 100 ( 330) Loss 0.03176 Acc 0.88750 | Val acc 0.84352 | Model saved to /home/sf/data/SWELL-KW/FG_8_13/model-fgrnn, global_step 1046
###Markdown
Evaluating the trained model
###Code
# Early Prediction Policy: We make an early prediction based on the predicted class
# probability. If the predicted class probability > minProb at some step, we make
# a prediction at that step.
def earlyPolicy_minProb(instanceOut, minProb, **kwargs):
assert instanceOut.ndim == 2
classes = np.argmax(instanceOut, axis=1)
prob = np.max(instanceOut, axis=1)
index = np.where(prob >= minProb)[0]
if len(index) == 0:
assert (len(instanceOut) - 1) == (len(classes) - 1)
return classes[-1], len(instanceOut) - 1
index = index[0]
return classes[index], index
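# Illustrative example (hypothetical numbers): for per-step class probabilities
# [[0.6, 0.4], [0.3, 0.7], [0.01, 0.99]] and minProb=0.99, the first step whose
# max probability reaches 0.99 is step 2, so the policy returns (class 1, step 2).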
def getEarlySaving(predictionStep, numTimeSteps, returnTotal=False):
predictionStep = predictionStep + 1
predictionStep = np.reshape(predictionStep, -1)
totalSteps = np.sum(predictionStep)
maxSteps = len(predictionStep) * numTimeSteps
savings = 1.0 - (totalSteps / maxSteps)
if returnTotal:
return savings, totalSteps
return savings
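# Illustrative example (hypothetical numbers): if the policy fired at step
# indices [3, 7, 1] with numTimeSteps=8, savings = 1 - (4+8+2)/(3*8) ~ 0.42.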
k = 2
predictions, predictionStep = emiDriver.getInstancePredictions(x_test, y_test, earlyPolicy_minProb, minProb=0.99)
bagPredictions = emiDriver.getBagPredictions(predictions, minSubsequenceLen=k, numClass=NUM_OUTPUT)
print('Accuracy at k = %d: %f' % (k, np.mean((bagPredictions == BAG_TEST).astype(int))))
print('Additional savings: %f' % getEarlySaving(predictionStep, NUM_TIMESTEPS))
# A slightly more detailed analysis method is provided.
df = emiDriver.analyseModel(predictions, BAG_TEST, NUM_SUBINSTANCE, NUM_OUTPUT)
###Output
len acc macro-fsc macro-pre macro-rec micro-fsc micro-pre \
0 1 0.860078 0.859642 0.868740 0.862055 0.860078 0.860078
1 2 0.868885 0.868860 0.870591 0.869796 0.868885 0.868885
2 3 0.875734 0.875619 0.875752 0.875529 0.875734 0.875734
3 4 0.866928 0.866436 0.869102 0.865945 0.866928 0.866928
4 5 0.863992 0.862807 0.871518 0.862185 0.863992 0.863992
micro-rec fscore_01
0 0.860078 0.867470
1 0.868885 0.870656
2 0.875734 0.871847
3 0.866928 0.858333
4 0.863992 0.850054
Max accuracy 0.875734 at subsequencelength 3
Max micro-f 0.875734 at subsequencelength 3
Micro-precision 0.875734 at subsequencelength 3
Micro-recall 0.875734 at subsequencelength 3
Max macro-f 0.875619 at subsequencelength 3
macro-precision 0.875752 at subsequencelength 3
macro-recall 0.875529 at subsequencelength 3
###Markdown
Picking the best model
###Code
k=3
emiDriver.loadSavedGraphToNewSession(MODEL_PREFIX , 1032)
devnull = open(os.devnull, 'r')
for val in modelStats:
round_, acc, modelPrefix, globalStep = val
emiDriver.loadSavedGraphToNewSession(modelPrefix, globalStep, redirFile=devnull)
predictions, predictionStep = emiDriver.getInstancePredictions(x_test, y_test, earlyPolicy_minProb,
minProb=0.99, keep_prob=1.0)
bagPredictions = emiDriver.getBagPredictions(predictions, minSubsequenceLen=k, numClass=NUM_OUTPUT)
print("Round: %2d, Validation accuracy: %.4f" % (round_, acc), end='')
print(', Test Accuracy (k = %d): %f, ' % (k, np.mean((bagPredictions == BAG_TEST).astype(int))), end='')
print('Additional savings: %f' % getEarlySaving(predictionStep, NUM_TIMESTEPS))
MODEL_PREFIX = '/home/sf/data/SWELL-KW/FG_8_13/model-fgrnn'
import time
k=2
start = time.time()
emiDriver.loadSavedGraphToNewSession(MODEL_PREFIX , 1118)
predictions, predictionStep = emiDriver.getInstancePredictions(x_test, y_test, earlyPolicy_minProb,
                                                              minProb=0.99, keep_prob=1.0)
bagPredictions = emiDriver.getBagPredictions(predictions, minSubsequenceLen=k, numClass=NUM_OUTPUT)
end = time.time()
print(end - start)  # elapsed seconds
print('Accuracy at k = %d: %f' % (k, np.mean((bagPredictions == BAG_TEST).astype(int))))
params = {
"NUM_HIDDEN" : 128,
"NUM_TIMESTEPS" : 8, #subinstance length.
"NUM_FEATS" : 22,
"FORGET_BIAS" : 1.0,
"UPDATE_NL" : "quantTanh",
"GATE_NL" : "quantSigm",
"NUM_OUTPUT" : 3,
"WRANK" : 5,
"URANK" : 6,
"USE_DROPOUT" : False,
"KEEP_PROB" : 0.9,
"PREFETCH_NUM" : 5,
"BATCH_SIZE" : 32,
"NUM_EPOCHS" : 2,
"NUM_ITER" : 4,
"NUM_ROUNDS" : 10,
"MODEL_PREFIX" : '/home/sf/data/DREAMER/Dominance/48_16/models/Fast-GRNN/model-fgrnn'
}
fgrnn_dict = {**params}
fgrnn_dict["k"] = k
fgrnn_dict["accuracy"] = np.mean((bagPredictions == BAG_TEST).astype(int))
fgrnn_dict["total_savings"] = getEarlySaving(predictionStep, NUM_TIMESTEPS)
fgrnn_dict["y_test"] = BAG_TEST
fgrnn_dict["y_pred"] = bagPredictions
# A slightly more detailed analysis method is provided.
df = emiDriver.analyseModel(predictions, BAG_TEST, NUM_SUBINSTANCE, NUM_OUTPUT)
print (tabulate(df, headers=list(df.columns), tablefmt='grid'))
dirname = "/home/sf/data/SWELL-KW/"
pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
print ("Results for this run have been saved at" , dirname, ".")
now = datetime.datetime.now()
filename = list((str(now.year),"-",str(now.month),"-",str(now.day),"|",str(now.hour),"-",str(now.minute)))
filename = ''.join(filename)
#Save the dictionary containing the params and the results.
pkl.dump(fgrnn_dict,open(dirname + filename + ".pkl",mode='wb'))
###Output
Results for this run have been saved at /home/sf/data/SWELL-KW/ .
|
Pandas_Tutorial.ipynb | ###Markdown
Section 3 of: https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python

Loading in Data
###Code
import pandas as pd
!wget https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv
df = pd.read_csv('sbux.csv')
# using the URL directly works too!
df = pd.read_csv('https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv')
type(df)
!head sbux.csv
df.head()
df.head(10)
df.tail()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1259 entries, 0 to 1258
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 date 1259 non-null object
1 open 1259 non-null float64
2 high 1259 non-null float64
3 low 1259 non-null float64
4 close 1259 non-null float64
5 volume 1259 non-null int64
6 Name 1259 non-null object
dtypes: float64(4), int64(1), object(2)
memory usage: 69.0+ KB
###Markdown
Selecting Rows and Columns
###Code
df[0,0] # doesn't work
df.columns
# columns can be assigned
df.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'name']
df['open']
df[['open', 'close']]
type(df['open'])
type(df[['open', 'close']])
df.iloc[0]
df.loc[0]
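# iloc selects by integer position, loc by index label; with the default
# RangeIndex the two coincide, which is why df.iloc[0] and df.loc[0] match here.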
df2 = pd.read_csv('sbux.csv', index_col='date')
df2.head()
df2.loc['2013-02-08']
type(df2.loc['2013-02-08'])
df[df['open'] > 64]
df[df['name'] != 'SBUX']
df['name'] != 'SBUX'
type(df['name'] != 'SBUX')
# by the way, this works with Numpy too
import numpy as np
A = np.arange(10)
A
A[A % 2 == 0]
# don't want "objects" when we're doing math!
df.values
A = df[['open', 'close']].values
A
type(A)
# write a dataframe to file
smalldf = df[['open', 'close']]
smalldf.to_csv('output.csv')
!head output.csv
# what if we don't want an index column?
smalldf.to_csv('output.csv', index=False)
!head output.csv
###Output
open,close
27.92,28.185
28.26,28.07
28.0,28.13
28.23,27.915
27.765,27.775
27.805,27.17
27.18,27.225
27.3,26.655
26.535,26.675
###Markdown
The apply() function
###Code
def date_to_year(row):
return int(row['date'].split('-')[0])
df.apply(date_to_year, axis=1)
df['year'] = df.apply(date_to_year, axis=1)
df.head()
###Output
_____no_output_____
###Markdown
Plotting with Pandas
###Code
df['open'].hist();
df['open'].plot();
df[['open', 'high', 'low', 'close']].plot.box();
from pandas.plotting import scatter_matrix
scatter_matrix(df[['open', 'high', 'low', 'close']],
alpha=0.2, figsize=(6, 6));
###Output
_____no_output_____ |
_draft_notebooks/2020-11-25-introduction-to-self-supervision.ipynb | ###Markdown
Introduction to self-supervised visual learning> A short introduction to self-supervised visual-based learning methods.- toc: true- badges: true- comments: true- categories: self-supervised-learning- hide: false- search_exclude: false
###Code
#hide
import graphviz
import pathlib
import shutil
from IPython.display import SVG
asset_path = pathlib.Path("./2020-11-25")
asset_path.mkdir(exist_ok=True)
def generate_graph(
inp,
fname="tmp",
path=asset_path,
engine="dot",
mode="LR",
) -> str:
"""
Generates and renders a DOT graph as an SVG file.
Return the location of the saved file.
"""
    # Rendering directly into a WSL path raises an error,
    # so first create the file in the home directory and then move it
tmp_path = pathlib.Path("~").expanduser()
graph = graphviz.Source(
source=f'digraph G{{ rankdir="{mode}" {inp} ;}}',
filename=fname,
format="svg",
directory=tmp_path,
engine=engine
)
loc = graph.render()
target_loc = asset_path / f"{fname}.svg"
shutil.move(loc, target_loc)
return target_loc
###Output
_____no_output_____
###Markdown
About

After understanding the remote sensing data we will be working with, we will focus on the deep-learning part of my master thesis, specifically self-supervised learning.

For the following deep-learning topics, it would be helpful to have a general understanding of what deep learning, or more generally, machine learning is. Still, I will try my best to keep the content comfortable enough for interested readers to follow along. :relaxed:

There are many great introductory resources and courses for machine learning! For a quick introduction to deep learning, I highly recommend [3Blue1Brown's](https://www.youtube.com/c/3blue1brown) series on neural networks:

> youtube: https://www.youtube.com/watch?v=aircAruvnKk&list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi

3Blue1Brown's tutorial on neural networks

A different, highly compressed resource for machine learning in general is [The Hundred-Page Machine Learning Book](https://leanpub.com/theMLbook) by Andriy Burkov.

With that out of the way, let's take a quick refresher on learning methods and see what *self-supervised* learning methods are.

Learning Methods

In general, there are three primary methods to train deep neural networks:

- Supervised Learning
- Unsupervised Learning
- Reinforcement Learning

Supervised Learning

Most current state-of-the-art computer vision applications utilize *supervised learning methods*. In this setting, the input data are *labeled* images. Humans annotate these labels. A simple scenario would be a dataset for a classification application with the two classes "dog" and "cat". Here, a human would look at Fig. 1 and conclude that it should belong to the category "dog" and annotate it as such.

Fig. 1: Example image of a dog (Image by Karen Warfel from Pixabay)

This process is repeated for every image in the dataset until all images are labeled. Then the learning process starts. The model tries to predict the correct label, and *learns* from its mistakes by comparing its predictions to the *ground-truth* labels.
###Code
#hide
generate_graph(
"""
ordering=in
nodesep=0.60
fontsize=12
Model[shape=box style=rounded width=1 height=0.7 label=Model]
Data -> Model -> Prediction [weight=10]; "Human Labels" -> Loss;
Prediction -> Loss;
Loss -> Model [label="Update" tailport=s headport=s]""",
fname="supervised-learning"
);
###Output
_____no_output_____
###Markdown
Fig. 2: Supervised training loop

Fig. 2 shows the general learning, or training, loop of a supervised method.

1. Data is fed into the Model. For example, many images of dogs and cats.
2. The model *predicts* an output. In our scenario, it could guess that the current image is a dog.
3. The prediction of the model is compared to the *correct* label. The difference between the correct label and the prediction is mathematically expressed as a loss.
4. The loss is then used to tell the model how *wrong* the prediction was and is used to update the model's parameters. This update procedure is the *learning* part.

Although supervised learning leads to the highest model performance, there is one significant limitation: cost. The cost of annotating all images is immense. The standard large-scale image dataset used to compare research results, ImageNet {% cite Russakovsky2015 %}, has more than 1.3 million labeled images with 1,000 classes! These images have to be manually annotated and verified. The necessary steps to gather data for supervised learning can be seen in Fig. 3. The annotation process is even worse for video datasets, as these have a temporal dimension.
###Code
#hide
generate_graph(
"""
Labeling[color="red"]
"Data acquisition" -> Verifying;
"Data acquisition" -> Cleaning;
"Data acquisition" -> Preprocessing;
Verifying -> Labeling;
Cleaning -> Labeling;
Preprocessing -> Labeling;
Labeling -> "Re-Verify"
""",
fname="data-gathering-label"
)
###Output
_____no_output_____
###Markdown
Fig. 3: Data gathering process for supervised learning methods

An alternative is unsupervised learning, which does not reach the same model performance as supervised learning (yet). But unsupervised learning has one significant practical advantage...

Unsupervised learning

Unsupervised learning methods do not require *any* human-annotated labels. The specific methods use different approaches to *learn* from the data itself. They iterate and update their predictions in various ways and usually define their loss based on their previous predictions. The process of iterating through the data and *learning* from some definition of error/loss remains. Compare Fig. 2 with Fig. 4.
###Code
#hide
generate_graph(
"""
ordering=in
nodesep=0.60
fontsize=12
Model[shape=box style=rounded width=1 height=0.7 label=Model]
Data -> Model -> Prediction [weight=10];
Prediction -> Loss [weight=10];
Loss -> Model [label="Update" tailport=s headport=s]""",
fname="unsupervised-learning"
);
###Output
_____no_output_____
###Markdown
Fig. 4: Unsupervised training loop

A classic unsupervised machine learning method is [k-means clustering](https://en.wikipedia.org/wiki/K-means_clustering). Here the data is grouped into clusters of data points that are close to each other. What these clusters *represent* is not essential for the algorithm. The interpretation is left for human evaluation. Fig. 5 shows the result of k-means on two-dimensional data. As we can see, no information about the data points themselves is required. The algorithm simply uses the *distance* between the points to *group* them. The main idea is that these groups have something *interesting* in common and belong together.

Fig. 5: Example solution of k-means (Image by Akshay Singhal from Gatevidyalay)

In the context of deep learning, the time spent to train unsupervised models is generally similar to the training time of supervised methods. No time is saved during the training process! The main reason why unsupervised learning methods are so interesting is that they don't require any labels! The absence of labels reduces the overall cost and time to generate the required data and *may* lead to faster production times. One promising subset of unsupervised learning methods for deep learning is called *self-supervised* learning. We will take a closer look at self-supervised learning in the next section. Before that, let's look at the last significant learning approach: reinforcement learning.

Reinforcement learning

Reinforcement learning is very different from the previous learning methods due to the nature of its learning procedure. Reinforcement learning does not learn from *static* data but from interactions with its *environment* as an *actor*, see Fig. 6.
###Code
#hide
generate_graph(
"""
ordering=in
nodesep=0.60
fontsize=12
Agent[shape=box style=rounded width=1 height=0.7 label=Agent]
Agent -> Environment [label="interacts" headport=n tailport=n weight=0];
Environment -> Agent [label="Reward & State update" headport=s tailport=s weight=0]
""",
fname="reinforcement-learning"
);
###Output
_____no_output_____
###Markdown
Fig. 6: Reinforcement training loop

In short, the model tries to learn the actions which will get the highest reward. Classical variations use reinforcement learning to beat computer or board games. One model with high media coverage was [AlphaGo](https://en.wikipedia.org/wiki/AlphaGo). AlphaGo beat the 18-time world champion Lee Sedol in the board game Go, a game that was too hard for machines to master for a very long time. Because reinforcement learning is so very different, we won't be going into any more detail, even if it is a fascinating subject. Let's go back to the previously and briefly introduced unsupervised learning method, *self-supervised* learning, as this method will be sticking with us for a very long time.
###Code
#hide
generate_graph(
"""
ordering=in
nodesep=0.60
fontsize=12
Model[shape=box style=rounded width=1 height=0.7 label=Model]
Data -> Model -> Prediction [weight=10];
"Pretext\ntask" -> "Pseudolabels" [weight=10];
"Pseudolabels" -> Loss;
Prediction -> Loss;
Loss -> Model [label="Update" tailport=s headport=s]""",
fname="self-supervised-learning"
);
###Output
_____no_output_____
###Markdown
Fig. 7: Self-supervised training loop

In my opinion, an ingenious idea was to, first, simply rotate the images by either 0°, 90°, 180°, or 270°, and then to let the network guess by how much the image was rotated. {% cite Gidaris2018 %} The idea is that the model learns the objects' visual features to recognize the _correct_ orientation of the image. In the end, this is nothing more than a classification task, where the pseudolabels are generated by randomly rotating the image. See Fig. 8 for an overview. The pretext task isn't limited to creating pseudolabels from the input data. Some variations combine the input with the model's prediction to create pseudolabels, and others combine multiple pretext tasks.
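As a minimal sketch of how such rotation pseudolabels can be generated (my own illustration with numpy, not code from the cited paper):

```python
import numpy as np

def make_rotation_task(image: np.ndarray):
    """Return four rotated copies of an image plus their pseudolabels
    (0, 1, 2, 3 standing for 0°, 90°, 180° and 270°)."""
    rotations = [np.rot90(image, k) for k in range(4)]  # k quarter-turns counter-clockwise
    pseudolabels = np.arange(4)                         # the label *is* the rotation
    return np.stack(rotations), pseudolabels
```

The classifier is then trained to predict the pseudolabel from the rotated image alone.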
###Code
#hide
SVG(generate_graph("""
Model[shape=box style=rounded width=1 height=0.7 label=Model]
Image -> "Randomly rotate";
"Randomly rotate" -> "Rotated Image";
"Rotated Image" -> Model[weight=10];
Model -> Prediction[weight=8];
Prediction-> Loss[weight=8];
"Randomly rotate" -> "Pseudolabel=90°"[weight=0, headport=n];
"Pseudolabel=90°" -> Loss[weight=0 headport=e tailport=s];
Loss -> Model[xlabel="Update ", tailport=w, headport=w]
""",
mode="tb",
fname="rot-chart"
))
###Output
_____no_output_____
###Markdown
Fig. 8: Rotation based self-supervised learning

But what is the result of such an unsupervised training loop? In short, a *trained* model. The model should detect and differentiate objects from each other without being explicitly taught what these objects are. Some applications then work with the trained model without changing it. Others *finetune* the model on a similar, small labeled dataset. In the finetuning procedure, only minor changes to the model are made. Here, we are trying to tell the model what objects we are *interested* in. Looking back to our previous example, we could train a model on random images from [Flickr](https://www.flickr.com/explore) without labeling them. Afterwards, we finetune our model to differentiate between dogs and cats by supplying a small labeled dataset. The model should perform very well without a lot of training because it already *knows* how to differentiate dogs and cats by looking at random images from Flickr. At least, this is what we hope. Our dataset *pushes* the model to focus on a specific task.

In the research community, self-supervised learning methods are evaluated by a similar procedure. First, the model is trained on a dataset without using any human-annotated labels and is then finetuned on a *downstream task* of a different dataset. A possible downstream task would be to classify dogs and cats. The score of the downstream task is used as a quantitative measure of the generalizability of the model. The authors Jing _et al._ {% cite Jing2020 %} give a very detailed overview of the various self-supervision approaches. For images, there are three significant types of pretext tasks, as shown in Fig. 9.
###Code
#hide
generate_graph(
"""
"Pretext tasks" -> "Generation-based\nmethods"
"Pretext tasks" -> "Context-based methods"
"Pretext tasks" -> "Free Semantic\nLabel-based methods"
""",
mode="TB",
fname="pretext-tasks"
);
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/term-frequency-inverse-document-frequency-checkpoint.ipynb | ###Markdown
Term Frequency Inverse Document Frequency (TF-IDF) Vector Representation

__Anish Sachdeva (DTU/2K16/MC/013)__

__Natural Language Processing (IT-425)__

In this notebook we will extract TF-IDF vector representations from a given corpus, where our corpus will be my resume. We will divide the corpus into 6 different parts and each part will be treated as a document. The vector for a given word will be a $1 \times 6$ vector and each column will represent the frequency count of how many times the word occurred in that particular document.

1. Importing Required Packages
###Code
import pprint
from collections import Counter
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import numpy as np
import pandas
###Output
[nltk_data] Downloading package stopwords to
[nltk_data] C:\Users\anish\AppData\Roaming\nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
2. Importing the Corpus (Resume)
###Code
resume_file = open('../assets/resume.txt', 'r')
resume = resume_file.read().lower()
resume_file.close()
print(resume)
###Output
anish sachdeva
software developer + clean code enthusiast
phone : 8287428181
email : [email protected]
home : sandesh vihar, pitampura, new delhi - 110034
date of birth : 7th april 1998
languages : english, hindi, french
work experience
what after college (4 months)
delhi, india
creating content to teach core java and python with data structures and algorithms and giving online classes to students.
giving python classes workshops to students all around india and teaching core data structures and the python api
with emphasis on data structures, algorithms and problem solving. see a sample python batch here:
https://github.com/anishlearnstocode/python-workshop-6
also teaching java to students in batches of 10 days, where the full java api and data types are covered along with many
important algorithms are aso taught. see a sample java batch here: https://github.com/anishlearnstocode/java-wac-batch-32
summer research fellow at university of auckland (2 months)
auckland, new zealand
worked on geometry of mobius transformations, differential geometry under dr. pedram hekmati at the department of
mathematics, university of auckland. worked on various topics in mathematics such as abelian group theory,
measure theory, graph theory and differential geometry.
attended lectures and conferences with notable speakers from throughout academia and the industry. met up with ed witten
who is currently at the forefront of applied mathematics in physics and mathematical topology in higher dimensional
spaces.
met with scientist from microsoft quantum research that are working on cutting edge research and using foundational
group theory and mobius transformations in real world practical applications.
software developer at cern (14 months)
cern, geneva, switzerland
worked in the core platforms team of the fap-bc group. part of an agile team of developers that maintains and adds core
functionality to applications used internally at cern by hr, financial, administrative and other departments including
scientific.
worked on legacy applications that comprise of single and some times multiple frameworks such as java spring, boot,
hibernate and java ee. also worked with google polymer 1.0 and jsp on the client side.
maintained cern's electronic document handing system application with >1m loc that comprising of multiple frameworks
and created ~20 years ago. worked on feature requests, support requests and incidents and also release cycles.
while at cern, i also engaged socially and participated in self growth outside the work environment. i was part of the
department band as lead singer and guitarist. i also worked on my french and learnt it till a2 level. i participated
in many workshops, and volunteered as a participant and helper in many activities related to programming, robotics
etc.
teaching assistant (4 months)
coding ninjas, delhi
served as the teaching assistant to nucleus - java with ds batch, under mr. ankur kumar. worked on creating course
content and quizzes for online platform of coding ninjas for java. helped students in core data structures and algorithms
concepts in java.
education
delhi technological university (2016 - 2021)
bachelors of technology mathematics and computing
cgpa: 9.2
the heritage school rohini (2004 - 2016)
physics, chemistry, maths + computer science with english
senior secondary: 94.8%
secondary: 9.8 cgpa
technical skills
java + algorithms and data structures
mean stack web development
python + machine learning
matlab + octave
mysql, postgressql & mongodb
other skills
ms office, adobe photoshop, latex + mitex
university courses
applied mathematics i, ii, iii
linear algebra + probability & statistics + stochastic processes + discrete maths
computer organization & architecture + data structures + algorithm design and analysis + dbms + os
computer vision + nlp
important links
https://www.linkedin.com/in/anishsachdeva1998/
https://github.com/anishlearnstocode
https://www.hackerrank.com/anishviewer
honours and awards
mitacs globalink scholarship cohort of 2020
summer research fellowship university of auckland mathematics department
technical student @ cern
google india challenge scholarship
certifications
trinity college of london plectrum guitar grade 4 (distinction)
trinity college of london plectrum guitar grade 3 (merit)
trinity college of london plectrum guitar grade 2 (distinction)
trinity college of london plectrum guitar grade 1 (distinction)
french a2.1 level from cern
java data structures and algorithms @ coding ninjas
web development with ruby on rails @ coding ninjas
competitive programming @ coding ninjas
###Markdown
3. Tokenizing The Resume

We now create a utility function called `tokenize` that will take in a corpus (the resume in this case) and will return a list of tokens after removing stopwords and punctuation. It will only consider alphabetic words, and all numbers have also been ignored.
###Code
# utility function for tokenizing
def tokenize(document: str, stopwords_en=stopwords.words('english'), tokenizer=nltk.RegexpTokenizer(r'\w+')):
document = document.lower()
return [token for token in tokenizer.tokenize(document) if token not in stopwords_en and token.isalpha()]
# tokenizing the resume
tokens = tokenize(resume)
# see first 30 tokens
print(tokens[: 30])
###Output
['anish', 'sachdeva', 'software', 'developer', 'clean', 'code', 'enthusiast', 'phone', 'email', 'outlook', 'com', 'home', 'sandesh', 'vihar', 'pitampura', 'new', 'delhi', 'date', 'birth', 'april', 'languages', 'english', 'hindi', 'french', 'work', 'experience', 'college', 'months', 'delhi', 'india']
###Markdown
4. Dividing the Corpus Into 6 Documents
###Code
k = len(tokens) // 6
documents = []
for i in range(5):
documents.append(tokens[i * k: (i + 1) * k])
documents.append(tokens[5 * k:])
# the 6th document is
pprint.pp(documents[5])
###Output
['links',
'https',
'www',
'linkedin',
'com',
'https',
'github',
'com',
'anishlearnstocode',
'https',
'www',
'hackerrank',
'com',
'anishviewer',
'honours',
'awards',
'mitacs',
'globalink',
'scholarship',
'cohort',
'summer',
'research',
'fellowship',
'university',
'auckland',
'mathematics',
'department',
'technical',
'student',
'cern',
'google',
'india',
'challenge',
'scholarship',
'certifications',
'trinity',
'college',
'london',
'plectrum',
'guitar',
'grade',
'distinction',
'trinity',
'college',
'london',
'plectrum',
'guitar',
'grade',
'merit',
'trinity',
'college',
'london',
'plectrum',
'guitar',
'grade',
'distinction',
'trinity',
'college',
'london',
'plectrum',
'guitar',
'grade',
'distinction',
'french',
'level',
'cern',
'java',
'data',
'structures',
'algorithms',
'coding',
'ninjas',
'web',
'development',
'ruby',
'rails',
'coding',
'ninjas',
'competitive',
'programming',
'coding',
'ninjas']
###Markdown
5. Calculating Most Common 5 Tokens From Each Document & Storing Frequency Tables for Each Document
###Code
most_common = set()
document_frequencies = []
for document in documents:
frequencies = Counter(document)
document_frequencies.append(frequencies)
for word, frequency in frequencies.most_common(5):
most_common.add(word)
# number of tokens we have selected, as it isn't necessary to obtain 30 unique tokens
print('Number of tokens:', len(most_common))
# The tokens from the first document are
print('Tokens from first document:', document_frequencies[0].most_common(5))
# The selected tokens are
pprint.pp(most_common)
###Output
{'algorithms',
'also',
'applications',
'auckland',
'cern',
'college',
'com',
'computer',
'data',
'geometry',
'group',
'guitar',
'java',
'london',
'many',
'mathematics',
'participated',
'plectrum',
'python',
'requests',
'research',
'structures',
'students',
'theory',
'trinity',
'university',
'worked'}
###Markdown
6. Calculating Number of Documents a Keyword Appears In

The TF-IDF vector for a given word is given by:

$$tfidf(w, d) = tf(w, d) \times idf(w) \\ idf(w) = \log{\frac{N_t}{N_w}}$$

where $N_t$ is the total number of documents and $N_w$ is the total number of documents containing the keyword $w$ (so the idf factor depends only on the word, not the document).

We now create a dictionary `N_w` (_str_ $\rightarrow$ _int_) which will store the number of documents a word $w$ occurs in.
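As a quick sanity check of the formula (note that `np.log`, used in the code below, is the natural logarithm): a word that occurs 5 times in one document and appears in $N_w = 2$ of the $N_t = 6$ documents contributes $$5 \cdot \ln{\frac{6}{2}} = 5\ln{3} \approx 5.49$$ to that document's component of its vector.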
###Code
N_t = 6
N_w = {}
for word in most_common:
count = 0
for frequencies in document_frequencies:
count = count + (word in frequencies)
N_w[word] = count
# seeing the N_w map for all the selected words
pprint.pp(N_w)
###Output
{'requests': 1,
'geometry': 1,
'mathematics': 3,
'university': 3,
'algorithms': 4,
'java': 6,
'college': 2,
'group': 2,
'many': 2,
'com': 3,
'theory': 2,
'python': 2,
'plectrum': 1,
'students': 2,
'london': 1,
'research': 3,
'cern': 3,
'trinity': 1,
'participated': 1,
'guitar': 1,
'data': 5,
'applications': 1,
'worked': 3,
'also': 3,
'structures': 3,
'auckland': 2,
'computer': 1}
###Markdown
We notice above that __java__ is the only word in the given list to appear in all 6 documents.

7. Computing the TF-IDF Vectors
###Code
vectors = {}
for word in most_common:
vector = [0] * 6
for index, frequencies in enumerate(document_frequencies):
vector[index] = frequencies[word] * np.log(N_t / N_w[word])
vectors[word] = vector
# Let's see the vector output for a few words
print(vectors['java'])
print(vectors['students'])
# you can also test it out with a word of your choice, try below:
word = 'python'
print(vectors.get(word, [0] * 6))
###Output
[5.493061443340549, 0.0, 0.0, 0.0, 1.0986122886681098, 0.0]
###Markdown
8. Representing The Vectors in a Tabular Form
###Code
table = pandas.DataFrame(data=vectors)
print(table.iloc[:, 0:7])
print(table.iloc[:, 7:14])
print(table.iloc[:, 14:20])
print(table.iloc[:, 20:])
###Output
data applications worked also structures auckland computer
0 0.546965 0.000000 0.000000 0.693147 2.079442 0.000000 0.000000
1 0.182322 0.000000 1.386294 0.000000 0.000000 3.295837 0.000000
2 0.000000 5.375278 2.079442 0.693147 0.000000 0.000000 0.000000
3 0.182322 0.000000 2.079442 2.079442 0.000000 0.000000 0.000000
4 0.364643 0.000000 0.000000 0.000000 2.079442 0.000000 5.375278
5 0.182322 0.000000 0.000000 0.000000 0.693147 1.098612 0.000000
|
cortilia_scraping.ipynb | ###Markdown
INSTALL LIBRARIES
###Code
# install packages for web scraping: Selenium (emulates a user on a web site) + Chromium
!pip install selenium
!apt-get update
!apt install chromium-chromedriver
!cp /usr/lib/chromium-browser/chromedriver /usr/bin
import sys #to setup the path of chromedriver
import logging
from selenium.webdriver.remote.remote_connection import LOGGER
LOGGER.setLevel(logging.WARNING)
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
from selenium import webdriver
from tqdm import tqdm_notebook as tqdm
import pandas
import json #to work with json file
import pprint #to print stuff in a more aesthetic way
#other stuff to work in a browser
chrome_options = webdriver.ChromeOptions() #With webdriver we can start the phantom browser, emulate the user navigation and scrape our data.
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")
###Output
_____no_output_____
###Markdown
PRODUCT SCRAPING FROM CORTILIA
###Code
wd = webdriver.Chrome('chromedriver', options=chrome_options) # an object that lets us interact with the web through requests
# Wrap what we wrote above in a function so we only need to call it
def parse_recipe(recipe):
id_prodotto=""
prodotto = ""
prezzo = ""
id_produttore = ""
peso = ""
try:
if(len(recipe.find_elements_by_css_selector("h3.title-ellipsis")) > 0):
prodotto = recipe.find_elements_by_css_selector("h3.title-ellipsis")[0].text
if(len(recipe.find_elements_by_css_selector("div.price")) > 0):
prezzo = recipe.find_elements_by_css_selector("div.price")[0].text
if(len(recipe.find_elements_by_css_selector("p>span.ellipsis>a")) > 0):
id_produttore =recipe.find_elements_by_css_selector("p>span.ellipsis>a")[0].get_attribute("href")
if(len(recipe.find_elements_by_css_selector("p>span.ellipsis")) > 0):
peso =recipe.find_elements_by_css_selector("p>span.ellipsis")[1].text
id_prodotto = recipe.find_elements_by_css_selector("a")[0].get_attribute("href")
except:
pass
return {'prodotto': prodotto,
'prezzo (€)': prezzo,
'id_produttore' : id_produttore,
'peso' : peso,
'id_prodotto':id_prodotto}
# LET'S TRY IT: put together everything done so far + add a progress bar via tqdm
detail_recipes = []
wd.get("https://www.cortilia.it/prodotti/frutta-verdura/verdura")
wd.save_screenshot(f'screenshot.png')
list_recipes = wd.find_elements_by_css_selector("div.product.big.cleared")
#category =
for recipe in tqdm(list_recipes):
detail_recipes.append(parse_recipe(recipe))
print(len(detail_recipes))
pprint.pprint(detail_recipes[0:10])
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
detail_recipes = []
detail_categories = []
categories = ['dispensa/passate-e-sughi','frutta-verdura/aromi-e-spezie','frutta-verdura/verdura','frutta-verdura/frutta-secca-e-semi', 'frutta-verdura/legumi','frutta-verdura/frutta',
'pesce-carne/pesce', 'pesce-carne/pollo-e-coniglio', 'pesce-carne/carni-bovine', 'pesce-carne/carni-suine',
'pesce-carne/carne-ovina', 'pesce-carne/hamburger','salumi-latticini/uova-latte-e-burro', 'salumi-latticini/formaggi-freschi','salumi-latticini/formaggi-stagionati', 'salumi-latticini/yogurt-e-dessert',
'pasta-pane-riso/pasta-fresca', 'pasta-pane-riso/pasta-secca','pasta-pane-riso/riso-e-cereali', 'pasta-pane-riso/pane-e-sostitutivi',
'dispensa/passate-e-sughi','dispensa/conserve-e-salse','dispensa/olio-e-condimenti','dispensa/farine-e-pasticceria', 'vino-bevande/vini-rossi-e-rosati',
'vino-bevande/vini-bianchi-e-bollicine','vino-bevande/acqua-e-bibite','vino-bevande/birra','vino-bevande/liquori-e-distillati','dolci-snack/caffe-te-e-tisane','dolci-snack/confetture-creme-e-miele',
'dolci-snack/cereali-e-fette']
for category in tqdm(categories):
wd.get(f"https://www.cortilia.it/prodotti/{category}")
time.sleep(3) # pause for 3 seconds after loading the page, to wait for all the products to load
wd.save_screenshot("check_browser.png")
list_recipes = wd.find_elements_by_css_selector("div.product.big.cleared") # find all product cards matching the specified selector
for recipe in list_recipes:
detail_recipes.append(parse_recipe(recipe))
detail_categories.append({'categoria':category})
print(len(detail_recipes))
print(len(detail_categories))
import pandas as pd
ds_ingredienti_cortilia = pd.DataFrame(detail_recipes)
ds_categories = pd.DataFrame(detail_categories)
ds_ingredienti_cortilia=ds_ingredienti_cortilia.join(ds_categories)
ds_ingredienti_cortilia.set_index("id_prodotto")
ds_ingredienti_cortilia.head()
#CLEANING
# remove the euro sign from the price column + convert the price to float
ds_ingredienti_cortilia['prezzo (€)']= ds_ingredienti_cortilia['prezzo (€)'].str.replace("€","")
ds_ingredienti_cortilia['prezzo (€)']= ds_ingredienti_cortilia['prezzo (€)'].str.strip()
ds_ingredienti_cortilia['prezzo (€)']= ds_ingredienti_cortilia['prezzo (€)'].str.replace(",",".")
ds_ingredienti_cortilia['prezzo (€)']= ds_ingredienti_cortilia['prezzo (€)'].astype(float)
# remove items whose names contain the words Box, Kit, Degustazione, Tris, Misto, Mix
remove_list =['Box','Kit','Degustazione','Tris','Misto','Mix']
ds_ingredienti_cortilia=ds_ingredienti_cortilia[~ds_ingredienti_cortilia['prodotto'].str.contains('|'.join(remove_list))]  # the product-name column is 'prodotto'
# clean each product's category
ds_ingredienti_cortilia['categoria']= ds_ingredienti_cortilia['categoria'].str.replace('.*/','')
ds_ingredienti_cortilia['categoria']= ds_ingredienti_cortilia['categoria'].str.replace('-',' ')
ds_ingredienti_cortilia.head()
len(ds_ingredienti_cortilia)
# extract the unit of measure from the weight column
ds_ingredienti_cortilia['unità di misura'] = ds_ingredienti_cortilia['peso'].str.extract(r'(kg$|g$|gr$|l$|lt$|ml$|cl$|q.b.$)', expand=False)
ds_ingredienti_cortilia['unità di misura'] = ds_ingredienti_cortilia['unità di misura'].str.strip()
ds_ingredienti_cortilia['unità di misura'] = ds_ingredienti_cortilia['unità di misura'].str.replace(".","")
ds_ingredienti_cortilia['peso'] = ds_ingredienti_cortilia['peso'].str.replace('(kg$|g$|gr$|l$|lt$|ml$|cl$|q.b.$)',"")
ds_ingredienti_cortilia['unità di misura'] = ds_ingredienti_cortilia['unità di misura'].str.replace("gr","g")
ds_ingredienti_cortilia.drop_duplicates(subset ="id_prodotto",
keep = False, inplace = True)
ds_ingredienti_cortilia.head()
ds_ingredienti_cortilia.to_csv("ds_prodotti_cortilia.csv")
###Output
_____no_output_____
###Markdown
PRODUCER SCRAPING FROM CORTILIA
###Code
def parse_producer(producer):
id_produttore = producer.find_elements_by_css_selector("a")[0].get_attribute("href")
nome = ""
indirizzo = ""
try:
if(len(producer.find_elements_by_css_selector("div.overlay>h3")) > 0):
nome = producer.find_elements_by_css_selector("div.overlay>h3")[0].text
if(len(producer.find_elements_by_css_selector("div.overlay>h4")) > 0):
indirizzo = producer.find_elements_by_css_selector("div.overlay>h4")[0].text
except:
pass
return {'nome': nome,
'indirizzo': indirizzo,
'id_produttore':id_produttore}
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
detail_producers = []
wd.get("https://www.cortilia.it/produttori/lista")
time.sleep(6) # pause for 6 seconds after loading the page, to wait for all the producers to load
wd.save_screenshot("check_browser.png")
wd.set_window_size(1920, 1080)
SCROLL_PAUSE_TIME = 2
# Get scroll height
last_height = wd.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
new_height = wd.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
list_producers = wd.find_elements_by_css_selector("div.pad-bot2.col-lg-3.col-md-4.col-sm-6") # find all the producer cards on the page
for producer in tqdm(list_producers):
detail_producers.append(parse_producer(producer))
print(len(detail_producers))
pprint.pprint(detail_producers[0:5])
import pandas as pd
ds_produttori_cortilia = pd.DataFrame(detail_producers)
ds_produttori_cortilia.set_index("id_produttore")
ds_produttori_cortilia.to_csv("ds_produttori_cortilia.csv")
###Output
_____no_output_____
###Markdown
EXTRACT OF LAT/LONG FROM PRODUCER ADDRESS
###Code
import pandas as pd
ds_produttori = pd.read_csv ("ds_produttori_cortilia.csv", index_col='id_produttore')
ds_produttori.isnull().sum()
# look up the latitude and longitude of every producer
import time
import requests
import json
import pprint
import pandas as pd
from tqdm import tqdm_notebook as tqdm
project_location = []
for project_id, project in tqdm(ds_produttori.iterrows(), total=ds_produttori.shape[0]):
address = project['indirizzo']
if(address is None):
        print(str(project_id) + " - missing address")
else:
key = "INSERTYOURKEY"
try:
geocode_url = f"http://www.mapquestapi.com/geocoding/v1/address?key={key}&location={address}"
response = requests.get(geocode_url)
if(response.status_code == 200):
geo = json.loads(response.text)
lat = geo['results'][0]['locations'][0]['latLng']['lat']
lng = geo['results'][0]['locations'][0]['latLng']['lng']
project_location.append({
"id_produttore": project_id,
"lat": lat,
"lng": lng
})
except Exception as e:
print(e)
print(len(project_location))
import pandas as pd
ds_indirizzo_produttori = pd.DataFrame(project_location)
ds_indirizzo_produttori.set_index("id_produttore")
ds_indirizzo_produttori.head()
ds_produttori_full = ds_produttori.merge(ds_indirizzo_produttori, on='id_produttore')
ds_produttori_full.set_index("id_produttore")
print(len(ds_produttori_full))
ds_produttori_full.head()
ds_produttori_full.isnull().sum()
ds_produttori_full.to_csv("ds_produttori_cortilia_full.csv")
###Output
_____no_output_____
###Markdown
MERGE PRODUCT WITH PRODUCER
###Code
import pandas as pd
ds_prodotti_cortilia = pd.read_csv("ds_prodotti_cortilia.csv")
ds_produttori_cortilia = pd.read_csv("ds_produttori_cortilia_full.csv",)
ds_cortilia = ds_produttori_cortilia.merge(ds_prodotti_cortilia, on='id_produttore')
ds_cortilia.set_index("id_prodotto")
print(len(ds_cortilia))
ds_cortilia.head()
len(ds_cortilia)
ds_cortilia.isnull().sum()
ds_cortilia.to_csv("ds_cortilia.csv")
###Output
_____no_output_____
###Markdown
LOAD DATA IN MONGODB FOR CLOUD STORAGE
###Code
!pip install pymongo
import pymongo
import json
import csv
from pymongo import UpdateOne
client = pymongo.MongoClient("mongodb://XXXX:[email protected]:27017,cluster0-shard-00-01.gghgg.mongodb.net:27017,cluster0-shard-00-02.gghgg.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-tob9ip-shard-0&authSource=admin&retryWrites=true&w=majority")
db = client.cortilia
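# note (added): $setOnInsert with upsert=True writes each document only when its
# key is not already present, so re-running the notebook does not duplicate rows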
records = json.loads(ds_prodotti_cortilia.T.to_json()).values()
upserts=[UpdateOne({'id_prodotto':x['id_prodotto']}, {'$setOnInsert':x}, upsert=True) for x in records]
db.sc_prodotti_cortilia.bulk_write(upserts)
records = json.loads(ds_produttori_cortilia.T.to_json()).values()
upserts=[UpdateOne({'id_produttore':x['id_produttore']}, {'$setOnInsert':x}, upsert=True) for x in records]
db.sc_produttori_cortilia.bulk_write(upserts)
records = json.loads(ds_cortilia.T.to_json()).values()
upserts=[UpdateOne({'id_prodotto':x['id_prodotto']}, {'$setOnInsert':x}, upsert=True) for x in records]
db.sc_cortilia.bulk_write(upserts)
records = json.loads(ds_produttori_full.T.to_json()).values()
upserts=[UpdateOne({'id_produttore':x['id_produttore']}, {'$setOnInsert':x}, upsert=True) for x in records]
db.st_produttori_cortilia_full.bulk_write(upserts)
records = json.loads(ds_cortilia_cleaned.T.to_json()).values()
upserts=[UpdateOne({'recipe_id':x['recipe_id']}, {'$setOnInsert':x}, upsert=True) for x in records]
db.st_cortilia_cleaned.bulk_write(upserts)
###Output
_____no_output_____ |
bitcoinOHLCV_generator.ipynb | ###Markdown
###Code
import pandas as pd
from tqdm import *
import matplotlib.pyplot as plt
import os
import requests
import numpy as np
"""
LIST OF EXCHANGES USED FOR DATA COLLECTION (ordered by BTC volume)
The exchanges were chosen by consulting BTC volume data from:
https://www.bitcointradevolume.com/
https://coinmarketcap.com/it/currencies/bitcoin/markets/
https://data.bitcoinity.org/markets/volume/30d?c=e&t=b
https://www.cryptocompare.com/coins/btc/analysis/USD
-Coinbase
-Bitfinex
-Kraken
-Bitstamp
-Itbit
-Okcoin
"""
def generateBitcoinOHLCV(exchange, compression, exchangeToFill):
"""
Download exchange data from bitcoincharts.com and building OHLCV candelsticks format
Formato dei dati (preso direttamente dal sito https://bitcoincharts.com/about/markets-api/):
HISTORICAL TRADE DATA
returns CSV:
unixtime,price,amount
Parameters
----------
exchange : str
Exchange's name from http://api.bitcoincharts.com/v1/csv/
compression : int
minutes for resampling
exchangeToFill : str
Name of exchange in which handle missing data
Returns
-------
historicalOHLCV : pd.DataFrame
Dataframe with historical OHLCV candlesticks
"""
bitcoinchartsUrl = "http://api.bitcoincharts.com/v1/csv/{}USD.csv.gz".format(exchange)#data in USD currency
print()
print("Downloading {} trade data from URL {}".format(exchange, bitcoinchartsUrl))
historicalTrades = pd.read_csv(bitcoinchartsUrl, names=["unixtime", "price", "amount"], compression='gzip')#reading csv from url
historicalTrades["datetime"] = pd.to_datetime(historicalTrades["unixtime"], unit="s") #conversion timestamp to datetime
historicalTrades = historicalTrades.drop("unixtime", axis=1).set_index("datetime").sort_index()
historicalOHLCV = historicalTrades
#build OHLCV historical data
historicalOHLCV["open"] = historicalTrades["price"]
historicalOHLCV["high"] = historicalTrades["price"]
historicalOHLCV["low"] = historicalTrades["price"]
historicalOHLCV["close"] = historicalTrades["price"]
historicalOHLCV["volume"] = historicalTrades["amount"]
historicalOHLCV = historicalOHLCV.drop(["price", "amount"], axis=1)
historicalOHLCV = historicalOHLCV.resample(str(compression) + "min").agg({"open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum"})
if(exchange == exchangeToFill):
historicalOHLCV = fillNaN(historicalOHLCV)
    historicalOHLCV = historicalOHLCV[~(historicalOHLCV.index < '2012-01-01 00:00:00')] # keep only data from 2012-01-01 onward
return historicalOHLCV[["open", "high", "low", "close", "volume"]]
def fillNaN(df):
df["open"] = df["open"].interpolate(method='slinear')
df["high"] = df["high"].interpolate(method='slinear')
df["low"] = df["low"].interpolate(method='slinear')
df["close"] = df["close"].interpolate(method='slinear')
df["volume"] = df["volume"].interpolate(method='slinear')
return df[["open", "high", "low", "close", "volume"]]
def aggregateExchangeData(list_dfs):
"""
Aggregate data from different exchanges to OHLCV candlesticks
Parameters
----------
list_fds : list of pd.DataFrame
List of exchanges OHLCV bars as returned by :func:`~generateBitcoinOHLCV`
Returns
-------
dfAggregate : pd.DataFrame
Dataframe containing aggregated OHLC bars from all exchanges
"""
print("Aggregating OHLCV history data from {} exchanges into a single dataframe".format(len(list_dfs)))
dfConcat = pd.concat(tuple(list_dfs))
grp = dfConcat.groupby(dfConcat.index)
    dfAggregate = grp[["open", "high", "low", "close"]].mean() # the values are computed as the mean across exchanges
# Round decimals
return dfAggregate.round({
"open": 4,
"high": 4,
"low": 4,
"close": 4,
})
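# usage sketch (added; the variable names here are hypothetical):
# aggregated = aggregateExchangeData([ohlcv_kraken, ohlcv_bitstamp])
# returns a single frame whose OHLC columns are per-timestamp means across exchanges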
def creaCartelle():
    # create the output folders for the csv data and the plots if they do not exist
    if not os.path.isdir("/home/csv"):
        os.makedirs("/home/csv")
    if not os.path.isdir("/home/csvPlot"):
        os.makedirs("/home/csvPlot")
def csv_plot(dfname, df, statsToPlot):
"""
    Graphical representation of the dataframe; saves the image to /home/csvPlot/
Parameters
----------
dfname : string
Exchange's name
df : pd.DataFrame
data to plot
statsToPlot : string
statistics to plot
"""
    fig = plt.figure(figsize=(30, 13))
    plt.title('{} from {}'.format(statsToPlot, dfname))
    df[statsToPlot].plot(label='{}'.format(dfname))
    plt.legend()  # add the legend before saving so it appears in the saved image
    plt.savefig('/home/csvPlot/{}.png'.format(dfname))
def cumulativeCsvPlot(aggregatedData, list_dfs, exchanges, statsToPlot):
"""
    Graphical representation of all dataframes in the same plot; saves the image to /home/csvPlot/
Parameters
----------
aggregatedData : pd.DataFrame
Dataframe containing aggregated OHLCV bars from all exchanges
list_fds : list of pd.DataFrame
List of exchanges OHLCV bars as returned by :func:`~generateBitcoinOHLCV`
exchanges : Array
Containing the names of all the exchanges used
statsToPlot : string
statistics to plot
"""
    fig = plt.figure(figsize=(30, 13))
    aggregatedData[statsToPlot].plot(label='Aggregated')
    for exchange in tqdm(exchanges):
        list_dfs[exchange][statsToPlot].plot(label=exchange)
    plt.title('{} price from every single exchange and aggregated'.format(statsToPlot))
    plt.legend()  # title and legend must be set before saving so they appear in the file
    plt.savefig('/home/csvPlot/aggregated.png')
def resumeData(dfname, df):
"""
Print first and last row for each DataFrame
Parameters
----------
dfname : string
Exchange's name
df : pd.DataFrame
data to analize
"""
print()
print("exchange: {}".format(dfname))
print("---- prima riga ----")
print(df.iloc[0])
print()
print("---- ultima riga ----")
print(df.iloc[-1])
print("----------------------")
def resumeNaN(dfname, df):
"""
Print the NaN rows for the df
Parameters
----------
dfname : string
Exchange's name
df : pd.DataFrame
data to analize
"""
is_NaN = df.isnull()
row_has_NaN = is_NaN.any(axis=1)
rows_with_NaN = df[row_has_NaN]
rows_with_NaN.to_csv("/home/nan{}.csv".format(dfname))
print()
print("exchange: {}".format(dfname))
print("----------------------")
print(rows_with_NaN)
print()
print(df.isna().sum())
print("----------------------")
creaCartelle()
# Define the compression and the output file
compression = 60 #min
outputFile = ""
# Define the exchanges we want to aggregate
exchanges = ["kraken", "coinbase", "bitstamp", "bitfinex", "okcoin", "itbit"]
exchangeToFill = "bitstamp"
print()
print("Aggregating bitcoin trade data from {} exchanges and create {} file containing {}min OHLC bars".format(len(exchanges),outputFile,compression))
print()
OHLCV = {}
for exchange in tqdm(exchanges):
OHLCV[exchange] = generateBitcoinOHLCV(exchange, compression, exchangeToFill)
outputFile = "{}_data.csv".format(exchange)
print("Saving {} rows to file {}".format(len(OHLCV), outputFile))
print()
OHLCV[exchange].to_csv("/home/csv/{}".format(outputFile))
aggregatedData = aggregateExchangeData(tuple(OHLCV.values()))
aggregatedData["volume"] = OHLCV["bitstamp"]["volume"]#the volume data is taken only from bitstamp exchange
outputFile = "bitcoin_OHLCV.csv"
print("Saving {} rows to file {}".format(len(OHLCV), outputFile))
aggregatedData.to_csv("/home/csv/{}".format(outputFile))
"""
PLOT SINGLE DATAFRAME
"""
stats = "open" #statsToPlot
for exchange in tqdm(exchanges):
csv_plot(exchange, OHLCV[exchange], stats)
csv_plot("aggregated", aggregatedData, stats)
"""
PLOT ALL DATAFRAMES IN THE SAME FIGURE
"""
cumulativeCsvPlot(aggregatedData, OHLCV, exchanges, stats)
"""
PRINT ROW FOR EACH DATAFRAME
"""
for exchange in tqdm(exchanges):
print()
print("exchange: {} ----> elementi : {} ".format(exchange, len(OHLCV[exchange]["open"])))
print()
print("aggregated: elementi : {} ".format(len(aggregatedData["open"])))
"""
PRINT FIRST AND LAST ROW FOR EACH DATAFRAME
"""
for exchange in tqdm(exchanges):
resumeData(exchange, OHLCV[exchange])
resumeData("aggregatedData", aggregatedData)
"""
PRINT NaN FOR EACH DATAFRAME
"""
for exchange in tqdm(exchanges):
resumeNaN(exchange, OHLCV[exchange])
resumeNaN("aggregatedData", aggregatedData)
###Output
33%|███▎ | 2/6 [00:00<00:00, 16.31it/s] |
tutorials/Appendix - Python Tutorial.ipynb | ###Markdown
Appendix: Python Tutorial

This optional tutorial demonstrates the features of the Python language and of Jupyter Notebook that are used in the examples and tutorials of the textbook. Special attention is given to the most important data types used in data analysis workflows, as well as common idioms and patterns employed in these use-cases. This appendix may be especially useful for readers more experienced in programming languages other than Python.

Contents:
1. [Jupyter Notebook](#1.-Jupyter-Notebook)
2. [Conditionals](#2.-Conditionals)
3. [Lists](#3.-Lists)
4. [Loops](#4.-Loops)
5. [Tuples](#5.-Tuples)
6. [Dictionaries](#6.-Dictionaries)
7. [Combining Data Types](#7.-Combining-Data-Types)

1. Jupyter Notebook

Even if you're well-versed in Python, you may not have used Jupyter Notebook before. The main idea is that we can mix text and code, and that code is executed in "cells." By clicking on a cell and pressing Shift + Enter, you execute the cell and move to the next cell. Ctrl + Enter executes the cell but does not move to the next cell. You can run many cells at once by using the different options in the "Cell" menu. Try executing the code in the next cell and observe that the output is printed below the cell.
###Code
print('Hello from Jupyter')
###Output
Hello from Jupyter
###Markdown
1.1 Printing and inspecting variables

In Jupyter notebooks, we have two different ways of inspecting variables. Python's `print()` function is useful as always:
###Code
my_str = 'Hello'
my_int = 16
print(my_str)
print(my_int)
###Output
Hello
16
###Markdown
We can also just execute a cell with the name of a variable:
###Code
my_str
###Output
_____no_output_____
###Markdown
The big difference here between the two approaches is that `print()` statements can output multiple items per cell, while the latter approach will only display the last variable named. Observe:
###Code
my_str
my_int
###Output
_____no_output_____
###Markdown
As opposed to the first example using `print()`, this only outputs the last value.

Nota Bene

One key advantage of presenting information in this notebook format is that it allows you to change and re-run the code cells, then see how the output differs. Don't be afraid to experiment!

2. Conditionals

"Conditionals" is a fancy word for if-statements. If you've ever done any programming, you are surely aware of the if-then-else construction. In Python it's done as follows:
###Code
number_of_apples = 5
if number_of_apples < 1:
print('You have no apples')
elif number_of_apples == 1:
print('You have one apple')
elif number_of_apples < 4:
print('You have a few apples')
else:
print('You have many apples!')
###Output
You have many apples!
###Markdown
You can change `number_of_apples` and re-run the previous cell in order to get the different possible outputs.

3. Lists

One of Python's most versatile and ubiquitous data types is the List ([Python documentation](https://docs.python.org/3/library/stdtypes.html#list)). This is an **ordered**, **mutable**, **collection** of **non-unique** items.

3.1 Ordered

By *ordered*, we mean that the items are addressed by their *index* in the collection:
###Code
student_names = ['Alice', 'Bob', 'Carol', 'Dave']
student_names[1]
###Output
_____no_output_____
###Markdown
Indices in Python start at zero, so the head of the list has index 0:
###Code
student_names[0]
###Output
_____no_output_____
###Markdown
We can get the last item in a list by using negative indexing:
###Code
student_names[-1]
###Output
_____no_output_____
###Markdown
Lists can also be *sliced* to get a subset of the list items:
###Code
student_names[0:2]
student_names[1:3]
###Output
_____no_output_____
###Markdown
When slicing from the beginning of the list, or to the end of the list, we can leave out the index:
###Code
student_names[:2]
student_names[2:]
###Output
_____no_output_____
###Markdown
3.2 Mutable

By *mutable*, we mean that the list can be changed by adding or removing items. We most often add items to the end of the list with `.append()`:
###Code
student_names.append('Esther')
student_names
###Output
_____no_output_____
###Markdown
But we can also add items at any arbitrary index with `.insert()`:
###Code
student_names.insert(2, 'Xavier')
student_names
###Output
_____no_output_____
###Markdown
We can delete items with the `del` keyword:
###Code
del student_names[2]
student_names
###Output
_____no_output_____
###Markdown
3.3 Non-unique

Note that nothing stops us from repeatedly adding the same name to this list:
###Code
student_names.append('Esther')
student_names.append('Esther')
student_names
###Output
_____no_output_____
###Markdown
If you want a collection where uniqueness is enforced, you should look towards [sets](https://docs.python.org/3/library/stdtypes.html#set) or [dictionaries](https://docs.python.org/3/library/stdtypes.html#dict).

3.4 Collection

A collection refers to a data type consisting of more than one value. Lists are one type of collection, but there are others such as tuples, sets, and dictionaries. When naming your variables that contain lists, you should use plural nouns, *e.g.* `student_names` in the previous example. In contrast, single values should be named with singular nouns, *e.g.* `my_str` in the first section. This helps you and others reading your code keep straight which variables are collections and which are single items, and also helps when writing loops as shown in the next section.

4. Loops

If you're coming from another programming language, you're probably aware of more than one type of loop. In Python, we focus on one type of loop in particular: the for-loop. The for-loop iterates through a collection of items, executing its code for each item:
###Code
student_names = ['Alice', 'Bob', 'Carol', 'Dave']
for student_name in student_names:
print('Hello ' + student_name + '!')
###Output
Hello Alice!
Hello Bob!
Hello Carol!
Hello Dave!
###Markdown
4.1 Naming conventions

Note the naming convention being used in the for-in construction: `for student_name in student_names:`. By using a plural noun for the collection `student_names`, we automatically have a good name for the individual items in the collection: `student_name`. The tutorials in this book use this naming convention when possible as it makes clear to the reader which variable is the "loop variable" that changes value between iterations of the loop body.

4.2 Loops, lists, and conditionals

One extremely common type of task when working with data is the *filtering task*. In abstract, this task involves looping over one collection, checking each item for some criterion, then adding items that meet the criterion to another collection. In the following example, we'll create a list of just the "long" names from the `student_names` list. Long names are those that contain more than four characters. You will often see and write code that looks like the following in this book's tutorials:
###Code
# Initialize an empty list and add to it the
# student names containing more than four characters
long_names = []
for student_name in student_names:
# This is our criterion
if len(student_name) > 4:
long_names.append(student_name)
long_names
###Output
_____no_output_____
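###Markdown
As an aside (an added sketch, not part of the original tutorial), the same filtering task can be written more compactly with a list comprehension:
###Code
# one-line equivalent of the filtering loop above
long_names = [student_name for student_name in student_names if len(student_name) > 4]
long_names
###Output
_____no_output_____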
###Markdown
4.3 Nested loops

Loops can be "nested" inside one another. This often occurs when we want to match up items from one collection to items from the same or another collection. Here let's create a list of all possible pairs of students:
###Code
student_names = ['Alice', 'Bob', 'Carol', 'Dave']
student_pairs = []
for student_name_0 in student_names:
for student_name_1 in student_names:
student_pairs.append(
(student_name_0, student_name_1)
)
student_pairs
###Output
_____no_output_____
###Markdown
Note here that instead of just adding names to the `student_pairs` list, we are adding *tuples* `(student_name_0, student_name_1)`. This means each item in the list is a 2-tuple:
###Code
student_pairs[0]
###Output
_____no_output_____
###Markdown
We'll talk more about tuples in the next section. The second thing to notice is that we're including pairs with two of the same student. Suppose we wish to exclude those. We can accomplish this by adding an if-statement in the second for-loop to *filter* out those repeats:
###Code
student_names = ['Alice', 'Bob', 'Carol', 'Dave']
student_pairs = []
for student_name_0 in student_names:
for student_name_1 in student_names:
# This is the criterion we added
if student_name_0 != student_name_1:
student_pairs.append(
(student_name_0, student_name_1)
)
student_pairs
###Output
_____no_output_____
###Markdown
And now the list has no repeats.

5. Tuples

Even experienced Python users often are confused about the difference between tuples and lists, so definitely read this short section even if you have some experience. Tuples ([documentation](https://docs.python.org/3/library/stdtypes.html#tuple)) are superficially similar to lists as they are an ordered collection of non-unique items:
###Code
student_grade = ('Alice', 'Spanish', 'A-')
student_grade
student_grade[0]
###Output
_____no_output_____
###Markdown
5.1 Immutable

The big difference from lists is that tuples are **immutable**. Each of the following cells should raise an exception.
###Code
student_grade.append('IU Bloomington')
del student_grade[2]
student_grade[2] = 'C'
###Output
_____no_output_____
###Markdown
This immutability makes tuples useful when **index matters**. In this example, the index matters semantically: index 0 is the student's name, index 1 is the course name, and index 2 is their grade in the course. The inability to insert or append items to the tuple means that we are certain that, say, the course name won't move around to a different index.

5.2 Unpacking

Tuples' immutability makes them useful for *unpacking*. At its simplest, tuple unpacking allows the following:
###Code
student_grade = ('Alice', 'Spanish', 'A-')
student_name, subject, grade = student_grade
print(student_name)
print(subject)
print(grade)
###Output
Alice
Spanish
A-
###Markdown
While occasionally useful on its own, tuple unpacking is most useful when used with loops. Consider the following piece of code, which congratulates students on getting good grades:
###Code
student_grades = [
('Alice', 'Spanish', 'A'),
('Bob', 'French', 'C'),
('Carol', 'Italian', 'B+'),
('Dave', 'Italian', 'A-'),
]
for student_name, subject, grade in student_grades:
if grade.startswith('A'):
print('Congratulations', student_name,
'on getting an', grade,
'in', subject)
###Output
Congratulations Alice on getting an A in Spanish
Congratulations Dave on getting an A- in Italian
###Markdown
Compare this to the same code using indices:
###Code
for student_grade in student_grades:
if student_grade[2].startswith('A'):
print('Congratulations', student_grade[0],
'on getting an', student_grade[2],
'in', student_grade[1])
###Output
Congratulations Alice on getting an A in Spanish
Congratulations Dave on getting an A- in Italian
###Markdown
Tuple unpacking allows us to easily refer to this structured data by semantic names instead of having to keep the indices straight. The second example, while functionally identical, is more difficult to write and harder still to read.

6. Dictionaries

The next type of collection is much different than the previous two, but is among the most powerful tools in Python: the dictionary ([documentation](https://docs.python.org/3/library/stdtypes.html#dict)). The dictionary is an **unordered**, **mutable**, collection of **unique** items. In other languages these are called maps, mappings, hashmaps, hashes, or associative arrays.

6.1 Unordered

By unordered, we mean that dictionary items aren't referred to by their position, or index, in the collection. Instead, dictionary items have *keys*, each of which is associated with a value. Here's a very basic example:
###Code
foreign_languages = {
'Alice': 'Spanish',
'Bob': 'French',
'Carol': 'Italian',
'Dave': 'Italian',
}
###Output
_____no_output_____
###Markdown
Here the student names are the keys and the students' foreign language courses are the values. So to see Carol's foreign language, we use the key -- her name -- instead of an index:
###Code
foreign_languages['Carol']
###Output
_____no_output_____
###Markdown
Trying to get the value for a key that does not exist in the dictionary results in a `KeyError`:
###Code
foreign_languages['Zeke']
###Output
_____no_output_____
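###Markdown
A common way to avoid the `KeyError` (an added note, not in the original text) is the `.get()` method, which returns `None` or a supplied default when the key is missing:
###Code
# returns the default instead of raising KeyError
foreign_languages.get('Zeke', 'not enrolled')
###Output
_____no_output_____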
###Markdown
We can check if a particular key is in a dictionary with the `in` keyword:
###Code
'Zeke' in foreign_languages
'Alice' in foreign_languages
###Output
_____no_output_____
###Markdown
Note that keys are case-sensitive:
###Code
'alice' in foreign_languages
###Output
_____no_output_____
###Markdown
6.2 Mutable

We can add, delete, and change entries in a dictionary:
###Code
# Add an entry that doesn't exist
foreign_languages['Esther'] = 'French'
foreign_languages
# Delete an entry that exists
del foreign_languages['Bob']
foreign_languages
# Change an entry that does exist
foreign_languages['Esther'] = 'Italian'
foreign_languages
###Output
_____no_output_____
###Markdown
6.3 Unique

Note that the syntax for adding an entry that does not exist and changing an existing entry are the same. When assigning a value to a key in a dictionary, it adds the key if it doesn't exist, or else updates the value for the key if it does exist. As a consequence, keys are necessarily *unique* -- there can't be more than one element with the same key in a dictionary.

6.4 Looping over dictionaries

While not performed as often as with lists, it is possible to loop over entries in a dictionary. There are two ways to accomplish this task:
###Code
for key in foreign_languages:
value = foreign_languages[key]
print(key, 'is taking', value)
for key, value in foreign_languages.items():
print(key, 'is taking', value)
###Output
Alice is taking Spanish
Carol is taking Italian
Dave is taking Italian
Esther is taking Italian
###Markdown
Here I'm using variables named `key` and `value` to show the general principle. When you write loops over dictionaries in your own code, you should use descriptive names as opposed to `key` and `value`.

6.5 Dictionaries as records

In `foreign_languages` we have paired data -- every name is associated with a subject. Dictionaries are also often used to contain several different data about a single entity. To illustrate this subtle difference, let's take a look at one item from `student_grades`:
###Code
student_grade = ('Alice', 'Spanish', 'A')
###Output
_____no_output_____
###Markdown
Here we know that the items in each of these tuples are a name, a subject, and a grade:
###Code
student_name, subject, grade = student_grades[0]
print(student_name, 'got a grade of', grade, 'in', subject)
###Output
Alice got a grade of A in Spanish
###Markdown
We could instead represent this data as a dictionary and use it as such. A dictionary of information describing a single item is often referred to as a *record*:
###Code
record = {
'name': 'Alice',
'subject': 'Spanish',
'grade': 'A',
}
print(record['name'],
'got a grade of', record['grade'],
'in', record['subject'])
###Output
Alice got a grade of A in Spanish
###Markdown
While the code is slightly longer, there is absolutely no ambiguity here about matching up indices and what each value represents. This is also useful in contexts where some of the fields might be optional.

7. Combining Data Types

In most of these simple examples we've worked with collections of simple values like strings and numbers; however, data analysis often involves working with complex data, where each item of interest has several data associated with it. These complex data are often represented as collections of collections, *e.g.,* lists of dictionaries. Choosing the appropriate data types for a given problem will make it easier for you to write bug-free code and will make your code easier for others to read, but identifying the best data types is a skill gained through experience. Some of the commonly-used combination data types are illustrated below, but this is hardly exhaustive.

7.1 List of tuples

We've actually seen this one before. Consider the `student_grades` data from the earlier example on tuple unpacking:
###Code
student_grades = [
('Alice', 'Spanish', 'A'),
('Bob', 'French', 'C'),
('Carol', 'Italian', 'B+'),
('Dave', 'Italian', 'A-'),
]
###Output
_____no_output_____
###Markdown
This is a list of tuples:
###Code
student_grades[1]
###Output
_____no_output_____
###Markdown
and we can work with the individual tuples as such:
###Code
student_grades[1][2]
###Output
_____no_output_____
###Markdown
7.2 List of dictionaries

In the section on dictionaries, we explored how dictionaries are often used to contain several data about a single entity, and each such dictionary is sometimes called a *record*. Let's convert the list of tuples `student_grades` into a list of records `student_grade_records`:
###Code
student_grade_records = []
for student_name, subject, grade in student_grades:
record = {
'name': student_name,
'subject': subject,
'grade': grade,
}
student_grade_records.append(record)
student_grade_records
###Output
_____no_output_____
###Markdown
Now each item in the list is a dictionary:
###Code
student_grade_records[1]
###Output
_____no_output_____
###Markdown
and we can work with the individual records as such:
###Code
student_grade_records[1]['grade']
###Output
_____no_output_____
###Markdown
This list-of-dicts is often used to represent data from a database or an API. Let's use this data to write our code congratulating students for good grades, as we did in the section on tuple unpacking:
###Code
for record in student_grade_records:
if record['grade'].startswith('A'):
print('Congratulations', record['name'],
'on getting an', record['grade'],
'in', record['subject'])
###Output
Congratulations Alice on getting an A in Spanish
Congratulations Dave on getting an A- in Italian
###Markdown
7.3 Dictionary of dictionaries

The list of dictionaries is very useful when dealing with non-unique data; in the previous example each student might have several grades from different classes. But sometimes we want to refer to the data by a particular name or key. In this case, we can use a dictionary whose values are records, *i.e.*, other dictionaries. Let's use data from `student_grades` again, but assume we just want the foreign language grade so we can use the student's name as a key:
###Code
foreign_language_grades = {}
for student_name, subject, grade in student_grades:
record = {
'subject': subject,
'grade': grade,
}
foreign_language_grades[student_name] = record
foreign_language_grades
###Output
_____no_output_____
###Markdown
Now we can refer to these by student name:
###Code
foreign_language_grades['Alice']
###Output
_____no_output_____
###Markdown
And we can get the individual data that we care about:
###Code
foreign_language_grades['Alice']['grade']
###Output
_____no_output_____
###Markdown
7.4 Dictionary with tuple keys

It is occasionally useful to key dictionaries on more than one piece of data. Dictionaries can use any immutable object as a key, which includes tuples. Continuing with our student grades example, we may want the keys to be the student name and subject:
###Code
student_course_grades = {}
for student_name, subject, grade in student_grades:
student_course_grades[student_name, subject] = grade
student_course_grades
###Output
_____no_output_____
###Markdown
Now we can represent all of a student's grades:
###Code
student_course_grades['Alice', 'Math'] = 'A'
student_course_grades['Alice', 'History'] = 'B'
student_course_grades
###Output
_____no_output_____
###Markdown
7.5 Another dictionary of dictionaries

Let's take advantage of the fact that, for a particular student, we often want to get subject-grade pairs, *i.e.* a report card. We can create a dictionary with student names as keys and the values being dictionaries of subject-grade pairs. In this case we need to do a bit of checking; that step is commented below:
###Code
student_report_cards = {}
for student_name, subject, grade in student_grades:
# If there is no report card for a student,
# we need to create a blank one
if student_name not in student_report_cards:
student_report_cards[student_name] = {}
student_report_cards[student_name][subject] = grade
student_report_cards
###Output
_____no_output_____
###Markdown
The advantage of this extra work is that we can now easily have multiple grades per student:
###Code
student_report_cards['Alice']['Math'] = 'A'
student_report_cards['Alice']['History'] = 'B'
student_report_cards
###Output
_____no_output_____
###Markdown
And we can easily fetch a student's "report card":
###Code
student_report_cards['Alice']
###Output
_____no_output_____ |
Intro_Ipython_Numpy_Scipy_solution.ipynb | ###Markdown
Ipython notebook

IPython started as a Python console project specialized for scientific applications. Over time the project has broadened and now includes a whole set of utilities for scientific applications, among them the notebook. The IPython notebook is an interactive worksheet, inspired by Maple and Mathematica, that runs as a web service. When a notebook starts, IPython launches an application server in which an IPython kernel runs, plus your OS's default browser pointed at the localhost address 127.0.0.1. This client/server mode of operation notably makes it possible to run computations by connecting to a supercomputer, or to a data center such as Amazon Web Services, Microsoft Azure or Google App Engine. The notebook is an interactive computing environment complementary to Spyder: as an IDE, Spyder is better suited to developing functions and applications, while the notebook is better suited to carrying out studies or processing data.

How the Ipython Notebook works

In IPython versions >2.x, the notebook is used by constantly switching between 2 modes:
* cell command mode:
  * reached by pressing ESC
  * lets you create (A and B), delete (X), copy (C) and paste (V) cells
  * change the cell type: code (Y) or markdown (M)
* edit mode:
  * reached by pressing ENTER
  * run the cell's code (SHIFT ENTER) or render a markdown cell

The full set of commands is available in the toolbar (edit). The full set of shortcuts is available in the help menu of the interface (help -> keyboard shortcut), along with a guided tour of the interface (help -> user interface tour). For more details, refer to the official documentation: http://ipython.org/ipython-doc/2/notebook/notebook.html#structure-of-a-notebook-document

A gallery of notebooks is available online via the nbviewer web application, which renders .ipynb files hosted on Github: http://nbviewer.ipython.org/

Numpy, Scipy and Matplotlib

Numpy, Scipy and Matplotlib are Python's most important numerical computing libraries.

Numpy

Includes:
* the full set of mathematical functions
* manipulations of vectors and matrices

http://wiki.scipy.org/NumPy_for_Matlab_Users

It covers approximately all of Matlab's core functionality.
###Code
import numpy as np # np is the conventional import alias
print np.array([0,1,2])
###Output
[0 1 2]
###Markdown
The numpy array is the equivalent of the Matlab vector:
###Code
x = np.arange(0,10)
print x
print x**2 # raised to the power of 2; all operations are element-wise by default
print x+x
print x.dot(x) # dot product
x2 = np.array([[0,1],[1,0]]) # matrix
print x2
x3 = np.random.randint(low=0,high=10,size=[3,3])
u3 = np.array([2,3,4])
print x3
print x3.T # transpose
print x3*u3 # multiplies each row element-wise by the vector u3
print x3.dot(u3) # matrix-vector product
###Output
[[1 6 5]
[7 3 8]
[8 7 2]]
[[1 7 8]
[6 3 7]
[5 8 2]]
[[ 2 18 20]
[14 9 32]
[16 21 8]]
[40 55 45]
###Markdown
In general, purely matrix manipulations are often syntactically heavier than in Matlab® (MATrix LABoratory), which is specialized for this type of object. Performance, however, is similar. A benchmark on this type of manipulation carried out by NASA comparing Matlab, Python, Java and Fortran is available online: https://modelingguru.nasa.gov/docs/DOC-1762 . In essence, Matlab and numpy have similar performance; Fortran (with the Intel compiler) is always faster.

Matplotlib

The most popular graphics library for 2D plots (Mayavi for 3D). The online gallery provides many code examples: http://matplotlib.org/gallery.html
A matplotlib tutorial: http://nbviewer.ipython.org/github/cmiller8/PythonforBuildingAnalysts/blob/master/0_PythonBaseLibraries/3_MatplotlibLibrary.ipynb
###Code
import matplotlib.pylab as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
The previous line is an IPython-specific command; it is not part of the Python language. Commands starting with **%** or **%%** are **magics** that configure the notebook's behavior. **%matplotlib inline** means that plots are included inline in the notebook (and saved with it). Interactive plots are obtained with the **%matplotlib qt** command.
###Code
plt.plot([0,1,2,3],[0,2,1,2],'s-')
###Output
_____no_output_____
###Markdown
Create 100 points from 0 to $5\pi$ in the vector $x_4$, then compute $$y_4 = \sin(x_4)$$
###Code
x4 = np.linspace(0,5*np.pi,100)
y4 = np.sin(x4)
fig = plt.figure() # create a figure window
ax1 = fig.add_subplot(121) # left plot of 2 plots laid out horizontally
ax2 = fig.add_subplot(122) # right plot of 2 plots laid out horizontally
ax1.plot(x4,y4)
ax2.plot(y4,x4)
ax1.set_title(r'$sin(x)$')
fig.savefig('test.png',dpi=150)
###Output
_____no_output_____
###Markdown
The following code draws a sample of size 1000 from the multivariate normal distribution: $$x_5 \sim \mathcal{N}(\mu,\Sigma)$$
With
$$\mu= \left(\begin{array}{c} 20 \\ 30 \\ 100 \end{array} \right)$$
$$\Sigma= \left(\begin{array}{ccc} 10 & -5 & 0.3 \\ -5 & 10 & 0.2 \\ 0.3 & 0.2 & 1000 \end{array} \right)$$
Then only the first 2 components of the vector are plotted in a scatter plot; the last component is used for the size and the color of the points.
###Code
x5 = np.random.multivariate_normal(mean=[20,30,100],cov=[[10,-5,0.3],[-5,10,0.2],[0.3,0.2,1000]],size=1000)
plt.scatter(x5[:,0],x5[:,1],s=x5[:,2],c=x5[:,2],edgecolor='none',alpha=0.15,cmap='hot')
res = plt.hist(x5.flatten(),bins=50) # what does the flatten method do?
###Output
_____no_output_____
###Markdown
Scipy

Includes the equivalent of Matlab's main toolboxes.
###Code
from scipy import linalg, optimize, sparse # for example: linear algebra, optimization and sparse matrix handling
from copy import copy
###Output
_____no_output_____
###Markdown
Lab exercise: matrix manipulation

http://en.wikibooks.org/wiki/LaTeX/Mathematics

Exercise:
* Using numpy's diag function, write the function that builds the tridiagonal matrix of size $N_x$ below:
$$ A = \left( \begin{array}{cccc}
1+2Fo & -Fo & & 0\\
-Fo & 1+2Fo & \ddots & \\
& \ddots & 1+2Fo & -Fo \\
0 & & -Fo & 1+2Fo \end{array} \right)$$
Where
$$Fo = \frac{\alpha \Delta t}{\Delta x^2}$$
And the vector:
$$b^{i-1} = \left(\begin{array}{c} \theta^{i-1}_1+Fo\,\theta^{i-1}_{0} \\ \vdots \\ \theta^{i-1}_k \\ \vdots \\ \theta^{i-1}_{N_x}+Fo\,\theta^{i-1}_{N_x+1} \end{array} \right)$$
* Using the solve function from the scipy.linalg package, find $T^i$ such that:
$$ AT^i = b^{i-1} + \frac{\Delta t}{\rho C_p}S^{i-1}$$
Where $S$ is a source term in $W.m^{-3}$.

**NB: This is the solution of the 1D transient heat equation with a centered implicit scheme.**

**$\theta^{i-1}_{0}$ and $\theta^{i-1}_{n+1}$ are the left and right boundary conditions.**

**Fo is the mesh Fourier number. With this implicit numerical scheme the solution is unconditionally stable, but accuracy is degraded for Fo > 1.**

**Exercise:**
* Compute the temperature evolution for $N_t$ time iterations and $N_x$ spatial cells.
* Store the result in a matrix, either by assigning the values into a matrix defined in advance, or by concatenating the vectors with np.hstack
* Use the **%%timeit** command to check the performance of your code

**Definition of the physical parameters of the system**
###Code
alpha = 0.54e-6 # m2.s-1
rho = 2.4e3 # kg.m-3
Cp = 0.88e3 #J.kg-1.K-1
dt = 10*60. # s
dx = 0.01 # m
L = 0.5 # m
duration = 3*24*3600. # 3 days
Nx = int(L/dx)
Nt = int(duration/dt)
Fo = alpha*dt/(dx**2) # mesh Fourier number; indicates the accuracy and stability of the numerical scheme
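# added check: with these values Fo = 0.54e-6*600/0.01**2 = 3.24, consistent with
# the matrix printed below (1+2*Fo = 7.48); stable, but Fo > 1 degrades accuracy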
###Output
_____no_output_____
###Markdown
**Definition of the matrix $A$**
###Code
A = np.diag(-Fo*np.ones(Nx-1),-1)+\
np.diag(1+2*Fo*np.ones(Nx))+\
np.diag(-Fo*np.ones(Nx-1),1)
print A
###Output
[[ 7.48 -3.24 0. ..., 0. 0. 0. ]
[-3.24 7.48 -3.24 ..., 0. 0. 0. ]
[ 0. -3.24 7.48 ..., 0. 0. 0. ]
...,
[ 0. 0. 0. ..., 7.48 -3.24 0. ]
[ 0. 0. 0. ..., -3.24 7.48 -3.24]
[ 0. 0. 0. ..., 0. -3.24 7.48]]
###Markdown
**Definition of the vector $b$**
###Code
def rhs(Ti,Tg,Td,Fo):
"""
rhs : right hand side of the equation
    Computes the vector b such that Ax = b
    Ti: solution vector at time step i
    Tg: left boundary condition temperature (scalar)
    Td: right boundary condition temperature (scalar)
    Fo: mesh Fourier number
"""
b = copy(Ti)
b[0] += Fo*Tg
b[-1] += Fo*Td
return b
###Output
_____no_output_____
###Markdown
**Solving the system for $N_t$ time steps**
###Code
#%%timeit
Ti = 15*np.ones([Nx,1]) # initial condition at 15°C
t = np.linspace(0,duration,Nt) # time vector
Tg = 10 + 2*np.sin(t/(24*3600)*2*np.pi) # sinusoidal left boundary condition centered around 10°C
TT = [] # empty list to which we append the temperature profile at each time step
TT.append([Ti])
for t in range(Nt):
b = rhs(Ti,Tg[t],19,Fo)
    Ti = np.linalg.solve(A,b) # solve the linear system Ax = b
TT.append([Ti])
T = np.vstack(TT)
T = np.reshape(T,[Nt+1,Nx])
###Output
_____no_output_____
###Markdown
Exercise:
* Plot, superimposed, the temperature profile $\theta$ for the first 10 time steps.
* Draw the iso-temperature curves, annotated in black, over the temperature field rendered with the "hot" colormap
###Code
with sns.color_palette("Blues_r",12): # set the color cycle of the curves
ax = plt.plot(T[0:10,:].T)
X,Y = np.meshgrid(np.arange(Nx),np.arange(Nt+1)) # build the X and Y matrices for the contour function
print X
print
print Y
ax = plt.imshow(T.T,cmap='RdBu_r',aspect='auto',interpolation='nearest') # create the temperature map
# create the contours
CS = plt.contour(Y,X,T,10,
                 colors='k',linewidths=1) # negative contours will be dashed by default
plt.clabel(CS, fontsize=10, inline=0.5) # draw the contour labels
f = ax.get_figure() # get the figure object that "wraps" the ax object
f.savefig('Chronogram_Temperature_1D.png',dpi=150) # save the figure
###Output
C:\Anaconda\lib\site-packages\matplotlib\text.py:52: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
if rotation in ('horizontal', None):
C:\Anaconda\lib\site-packages\matplotlib\text.py:54: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
elif rotation == 'vertical':
###Markdown
Save your results: save the matrix T in csv format with numpy's savetxt function
###Code
np.savetxt('temperature_1D_implicit.csv',T,delimiter=';')
###Output
_____no_output_____
###Markdown
Bonus: symbolic computation with Sympy
###Code
from sympy import *
init_printing() # format the outputs with LaTeX
X = Symbol('X')
x, y, z, t, i = symbols('x y z t i')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
expand((X+1)*(X+2))
factor(x**2+3*x+2)
(1/cos(x)).series(x, 0, 10) # series expansion around 0 up to order 10
summation(1/2**i, (i, 0, oo)) # infinite sum of 1/2^i
integrate(exp(-y**2)*erf(y), y) # integration
f = Function('f')
f(x).diff(x, x) + f(x)
dsolve(f(x).diff(x, x) + f(x), f(x)) # solving differential equations
solve([x + 5*y - 2, -3*x + 6*y - 15], [x, y]) # solving linear equations
import IPython
import sympy
from statsmodels import version as sm_version
print "Sympy version \t:\t %s"%sympy.__version__
print "IPython version \t:\t %s"%IPython.__version__
print "numpy version \t:\t %s"%np.__version__
print "statsmodels version \t:\t %s"%sm_version.full_version
print "matplotlib version \t:\t %s"%plt.__version__
###Output
Sympy version : 0.7.5
IPython version : 2.4.1
numpy version : 1.9.2
statsmodels version : 0.5.0
matplotlib version : 1.9.2
|
C8, Applying Machine Learning to Sentiment Analysis.ipynb | ###Markdown
bag_of_words model - an example
###Code
from sklearn.feature_extraction.text import CountVectorizer
count = CountVectorizer()
docs = np.array(['The sun is shining',
'The weather is sweet',
'The sun is shining and the weather is sweet'])
bag = count.fit_transform(docs)
count.vocabulary_
print(bag.toarray())
###Output
[[0 1 1 1 0 1 0]
[0 1 0 0 1 1 1]
[1 2 1 1 1 2 1]]
###Markdown
TF-IDF, term frequency-inverse document frequency
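As a reminder (an added note): with its default settings (`smooth_idf=True`, `norm='l2'`), scikit-learn computes $idf(t) = \ln\frac{1+n_d}{1+df(d,t)} + 1$ and then L2-normalizes each row, which is why the values below differ slightly from the plain textbook definition $\text{tf-idf}(t,d)=tf(t,d)\times idf(t)$.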
###Code
from sklearn.feature_extraction.text import TfidfTransformer
tfidf = TfidfTransformer()
np.set_printoptions(precision = 2)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())
###Output
[[ 0. 0.43 0.56 0.56 0. 0.43 0. ]
[ 0. 0.43 0. 0. 0.56 0.43 0.56]
[ 0.4 0.48 0.31 0.31 0.31 0.48 0.31]]
###Markdown
cleaning text data
###Code
df.loc[1,'review']
# strip
import re
def preprocessor(text):
# remove html markup
text = re.sub('<[^>]*>', '', text)
# organize emoticons
emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text)
text = re.sub('[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
return text
preprocessor('</a>This :-) is :( a test :-)!')
df['review'] = df['review'].apply(preprocessor)
# transform doc into tokens
def tokenizer(text):
return text.split()
tokenizer('runners like running and thus they run')
# tokenization with word stemming, using Porter stemmer
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
tokenizer_porter('runners like running and thus they run') # notice thus was tokenized to be thu
# stop-words from NLTK
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:] if w not in stop]
###Output
[nltk_data] Downloading package stopwords to /Users/Jack/nltk_data...
[nltk_data] Unzipping corpora/stopwords.zip.
###Markdown
train a logistic regression model for document classification
###Code
# prepare data
x_train = df.loc[:25000, 'review'].values
y_train = df.loc[:25000, 'sentiment'].values
x_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
###Output
_____no_output_____
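###Markdown
The grid-search training step itself is not shown in this excerpt; a minimal sketch of what it could look like (the pipeline and parameter choices below are assumptions, not taken from this notebook):
###Code
# hypothetical sketch: tf-idf features + logistic regression
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
lr_tfidf = Pipeline([('vect', TfidfVectorizer(lowercase=False, tokenizer=tokenizer, stop_words=stop)),
                     ('clf', LogisticRegression(random_state=0))])
lr_tfidf.fit(x_train, y_train)
print('accuracy: %.3f' % lr_tfidf.score(x_test, y_test))
###Output
_____no_output_____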
###Markdown
out-of-core learning
###Code
def stream_docs(path):
with open(path, 'r') as csv:
next(csv) # skip header
for line in csv:
text, label = line[:-3], int(line[-2])
yield text, label
next(stream_docs(path='./data/movie_data.csv'))
def get_minibatch(doc_stream, size):
docs, y = [], []
try:
for _ in range(size):
text, label = next(doc_stream)
docs.append(text)
y.append(label)
except StopIteration:
return None, None
return docs, y
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
vect = HashingVectorizer(decode_error='ignore',
n_features = 2**21,
preprocessor = None,
tokenizer = tokenizer)
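# note (added): HashingVectorizer is stateless, so it needs no fit() over the full
# corpus; that is what makes this out-of-core minibatch loop possible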
clf = SGDClassifier(loss='log', random_state = 1, n_iter = 1)
doc_stream = stream_docs(path = './data/movie_data.csv')
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
for _ in range(45):
x_train, y_train = get_minibatch(doc_stream, size = 1000)
if not x_train:
break
x_train = vect.transform(x_train)
clf.partial_fit(x_train, y_train, classes = classes)
pbar.update()
x_test, y_test = get_minibatch(doc_stream, size=5000)
x_test = vect.transform(x_test)
print('accuracy: %.3f' % clf.score(x_test, y_test))
###Output
accuracy: 0.805
|
0.14/_downloads/plot_objects_from_arrays.ipynb | ###Markdown
Creating MNE objects from data arrays

In this simple example, the creation of MNE objects from numpy arrays is demonstrated. In the last example case, a NEO file format is used as a source for the data.
###Code
# Author: Jaakko Leppakangas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import neo
import mne
print(__doc__)
###Output
_____no_output_____
###Markdown
Create arbitrary data
###Code
sfreq = 1000 # Sampling frequency
times = np.arange(0, 10, 0.001) # Use 10000 samples (10s)
sin = np.sin(times * 10) # Multiplied by 10 for shorter cycles
cos = np.cos(times * 10)
sinX2 = sin * 2
cosX2 = cos * 2
# Numpy array of size 4 X 10000.
data = np.array([sin, cos, sinX2, cosX2])
# Definition of channel types and names.
ch_types = ['mag', 'mag', 'grad', 'grad']
ch_names = ['sin', 'cos', 'sinX2', 'cosX2']
###Output
_____no_output_____
###Markdown
Create an :class:`info <mne.Info>` object.
###Code
# It is also possible to use info from another raw object.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
###Output
_____no_output_____
###Markdown
Create a dummy :class:`mne.io.RawArray` object
###Code
raw = mne.io.RawArray(data, info)
# Scaling of the figure.
# For actual EEG/MEG data different scaling factors should be used.
scalings = {'mag': 2, 'grad': 2}
raw.plot(n_channels=4, scalings=scalings, title='Data from arrays',
show=True, block=True)
# It is also possible to auto-compute scalings
scalings = 'auto' # Could also pass a dictionary with some value == 'auto'
raw.plot(n_channels=4, scalings=scalings, title='Auto-scaled Data from arrays',
show=True, block=True)
###Output
_____no_output_____
###Markdown
EpochsArray
###Code
event_id = 1 # This is used to identify the events.
# First column is for the sample number.
events = np.array([[200, 0, event_id],
[1200, 0, event_id],
[2000, 0, event_id]]) # List of three arbitrary events
# Here a data set of 700 ms epochs from 2 channels is
# created from sin and cos data.
# Any data in shape (n_epochs, n_channels, n_times) can be used.
epochs_data = np.array([[sin[:700], cos[:700]],
[sin[1000:1700], cos[1000:1700]],
[sin[1800:2500], cos[1800:2500]]])
ch_names = ['sin', 'cos']
ch_types = ['mag', 'mag']
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
epochs = mne.EpochsArray(epochs_data, info=info, events=events,
event_id={'arbitrary': 1})
picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
epochs.plot(picks=picks, scalings='auto', show=True, block=True)
###Output
_____no_output_____
###Markdown
EvokedArray
###Code
nave = len(epochs_data) # Number of averaged epochs
evoked_data = np.mean(epochs_data, axis=0)
evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
comment='Arbitrary', nave=nave)
evokeds.plot(picks=picks, show=True, units={'mag': '-'},
titles={'mag': 'sin and cos averaged'})
###Output
_____no_output_____
###Markdown
Create epochs by windowing the raw data.
###Code
# The events are spaced evenly every 1 second.
duration = 1.
# create a fixed size events array
# start=0 and stop=None by default
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
# for fixed size events no start time before and after event
tmin = 0.
tmax = 0.99 # inclusive tmax, 1 second epochs
# create :class:`Epochs <mne.Epochs>` object
epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, baseline=None, verbose=True)
epochs.plot(scalings='auto', block=True)
###Output
_____no_output_____
###Markdown
Create overlapping epochs using :func:`mne.make_fixed_length_events` (50 % overlap). This also roughly doubles the amount of events compared to the previous event list.
###Code
duration = 0.5
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
epochs = mne.Epochs(raw, events=events, tmin=tmin, tmax=tmax, baseline=None,
verbose=True)
epochs.plot(scalings='auto', block=True)
###Output
_____no_output_____
###Markdown
Extracting data from NEO file
###Code
# The example here uses the ExampleIO object for creating fake data.
# For actual data and different file formats, consult the NEO documentation.
reader = neo.io.ExampleIO('fakedata.nof')
bl = reader.read(cascade=True, lazy=False)[0]
# Get data from first (and only) segment
seg = bl.segments[0]
title = seg.file_origin
ch_names = list()
data = list()
for ai, asig in enumerate(seg.analogsignals):
# Since the data does not contain channel names, channel indices are used.
ch_names.append('Neo %02d' % (ai + 1,))
# We need the ravel() here because Neo < 0.5 gave 1D, Neo 0.5 gives
# 2D (but still a single channel).
data.append(asig.rescale('V').magnitude.ravel())
data = np.array(data, float)
sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
# By default, the channel types are assumed to be 'misc'.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
raw = mne.io.RawArray(data, info)
raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
show=True, block=True, clipping='clamp')
###Output
_____no_output_____ |
soluciones/jp.mallarino50/tarea3/tarea3.ipynb | ###Markdown
Task 3: Find the regression

You receive data $x$ and $y$ as shown below. You must answer four questions based on these data. Suppose you have a model such that $y=f(x)$ but, furthermore, you do not know $f$.
###Code
df = pd.read_pickle('ex1.gz')
sns.scatterplot(x='x',y='y',data=df)
plt.show()
df
###Output
_____no_output_____
###Markdown
(A) Slope and intercept

Determine the slope of the data on the interval $[0,1.5]$ and the value of the intercept with the $y$ axis. That is, $f(0)=?$. What is the value of $r^2$?

(B) Polynomial regression

Suppose you want to perform the following polynomial regression,
$$y=\beta_1+\beta_2x+\beta_3x^2+\beta_4x^3+\beta_5x^4+\beta_6x^5.$$
Set up the cost function that allows you to compute the coefficients, and compute $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, $\beta_5$ and $\beta_6$. What is the $r^2$?
Compute $f(0)$ and compare with the previous results.

(C) Exact polynomial regression

It turns out that any polynomial regression can actually be carried out exactly. How? Suppose you consider that your problem, instead of having $1$ variable ($x$), has $n+1$, where $n$ is the order of the polynomial to fit. That is, your new variables will be $\{x_0,\,x_1,\,x_2,\,x_3,\dots,\,x_n\}$, defining $x_j=x^j$. Then, following the same procedure as in the multidimensional linear regression we carried out for the real-estate data exercise, you can find the values of the coefficients $\beta_1$ through $\beta_6$. Find these values and compare with the results of section **(B)**.
Compute $f(0)$ and compare with the previous results.

> If you are wondering whether this is possible, the answer is yes. It can even be extended to any set of functions, such that $x_j=f_j(x)$, that forms a "linearly independent" set (I am getting ahead of *Fourier*!). For those who want to explore some mathematical curiosities: when $n+1$ equals the number of points or values of $x$ (all different), the matrix is always invertible and turns out to be the inverse of a Vandermonde matrix.
###Code
Y = df.loc[:, ['y']]
Y
X = df.loc[:, ['x']].rename(columns={'x': 'x1'})
X.insert(0, 'x0', 1)
X['x2'] = X['x1']*X['x1']
# add the remaining powers of x up to x^5 for the degree-5 fit
for j in range(3, 6):
    X['x%d' % j] = X['x1']**j
X
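# a possible next step (an added sketch, not from the original solution):
# exact fit via the normal equations, beta = (X^T X)^(-1) X^T Y
import numpy as np
beta = np.linalg.solve(X.T @ X, X.T @ Y)
beta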
###Output
_____no_output_____ |
notebooks/reports/Exp_1_single_datasets.ipynb | ###Markdown
Experiment 1: hyperparameter optimization on single datasets
###Code
# Import external libraries
import pickle
import random
# Import internal metalearning libraries
from src.experimenting.hopt_experiment import HoptExperiment
from src.metalearning.metadata import MetaDataset
from src.metalearning.warmstarter import Warmstarter
from src.pipeline_optimization.bayesian_hopt import BayesianHopt
from src.utils.metafeature_utils import pca_rank_cor, size
from src.visualization.visualizers import visualize_search_performance
# Import thesis specific objective and search space
from src.utils.thesis_utils import thesis_lookup_objective, thesis_search_space
###Output
_____no_output_____
###Markdown
Global parameters
###Code
max_evals = 50
duplicates = 5
n_init_configs = 5
###Output
_____no_output_____
###Markdown
Fixed parts of the experiment
###Code
# Initialize metadataset and calculate metafeatures
metadataset_sample_names = !ls ../../data/metadata/interim
metasamples = [pickle.load(open('../../data/metadata/interim/' + sample_name,"rb")) for sample_name in metadataset_sample_names]
metadataset_all = MetaDataset(metasamples, metafeature_functions=[size, pca_rank_cor])
# Initialize thesis objective, search space and targets for leave-one-out procedure
objective = thesis_lookup_objective
search_space = thesis_search_space()
target_ids = ['EAST_diff_17520', 'EAST_box_17520', 'NORTHC_box_8760']
###Output
Calculate metafeatures of metasamples: 0%| | 0/32 [00:00<?, ?it/s][A
Calculate metafeatures of metasamples: 6%|▋ | 2/32 [00:00<00:01, 16.81it/s][A
Calculate metafeatures of metasamples: 16%|█▌ | 5/32 [00:00<00:01, 18.21it/s][A
Calculate metafeatures of metasamples: 25%|██▌ | 8/32 [00:00<00:01, 20.31it/s][A
Calculate metafeatures of metasamples: 34%|███▍ | 11/32 [00:00<00:00, 21.42it/s][A
Calculate metafeatures of metasamples: 44%|████▍ | 14/32 [00:00<00:00, 23.02it/s][A
Calculate metafeatures of metasamples: 53%|█████▎ | 17/32 [00:00<00:00, 23.33it/s][A
Calculate metafeatures of metasamples: 62%|██████▎ | 20/32 [00:00<00:00, 24.46it/s][A
Calculate metafeatures of metasamples: 72%|███████▏ | 23/32 [00:00<00:00, 24.73it/s][A
Calculate metafeatures of metasamples: 81%|████████▏ | 26/32 [00:01<00:00, 25.72it/s][A
Calculate metafeatures of metasamples: 91%|█████████ | 29/32 [00:01<00:00, 25.46it/s][A
Calculate metafeatures of metasamples: 100%|██████████| 32/32 [00:01<00:00, 25.05it/s][A
###Markdown
Experiment definition
###Code
# initialize search strategies
rand = BayesianHopt(
identifier='Random search',
search_space=search_space,
objective=objective,
max_evals=max_evals,
nr_random_starts=max_evals
)
naive = BayesianHopt(
identifier='Naive',
search_space=search_space,
objective=objective,
max_evals=max_evals,
nr_random_starts=n_init_configs
)
warm = BayesianHopt(
identifier='Warmstarted',
search_space=search_space,
objective=objective,
max_evals=max_evals,
warmstarter=Warmstarter(metadataset_all, n_init_configs=n_init_configs, n_sim_samples=5, n_best_per_sample=5)
)
cold = BayesianHopt(
identifier='Coldstarted',
search_space=search_space,
objective=objective,
max_evals=max_evals,
warmstarter=Warmstarter(metadataset_all, n_init_configs=n_init_configs, n_sim_samples=5, n_best_per_sample=5, cold=True)
)
# initialize hyperoptimization experiment
hopt_exp = HoptExperiment(
hopts=[naive, warm, cold, rand],
duplicates=duplicates,
objective=objective,
metadataset=metadataset_all
)
###Output
_____no_output_____
###Markdown
Run experiment
###Code
hopt_exp.run_hopt_experiment(target_ids)
###Output
Target time series: 0%| | 0/3 [00:00<?, ?it/s][A
Target time series: 33%|███▎ | 1/3 [01:12<02:24, 72.12s/it][A
Target time series: 67%|██████▋ | 2/3 [02:28<01:13, 73.43s/it][A
Target time series: 100%|██████████| 3/3 [03:43<00:00, 74.37s/it][A
###Markdown
Visualize search performance on single datasets
###Code
from src.visualization.visualizers import visualize_search_performance
visualize_search_performance(hopt_exp._hopts[2],crossvalidation=True)
from src.visualization.visualizers import visualize_walltime_comparison  # assumed to live alongside visualize_search_performance
visualize_walltime_comparison(hopt_exp)
import plotly.graph_objects as go
from plotly.subplots import make_subplots
hopt_exp._hopts[0].results
import pandas as pd
rolling_min = pd.Series(
[hopt_exp._hopts[0].results.loc[:i, ("results", "loss")].min() for i in range(len(hopt_exp._hopts[0].results))]
)
rolling_min
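# --- Added check (not in the original notebook): pandas' vectorized cummin
# computes the same running minimum, assuming the results have a default RangeIndex ---
rolling_min_fast = hopt_exp._hopts[0].results[("results", "loss")].cummin().reset_index(drop=True)
assert rolling_min_fast.equals(rolling_min)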
idx = rolling_min.index
fig = go.Figure()
fig.add_trace(go.Scatter(x=idx, y=rolling_min, mode="lines", name="Best so far"))
fig.show()
colors = ['red','blue','green','purple']
legend = [True, False, False]
fig = make_subplots(
rows=1,
cols=3,
subplot_titles=('Dataset (a)', 'Dataset (b)', 'Dataset (c)')
)
for i, sample_id in enumerate(target_ids):
# transform to best so far dataframe
data = hopt_exp.best_so_far[sample_id].mean(level='iterations')
for j, identifier in enumerate([hopt.identifier for hopt in hopt_exp._hopts]):
fig.add_trace(go.Scatter(
y = data[identifier],
name = identifier,
line = {'color': colors[j]},
showlegend = legend[i]
),row=1, col=i+1)
fig.update_xaxes(title_text='Iterations', row=1, col=i+1)
fig.update_layout(
width=1000,
yaxis=go.layout.YAxis(title='MAE')
)
fig.show()
###Output
_____no_output_____
###Markdown
Old stuff: Visualizers
###Code
hopt_exp.visualize_avg_ranks()
###Output
_____no_output_____
###Markdown
Visualize search performance on single datasets
###Code
import plotly.graph_objects as go
from plotly.subplots import make_subplots
colors = ['red','blue','green','black']
legend = [True, False, False]
fig = make_subplots(
rows=1,
cols=3,
subplot_titles=('Dataset (a)', 'Dataset (b)', 'Dataset (c)')
)
for i, sample_id in enumerate(target_ids):
# transform to best so far dataframe
data = hopt_exp.best_so_far[sample_id].mean(level='iterations')
for j, identifier in enumerate([hopt.identifier for hopt in hopt_exp._hopts]):
fig.add_trace(go.Scatter(
y = data[identifier],
name = identifier,
line = {'color': colors[j]},
showlegend = legend[i]
),row=1, col=i+1)
fig.update_xaxes(title_text='Iterations', row=1, col=i+1)
fig.update_layout(
width=1000,
yaxis=go.layout.YAxis(title='MAE')
)
fig.show()
###Output
_____no_output_____
###Markdown
Visualize performance heatmap of a single dataset
###Code
import pandas as pd
intermediate = hopt_exp.results.stack(0)
intermediate.columns = pd.Index(['Coldstarted', 'Bayesian', 'Random search','Warmstarted'], name='hopt')
hopt_exp.results = intermediate.stack().unstack(2).unstack(2)
sample_id = target_ids[2]
hopt_ids = [hopt.identifier for hopt in hopt_exp._hopts]
result = hopt_exp.results[sample_id]
fig = make_subplots(rows=1, cols=len(hopt_ids), subplot_titles=hopt_ids)
for j, hopt_id in enumerate(hopt_ids):
data = result[hopt_id]
x = list(data.index.levels[1]) * hopt_exp._duplicates
y = data.values
fig.add_trace(go.Histogram2dContour(x=x, y=y, name=hopt_id, showlegend=False), row=1, col=j + 1)
fig.update_yaxes(title_text="Mean squared error")
fig.update_xaxes(title_text="Iterations")
fig.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/syntheticdata-checkpoint.ipynb | ###Markdown
Synthetic data
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from synthesize_data import synthesize_data
import expectation_reflection as ER
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)
def inference(X_train,y_train,X_test,y_test,method='expectation_reflection'):
if method == 'expectation_reflection':
h0,w = ER.fit(X_train,y_train,niter_max=20,regu=0.)
y_pred = ER.predict(X_test,h0,w)
y_pred_train = ER.predict(X_train,h0,w)
else:
if method == 'logistic_regression':
model = LogisticRegression(solver='liblinear')
if method == 'naive_bayes':
model = GaussianNB()
if method == 'random_forest':
model = RandomForestClassifier(criterion = "gini", random_state = 1,
max_depth=3, min_samples_leaf=5,n_estimators=100)
if method == 'decision_tree':
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred_train = model.predict(X_train)
accuracy = accuracy_score(y_test,y_pred)
return accuracy
list_methods=['logistic_regression','naive_bayes','random_forest','decision_tree','expectation_reflection']
def compare_inference(X,y,train_size):
npred = 1000
accuracy = np.zeros((len(list_methods),npred))
precision = np.zeros((len(list_methods),npred))
recall = np.zeros((len(list_methods),npred))
accuracy_train = np.zeros((len(list_methods),npred))
for ipred in range(npred):
#X, y = shuffle(X, y)
X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)
idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y)),replace=False)
X_train,y_train = X_train0[idx_train],y_train0[idx_train]
for i,method in enumerate(list_methods):
accuracy[i,ipred] = inference(X_train,y_train,X_test,y_test,method)
return accuracy.mean(axis=1)
def plot_accuracy():
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc[:,0],'k--',marker='o',mfc='none',label='Logistic Regression')
plt.plot(list_train_size,acc[:,1],'b--',marker='s',mfc='none',label='Naive Bayes')
plt.plot(list_train_size,acc[:,2],'r--',marker='^',mfc='none',label='Random Forest')
#plt.plot(list_train_size,acc[:,3],'b--',label='Decision Tree')
plt.plot(list_train_size,acc[:,4],'k-',marker='o',label='Expectation Reflection')
plt.xlabel('train size')
plt.ylabel('accuracy')
plt.legend()
###Output
_____no_output_____
###Markdown
Binary variables
###Code
l = 500 ; n = 40 ; g = 10.
X,y = synthesize_data(l,n,g,data_type='binary')
np.unique(y,return_counts=True)
list_train_size = [0.8,0.6,0.4,0.2]
acc = np.zeros((len(list_train_size),len(list_methods)))
for i,train_size in enumerate(list_train_size):
acc[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
df = pd.DataFrame(acc,columns = list_methods)
df.insert(0, "train_size",list_train_size, True)
df
plot_accuracy()
###Output
_____no_output_____
###Markdown
Continuous variables
###Code
l = 500 ; n = 40 ; g = 10.
X,y = synthesize_data(l,n,g,data_type='continuous')
np.unique(y,return_counts=True)
for i,train_size in enumerate(list_train_size):
acc[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
df = pd.DataFrame(acc,columns = list_methods)
df.insert(0, "train_size",list_train_size, True)
df
plot_accuracy()
###Output
_____no_output_____
###Markdown
Categorical variables
###Code
l = 500 ; n = 10 ; g = 10.
X,y = synthesize_data(l,n,g,data_type='categorical')
np.unique(y,return_counts=True)
for i,train_size in enumerate(list_train_size):
acc[i,:] = compare_inference(X,y,train_size)
print(train_size,acc[i,:])
df = pd.DataFrame(acc,columns = list_methods)
df.insert(0, "train_size",list_train_size, True)
df
plot_accuracy()
###Output
_____no_output_____ |
examples/ch10/snippets_ipynb/10_12selfcheck.ipynb | ###Markdown
 10.12 Self Check **2. _(IPython Session)_** Create a `namedtuple` called `Time` with members named `hour`, `minute` and `second`. Then, create a `Time` object, access its members and display its string representation.**Answer:**
###Code
from collections import namedtuple
Time = namedtuple('Time', ['hour', 'minute', 'second'])
t = Time(13, 30, 45)
print(t.hour, t.minute, t.second)
t
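# --- Added demo (not part of the book's snippet): namedtuples are immutable,
# but _replace returns a copy with the selected fields changed ---
t._replace(hour=14)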
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
###Output
_____no_output_____ |
Deep Learning/Subjective Assignment - 7 - Numpy 3.ipynb | ###Markdown
Assignment
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Q1. Create two arrays of six elements. Write a NumPy program to count the number of instances of a value occurring in one array on the condition of another array. Sample output: Original arrays- [ 10 -10 10 -10 -10 10] [0.85 0.45 0.9 0.8 0.12 0.6 ] Number of instances of a value occurring in one array on the condition of another array: 3
###Code
a = np.array( [ 10, -10, 10, -10, -10, 10] )
b = np.array( [0.85, 0.45, 0.9, 0.8, 0.12, 0.6 ] )
c = np.sum( (a == 10) & (b >= .6))
print('Number of instances of a value occurring in one array on the condition of another')
print(c)
###Output
Number of instances of a value occurring in one array on the condition of another
3
###Markdown
Q2. Write a NumPy program to convert a Python dictionary to a NumPy ndarray. Original dictionary- {'column0': {'a': 1, 'b': 0.0, 'c': 0.0, 'd': 2.0}, 'column1': {'a': 3.0, 'b': 1, 'c': 0.0, 'd': -1.0}, 'column2': {'a': 4, 'b': 1, 'c': 5.0, 'd': -1.0}, 'column3': {'a': 3.0, 'b': -1.0, 'c': -1.0, 'd': -1.0}} Type: dict. Expected ndarray- [[ 1. 0. 0. 2.] [ 3. 1. 0. -1.] [ 4. 1. 5. -1.] [ 3. -1. -1. -1.]] Type: numpy.ndarray
###Code
from ast import literal_eval
udict = """{"column0":{"a":1,"b":0.0,"c":0.0,"d":2.0},
"column1":{"a":3.0,"b":1,"c":0.0,"d":-1.0},
"column2":{"a":4,"b":1,"c":5.0,"d":-1.0},
"column3":{"a":3.0,"b":-1.0,"c":-1.0,"d":-1.0}
}"""
t = literal_eval(udict)
print("\nOriginal dictionary:")
print(t)
print("Type: ",type(t))
result_nparra = np.array([[v[j] for j in ['a', 'b', 'c', 'd']] for k, v in t.items()])
print("\nndarray:")
print(result_nparra)
print("Type: ",type(result_nparra))
###Output
Original dictionary:
{'column0': {'a': 1, 'b': 0.0, 'c': 0.0, 'd': 2.0}, 'column1': {'a': 3.0, 'b': 1, 'c': 0.0, 'd': -1.0}, 'column2': {'a': 4, 'b': 1, 'c': 5.0, 'd': -1.0}, 'column3': {'a': 3.0, 'b': -1.0, 'c': -1.0, 'd': -1.0}}
Type: <class 'dict'>
ndarray:
[[ 1. 0. 0. 2.]
[ 3. 1. 0. -1.]
[ 4. 1. 5. -1.]
[ 3. -1. -1. -1.]]
Type: <class 'numpy.ndarray'>
###Markdown
Q3. Write a NumPy program to find and store the non-zero unique rows in an array after comparing each row with the other rows in the given matrix. Original array- [[ 1 1 0] [ 0 0 0] [ 0 2 3] [ 0 0 0] [ 0 -1 1] [ 0 0 0]] Non-zero unique rows- [[ 1 1 0] [ 0 2 3] [ 0 -1 1]]
###Code
# https://www.w3resource.com/python-exercises/numpy/python-numpy-exercise-172.php
x = np.array([[1, 1, 0],
[0, 0, 0],
[0, 2, 3],
[0, 0, 0],
[0, -1, 1],
[0, 0, 0]])
temp = {(0, 0, 0 )}
result = []
for idx, row in enumerate(map(tuple, x)):
if row not in temp:
result.append(idx)
print('Non-zero unique rows-')
print(x[result])
###Output
Non-zero unique rows-
[[ 1 1 0]
[ 0 2 3]
[ 0 -1 1]]
###Markdown
Q4. Write a NumPy program to multiply a matrix by another matrix of complex numbers and create a new matrix of complex numbers. Sample output: First array- [ 1.+2.j 3.+4.j] Second array- [ 5.+6.j 7.+8.j] Product of the above two arrays- (70-8j)
###Code
x = np.array([1+2j, 3+4j])
y = np.array([5+6j, 7+8j])
z = np.vdot(x,y)
print(f'Matrix product of the complex umber is {z}')
###Output
Matrix product of the complex umber is (70-8j)
###Markdown
Q5. Write a NumPy program to generate the matrix product of two arrays. Sample output: Matrices and vectors. x- [[1, 0], [1, 1]] y- [[3, 1], [2, 2]] Matrix product of the above two arrays- [[3 1] [5 3]]
###Code
x = np.array([[1,0],[1,1] ])
y = np.array([[3,1],[2,2]])
z = x @ y
print(f'matrix product of above two arrays- {z}')
###Output
matrix product of above two arrays- [[3 1]
[5 3]]
###Markdown
Q6. Write a NumPy program to find the roots of the following polynomials: a) x^2 - 2x + 1 b) x^4 - 12x^3 + 10x^2 + 7x - 10 (polynomials restated here to match the coefficients used in the code and the sample output below). Sample output: Roots of the first polynomial- [ 1. 1.] Roots of the second polynomial- [ 11.04461946+0.j -0.87114210+0.j 0.91326132+0.4531004j 0.91326132-0.4531004j ]
###Code
# Coefficient of eqn a
coeff_a = [1, -2, 1]
print(f'Roots of the first polynomial- {np.roots(coeff_a)}')
# Coefficient of eqn b
coeff_b = [1, -12, 10, 7, -10]
print(f'Roots of the second polynomial- {np.roots(coeff_b)}')
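# --- Added check (not in the original assignment): evaluating the polynomial
# at its computed roots with np.polyval gives values ~0 up to floating-point error ---
print(np.polyval(coeff_b, np.roots(coeff_b)))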
###Output
Roots of the first polynomial- [1. 1.]
Roots of the second polynomial- [11.04461946+0.j -0.8711421 +0.j 0.91326132+0.4531004j
0.91326132-0.4531004j]
###Markdown
Q7. Write a NumPy program to calculate the inverse sine, inverse cosine, and inverse tangent for all elements in a given array. Sample output: Inverse sine- [-1.57079633 0. 1.57079633] Inverse cosine- [3.14159265 1.57079633 0. ] Inverse tangent- [-0.78539816 0. 0.78539816]
###Code
x = np.array([-1., 0, 1.])
print(f'Inverse sine-{np.arcsin(x)}')
print(f'Inverse cosine-{np.arccos(x)}')
print(f'Inverse tangent-{np.arctan(x)}')
###Output
Inverse sine-[-1.57079633 0. 1.57079633]
Inverse cosine-[3.14159265 1.57079633 0. ]
Inverse tangent-[-0.78539816 0. 0.78539816]
###Markdown
Q8. Write a NumPy program to calculate the difference between neighbouring elements, element-wise, of a given array. Sample output: Original array- [1 3 5 7 0] Difference between neighbouring elements, element-wise, of the said array- [ 2 2 2 -7]
###Code
x = np.array([1, 3, 5, 7, 0])
print(f'Difference between neighboring elements, element-wise of the said array-{np.diff(x)}')
###Output
Difference between neighboring elements, element-wise of the said array-[ 2 2 2 -7]
###Markdown
Q9. Write a Python program to find the maximum and the minimum value of a given flattened array. Expected output: Original flattened array- [[0 1] [2 3]] Maximum value of the above flattened array: 3 Minimum value of the above flattened array: 0
###Code
x = np.arange(4)
x = x.reshape((2,2))  # build the 2x2 array referred to as the "flattened array" in the exercise
print(f'Original flattened array- {x}')
print(f'Maximum value of the above flattened array- {np.amax(x)}')
print(f'Minimum value of the above flattened array- {np.amin(x)}')
###Output
Original flattened array- [[0 1]
[2 3]]
Maximum value of the above flattened array- 3
Minimum value of the above flattened array- 0
###Markdown
Q10. Write a NumPy program to calculate the difference between the maximum and the minimum values of a given array along the second axis. Expected output: Original array- [[ 0 1 2 3 4 5] [ 6 7 8 9 10 11]] Difference between the maximum and the minimum values of the said array- [5 5]
###Code
x = np.arange(12).reshape((2,6))
print('Original array-')
print(x)
r1 = np.ptp(x , 1)
print(r1)
r2 = np.max(x,1) - np.min(x,1)
print(r2)
print('Difference between the maximum and the minimum values of the said array-')
print(r1)
###Output
Original array-
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]]
[5 5]
[5 5]
Difference between the maximum and the minimum values of the said array-
[5 5]
###Markdown
Q11. Write a NumPy program to compute the weighted average of the given array. Sample output: Original array- [0 1 2 3 4] Weighted average of the said array: 2.6666666666666665
###Code
x = np.array([0, 1, 2, 3, 4])
print('Original array-')
print(x)
weights = np.arange(1, 6)
print('\nWeighted average of the said array-')
r1 = np.average(x, weights = weights)
r2 = (x*(weights/weights.sum())).sum()
assert np.allclose(r1, r2)
print("\nWeighted average of the said array:")
print(r1)
###Output
Original array-
[0 1 2 3 4]
Weighted average of the said array-
Weighted average of the said array:
2.6666666666666665
###Markdown
Q12. Write a NumPy program to compute the mean, standard deviation, and variance of a given array along the second axis. Sample output: Original array- [0 1 2 3 4 5] Mean- 2.5 std- 1.707825127659933 variance- 2.9166666666666665
###Code
x = np.arange(6)
print('Original array-')
print(x)
mean = np.mean(x)
avg = np.average(x)
print('\nMean-', mean)
std = np.std(x)
print('\nstd-', std)
var = np.var(x)
print('\nvariance-', var)
###Output
Original array-
[0 1 2 3 4 5]
Mean- 2.5
std- 1.707825127659933
variance- 2.9166666666666665
###Markdown
Q13. Write a NumPy program to compute the covariance matrix of the two given arrays. Sample output: Original array1- [0 1 2] Original array2- [2 1 0] Covariance matrix of the said arrays- [[ 1. -1.] [-1. 1.]]
###Code
x = np.array([0, 1, 2])
y = np.array([2, 1, 0])
print('\nOriginal array1-')
print(x)
print('\nOriginal array-2')
print(y)
print('\nCovariance matrix of the said arrays:\n')
print(np.cov(x,y))
###Output
Original array1-
[0 1 2]
Original array-2
[2 1 0]
Covariance matrix of the said arrays:
[[ 1. -1.]
[-1. 1.]]
###Markdown
Q14. Write a NumPy program to compute the cross-correlation of two given arrays. Sample output: Original array1- [0 1 3] Original array2- [2 4 5] Cross-correlation of the said arrays- [ [ 2.33333333 2.16666667 ] [ 2.16666667 2.33333333 ] ]
###Code
x = np.array([0, 1, 3])
y = np.array([2, 4, 5])
print('\nOriginal array1-')
print(x)
print('\nOriginal array2-')
print(y)
print('\nCross-correlation of the said arrays-')
print(np.cov(x, y))
###Output
Original array1-
[0 1 3]
Original array2-
[2 4 5]
Cross-correlation of the said arrays-
[[2.33333333 2.16666667]
[2.16666667 2.33333333]]
###Markdown
Q15. Write a NumPy program to compute the Pearson product-moment correlation coefficients of two given arrays. Sample output: Original array1- [0 1 3] Original array2- [2 4 5] Pearson product-moment correlation coefficients of the said arrays- [[1. 0.92857143] [0.92857143 1. ]]
###Code
x = np.array([0, 1, 3])
y = np.array([2, 4, 5])
print('\nOriginal array1-')
print(x)
print('\nOriginal array2-')
print(y)
print('\nPearson product-moment correlation coefficients of the said arrays')
np.corrcoef(x, y)
###Output
Original array1-
[0 1 3]
Original array2-
[2 4 5]
Pearson product-moment correlation coefficients of the said arrays
###Markdown
Q16. Write a Python program to count the number of occurrences of each value in a given array of non-negative integers. Note: the bincount() function counts the occurrences of each value in an array of non-negative integers in the range between the minimum and maximum values of the array, including the values that did not occur. Sample output: Original array- [0, 1, 6, 1, 4, 1, 2, 2, 7] Number of occurrences of each value in array- [1 3 2 0 1 0 1 1]
###Code
x = [0, 1, 6, 1, 4, 1, 2, 2, 7]
print('Original array')
print(x)
print('Number of occurrences of each value in array-')
print(np.bincount(x))
###Output
Original array
[0, 1, 6, 1, 4, 1, 2, 2, 7]
Number of occurrences of each value in array-
[1 3 2 0 1 0 1 1]
###Markdown
Q17. Write a NumPy program to compute the histogram of nums against the bins. Sample output: nums- [0.5 0.7 1. 1.2 1.3 2.1] bins- [0 1 2 3] Result- (array([2, 3, 1], dtype=int64), array([0, 1, 2, 3]))
###Code
import matplotlib.pyplot as plt
%matplotlib inline
nums = np.array([0.5, 0.7, 1.0, 1.2, 1.3, 2.1])
bins = np.array([0, 1, 2, 3])
print('nums- ', nums)
print('bins-', bins)
print('Result-', np.histogram(nums, bins))
plt.hist(nums, bins=bins)
plt.show()
###Output
nums- [0.5 0.7 1. 1.2 1.3 2.1]
bins- [0 1 2 3]
Result- (array([2, 3, 1], dtype=int64), array([0, 1, 2, 3]))
|
JUAN-PABLO-MONSALVE-01-Estaciones-Eolicas.ipynb | ###Markdown
Requirement 1 > Generate a unified table containing the following columns: station name; date in YYYY-MM-DD format; year; month; day; HHMMSS; hour (HH); minute (MM); direction; speed. APPRENTICE: Juan Pablo Monsalve FICHA: 1751400 WORKSHOP START DATE: 5/12/2019
###Code
estacion1 = open('files/06/Estacion1.csv').readlines()
estacion1 = [row[0:-1]for row in estacion1]
estacion1 = [row.split(',') for row in estacion1]
estacion1 = estacion1[1:]
#estacion1
estacion2 = open('files/06/Estacion2.csv').readlines()
estacion2 = [row[0:-1]for row in estacion2]
estacion2 = [row.split(',') for row in estacion2]
#estacion2 = estacion2[1:]
#estacion2
estacion3 = open('files/06/Estacion3.csv').readlines()
estacion3 = [row[0:-1]for row in estacion3]
estacion3 = [row.split(',') for row in estacion3]
#estacion3 = estacion3[1:]
#estacion3
estacion4 = open('files/06/Estacion4.csv').readlines()
estacion4 = [row[0:-1]for row in estacion4]
estacion4 = [row.split(',') for row in estacion4]
#estacion4 = estacion4[1:]
#estacion4
import time,datetime
#Function to read and transform the station files
def filetrans(file):
estacion = open(file, 'r').readlines()
transformacion = [row[0:-1]for row in estacion]
transformacion = [row.split(';')for row in transformacion]
transformacion = transformacion[1:]
return transformacion
data = filetrans('files/06/Estacion1.csv')
#Add the station-name column
[[row[0].lower()]for row in data]
data = [[str('Estacion1')] + row for row in data]
#change the date format
def changeFormat(s):
return datetime.datetime.strptime(s, '%d/%m/%y').strftime('%Y/%m/%d')
#Return the reformatted date (note: the code emits YYYY/MM/DD, while the requirement asks for YYYY-MM-DD)
data = [[row[0],changeFormat(row[1]),row[2],row[3],row[4]]for row in data]
#Extract the year
def YEAR(s):
return datetime.datetime.strptime(s, '%Y/%m/%d').strftime('%Y')
#Capture it in a new column
data = [[YEAR(row[1])] + row for row in data]
#reorder the columns
data = [[row[1],row[2],row[0],row[3],row[4],row[5],]for row in data]
data
def MONTH(s):
return datetime.datetime.strptime(s, '%Y/%m/%d').month
data = [[MONTH(row[1])] + row for row in data]
data = [[row[1],row[2],row[3],row[0],row[4],row[5],row[6]]for row in data]
#Extract the day of the month
def DAYOFMONTH(s):
return datetime.datetime.strptime(s, '%Y/%m/%d').strftime('%d')
data = [[DAYOFMONTH(row[1])] + row for row in data]
data = [[row[1],row[2],row[3],row[4],row[0],row[5],row[6],row[7]]for row in data]
def HOUR(s):
return datetime.datetime.strptime(s, '%H:%M:%S').strftime('%H')
data = [[HOUR(row[5])] + row for row in data]
data = [[row[1],row[2],row[3],row[4],row[5],row[6],row[0],row[7],row[8]]for row in data]
def MINUTE(s):
return datetime.datetime.strptime(s, '%H:%M:%S').strftime('%M')
data = [[MINUTE(row[5])] + row for row in data]
data = [[row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[0],row[8],row[9]]for row in data]
data
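# --- Added sketch (not part of the original solution): the same unified table
# built with pandas; the CSV column names below are assumptions inferred from
# the positional indexing used above ---
import pandas as pd
est = pd.read_csv('files/06/Estacion1.csv', sep=';', header=0,
                  names=['fecha', 'hora', 'direccion', 'velocidad'])
est.insert(0, 'estacion', 'Estacion1')
fechas = pd.to_datetime(est['fecha'], format='%d/%m/%y')
horas = pd.to_datetime(est['hora'], format='%H:%M:%S')
est['fecha'] = fechas.dt.strftime('%Y-%m-%d')
est['anio'], est['mes'], est['dia'] = fechas.dt.year, fechas.dt.month, fechas.dt.day
est['HH'], est['MM'] = horas.dt.strftime('%H'), horas.dt.strftime('%M')
est.head()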
import csv
encabezados = [['Nombre de la estación' ,'Fecha en el formato YYYY-MM-DD' ,'Año' ,'Mes' ,'Dia', 'HHMMSS' ,'Hora (HH)' ,'Minuto (MM)','Dirección' ,'Velocidad']]
with open('files/06/EST1_Trasnf.csv','w',newline = '')as file:
writer = csv.writer(file,delimiter = ',')
writer.writerows(encabezados)
writer.writerows(data)
print('Hecho')
###Output
Hecho
|
protokoller/kryptering.ipynb | ###Markdown
Text to numbers
###Code
def converter(M):
    # Encode each character as ord(char)+100, then pack the values into a
    # single integer using base-1000 digits (least significant chunk first).
    M = list(M)
    for i in range(len(M)):
        M[i] = ord(M[i])+100
    N = 0
    for i in range(len(M)):
        N += M[i]*(1000**i)
    return N
def inverter(C):
    # Unpack the base-1000 digits of C and map each chunk back to a character.
    M = []
    for i in range(len(str(C))//3,-1,-1):
        M.append(int(C//(1000**i)))
    for i in range(len(M)-1,0,-1):
        M[i] = chr((M[i]-M[i-1]*1000)-100)
    M.reverse()
    del M[-1]
    N = ""
    return N.join(M)
###Output
_____no_output_____
###Markdown
Elgamal:
###Code
def code_elgamal(M,p,ga,b):
return M*pow(ga,b,p)%p
def decode_elgamal(C,p,gb,a):
    return C*pow(pow(gb,a,p),-1,p)%p  # pow(x, -1, p) is the modular inverse (Python 3.8+); 'inverse' was undefined
###Output
_____no_output_____
###Markdown
RSA:
###Code
def code_RSA(M,n,e):
return pow(M,e,n)
def decode_RSA(C,n,d):
return pow(C,d,n)
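# --- Added usage sketch (not in the original notebook): an end-to-end round
# trip with tiny, insecure parameters chosen purely for illustration ---
p, q = 1009, 2003                # assumed toy primes; real RSA needs far larger ones
n, phi = p*q, (p-1)*(q-1)
e = 17
d = pow(e, -1, phi)              # modular inverse (Python 3.8+)
M = converter("Hi")              # message as an integer; must satisfy M < n
C = code_RSA(M, n, e)
assert inverter(decode_RSA(C, n, d)) == "Hi"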
###Output
_____no_output_____ |
examples/pretrain/seg_token/d2v.ipynb | ###Markdown
d2v
###Code
import warnings
from tqdm import tqdm
import json
from EduNLP.utils import dict2str4sif
def load_items():
with open("../../../data/OpenLUNA.json", encoding="utf-8") as f:
for line in f:
yield json.loads(line)
from EduNLP.Pretrain import GensimSegTokenizer
tokenizer = GensimSegTokenizer(depth=None)
sif_items = []
for item in tqdm(load_items(), "sifing"):
keys = ["stem"]
item["options"] = eval(item["options"])
if item["options"]:
keys.append("options")
try:
item_str = dict2str4sif(
item,
key_as_tag=True,
add_list_no_tag=False,
keys=keys,
tag_mode="head"
)
except TypeError:
continue
sif_item = tokenizer(
item_str
)
if sif_item:
sif_items.append(sif_item)
sif_items[0]
len(sif_items)
from EduNLP.Pretrain import train_vector
from gensim.models.doc2vec import TaggedDocument
train_vector(
sif_items,
"../../../data/w2v/gensim_luna_stem_tf_",
10
)
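# --- Added sketch (not the EduNLP API): roughly equivalent training with plain
# gensim Doc2Vec over the tokenized items; the flattening of segmented items
# below is an assumption about the tokenizer's output structure ---
from gensim.models.doc2vec import Doc2Vec
docs = [
    TaggedDocument(words=[w for seg in item for w in seg] if item and isinstance(item[0], list) else list(item),
                   tags=[i])
    for i, item in enumerate(sif_items)
]
d2v = Doc2Vec(docs, vector_size=10, min_count=1, epochs=10)
print(d2v.infer_vector(docs[0].words))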
###Output
_____no_output_____ |
13/TF-IDF Homework.ipynb | ###Markdown
Homework 14 (or so): TF-IDF text analysis and clustering. Hooray, we kind of figured out how text analysis works! Some of it is still magic, but at least the **TF** and **IDF** parts make a little sense. Kind of. Somewhat. No, just kidding, we're *professionals* now. Investigating the Congressional Record. The [Congressional Record](https://en.wikipedia.org/wiki/Congressional_Record) is more or less what happened in Congress every single day. Speeches and all that. A good large source of text data, maybe? Let's pretend it's totally secret but we just got it leaked to us in a data dump, and we need to check it out. It was leaked from [this page here](http://www.cs.cornell.edu/home/llee/data/convote.html).
###Code
# If you'd like to download it through the command line...
!curl -O http://www.cs.cornell.edu/home/llee/data/convote/convote_v1.1.tar.gz
# And then extract it through the command line...
!tar -zxf convote_v1.1.tar.gz
###Output
_____no_output_____
###Markdown
You can explore the files if you'd like, but we're going to get the ones from `convote_v1.1/data_stage_one/development_set/`. It's a bunch of text files.
###Code
# glob finds files matching a certain filename pattern
import glob
# Give me all the text files
paths = glob.glob('convote_v1.1/data_stage_one/development_set/*')
paths[:5]
len(paths)
###Output
_____no_output_____
###Markdown
So great, we have 702 of them. Now let's import them.
###Code
speeches = []
for path in paths:
with open(path) as speech_file:
speech = {
'pathname': path,
'filename': path.split('/')[-1],
'content': speech_file.read()
}
speeches.append(speech)
speeches_df = pd.DataFrame(speeches)
speeches_df.head()
###Output
_____no_output_____
###Markdown
In class we had the `texts` variable. For the homework you can just do `speeches_df['content']` to get the same sort of list of stuff. **Take a look at the contents of the first 5 speeches**
###Code
speeches_df['content'].head(5)
###Output
_____no_output_____
###Markdown
Doing our analysis. Use the `sklearn` package and a plain boring `CountVectorizer` to get a list of all of the tokens used in the speeches. If it won't list them all, that's ok! Make a dataframe with those terms as columns. **Be sure to filter out English-language stopwords**
###Code
from sklearn.feature_extraction.text import CountVectorizer
count_vectorizer = CountVectorizer(max_features=100, stop_words='english')
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from nltk.stem.porter import PorterStemmer
X = count_vectorizer.fit_transform(speeches_df['content'])
X.toarray()
###Output
_____no_output_____
###Markdown
Okay, it's **far** too big to even look at. Let's try to get a list of features from a new `CountVectorizer` that only takes the top 100 words.
###Code
tophundred_df = pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
tophundred_df
###Output
_____no_output_____
###Markdown
Now let's push all of that into a dataframe with nicely named columns. Everyone seems to start their speeches with "mr chairman" - how many speeches are there total, how many don't mention "chairman", and how many mention neither "mr" nor "chairman"?
###Code
mrchairman_df = pd.DataFrame([tophundred_df['mr'], tophundred_df['chairman'], tophundred_df['mr'] + tophundred_df['chairman']], index=["mr", "chairman", "mr + chairman"]).T
mrchairman_df
num_speeches = len(mrchairman_df)
mrmention_df = mrchairman_df[(mrchairman_df['mr'] > 0)]
mr_mention = len(mrmention_df)
mrorchairmanmention_df = mrchairman_df[(mrchairman_df['mr + chairman'] > 0)]
mrorchair_mention = len(mrorchairmanmention_df)
print("There are",num_speeches,"speeches. Only", num_speeches - mr_mention, "do not mention mr and ", num_speeches - mrorchair_mention, "do not mention mr or chairman")
tophundred_df.columns
###Output
_____no_output_____
###Markdown
What is the index of the speech that is the most thankful, a.k.a. includes the word 'thank' the most times?
###Code
tophundred_df['thank'].sort_values(ascending=False).head(1) # thank is not in the top 100 words unless you remove stop words
###Output
_____no_output_____
###Markdown
If I'm searching for `China` and `trade`, what are the top 3 speeches to read according to the `CountVectoriser`?
###Code
chinatrade_df = pd.DataFrame([tophundred_df['china'] + tophundred_df['trade']], index=["China + trade"]).T
chinatrade_df['China + trade'].sort_values(ascending=False).head(3)
###Output
_____no_output_____
###Markdown
Now what if I'm using a `TfidfVectorizer`?
###Code
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
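# --- Added demo (not in the original homework): what the stemming tokenizer
# produces; stems like 'elect' and 'quickli' are expected Porter output ---
print(stemming_tokenizer("Running the elections quickly"))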
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer(stop_words='english', tokenizer=stemming_tokenizer, use_idf=False, norm='l1', max_features = 100)
X = tfidf_vectorizer.fit_transform(speeches_df['content'])
tfidf_df = pd.DataFrame(X.toarray(), columns=tfidf_vectorizer.get_feature_names())
# Use the tf-idf scores (the original cell accidentally reused the raw counts)
chinatrade_tfidfpd = pd.DataFrame([tfidf_df['china'] + tfidf_df['trade']], index=["China + trade"]).T
chinatrade_tfidfpd['China + trade'].sort_values(ascending=False).head(3)
###Output
_____no_output_____
###Markdown
**What's the content of the speeches?** Here's a way to get them:
###Code
# index 0 is the first speech, which was the first one imported.
paths[0]
# Pass that into 'cat' using { } which lets you put variables in shell commands
# that way you can pass the path to cat
!cat {paths[0]}
###Output
mr. chairman , i thank the gentlewoman for yielding me this time .
my good colleague from california raised the exact and critical point .
the question is , what happens during those 45 days ?
we will need to support elections .
there is not a single member of this house who has not supported some form of general election , a special election , to replace the members at some point .
but during that 45 days , what happens ?
the chair of the constitution subcommittee says this is what happens : martial law .
we do not know who would fill the vacancy of the presidency , but we do know that the succession act most likely suggests it would be an unelected person .
the sponsors of the bill before us today insist , and i think rightfully so , on the importance of elections .
but to then say that during a 45-day period we would have none of the checks and balances so fundamental to our constitution , none of the separation of powers , and that the presidency would be filled by an unelected member of the cabinet who not a single member of this country , not a single citizen , voted to fill that position , and that that person would have no checks and balances from congress for a period of 45 days i find extraordinary .
i find it inconsistent .
i find it illogical , and , frankly , i find it dangerous .
the gentleman from wisconsin refused earlier to yield time , but i was going to ask him , if virginia has those elections in a shorter time period , they should be commended for that .
so now we have a situation in the congress where the virginia delegation has sent their members here , but many other states do not have members here .
do they at that point elect a speaker of the house in the absence of other members ?
and then three more states elect their representatives , temporary replacements , or full replacements at that point .
they come in .
do they elect a new speaker ?
and if that happens , who becomes the president under the succession act ?
this bill does not address that question .
this bill responds to real threats with fantasies .
it responds with the fantasy , first of all , that a lot of people will still survive ; but we have no guarantee of that .
it responds with the fantasy that those who do survive will do the right thing .
we are here having this debate , we have debates every day , because people differ on what the right thing is to do .
i have been in very traumatic situations with people in severe car wrecks and mountain climbing accidents .
my experience has not been that crisis imbues universal sagacity and fairness .
it has not been that .
people respond in extraordinary ways , and we must preserve an institution that has the deliberative body and the checks and balances to meet those challenges .
many of our states are going increasingly to mail-in ballots .
we in this body were effectively disabled by an anthrax attack not long after september 11 .
i would ask my dear friends , will you conduct this election in 45 days if there is anthrax in the mail and still preserve the franchise of the american people ?
how will you do that ?
you have no answer to that question .
i find it extraordinary , frankly , that while saying you do not want to amend the constitution , we began this very congress by amending the constitution through the rule , by undermining the principle that a quorum is 50 percent of the body and instead saying it is however many people survive .
and if that rule applies , who will designate it , who will implement it ?
the speaker , or the speaker 's designee ?
again , not an elected person , as you say is so critical and i believe is critical , but a temporary appointee , frankly , who not a single other member of this body knows who they are .
so we not only have an unelected person , we have an unknown person who will convene this body , and who , by the way , could conceivably convene it for their own election to then become the president of the united states under the succession act .
you have refused steadfastly to debate this real issue broadly .
you had a mock debate in the committee on the judiciary in which the distinguished chairman presented my bill without allowing me the courtesy or dignity to defend it myself .
and on that , you proudly say you defend democracy .
sir , i think you dissemble in that regard .
here is the fundamental question for us , my friends , and it is this : the american people are watching television and an announcement comes on and says the congress has been destroyed in a nuclear attack , the president and vice president are killed and the supreme court is dead and thousands of our citizens in this town are .
what happens next ?
under your bill , 45 days of chaos .
apparently , according to the committee on the judiciary subcommittee on the constitution chairman , 45 days of marshal law , rule of this country by an unelected president with no checks and balances .
or an alternative , an alternative which says quite simply that the people have entrusted the representatives they send here to make profound decisions , war , taxation , a host of other things , and those representatives would have the power under the bill of the gentleman from california ( mr. rohrabacher ) xz4003430 bill or mine to designate temporary successors , temporary , only until we can have a real election .
the american people , in one scenario , are told we do not know who is going to run the country , we have no representatives ; where in another you will have temporary representatives carrying your interests to this great body while we deliberate and have real elections .
that is the choice .
you are making the wrong choice today if you think you have solved this problem .
###Markdown
**Now search for something else!** Another two terms that might show up. `elections` and `chaos`? Whatever you think might be interesting. Enough of this garbage, let's cluster. Using a **simple counting vectorizer**, cluster the documents into **eight categories**, telling me what the top terms are per category. Using a **term frequency vectorizer**, cluster the documents into **eight categories**, telling me what the top terms are per category. Using a **term frequency inverse document frequency vectorizer**, cluster the documents into **eight categories**, telling me what the top terms are per category. (A sketch that runs all three variants is added after the existing cells below.)
###Code
# Initialize a vectorizer
vectorizer = TfidfVectorizer(use_idf=True, tokenizer=stemming_tokenizer, stop_words='english', max_features=8)
X = vectorizer.fit_transform(speeches_df['content'])
X
pd.DataFrame(X.toarray())
from sklearn.cluster import KMeans
number_of_clusters = 8
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
km.labels_
speeches_df['content']
results = pd.DataFrame()
results['content'] = speeches_df['content']
results['category'] = km.labels_
results
vectorizer.get_feature_names()
df = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
df
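# --- Added sketch (not part of the original homework solution): one way to run
# all three requested variants; feature counts and random_state are assumptions ---
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
variants = {
    'count': CountVectorizer(stop_words='english', max_features=1000),
    'tf': TfidfVectorizer(stop_words='english', use_idf=False, max_features=1000),
    'tf-idf': TfidfVectorizer(stop_words='english', use_idf=True, max_features=1000),
}
for name, vec in variants.items():
    X_v = vec.fit_transform(speeches_df['content'])
    km_v = KMeans(n_clusters=8, random_state=42).fit(X_v)
    terms_v = vec.get_feature_names()
    order_v = km_v.cluster_centers_.argsort()[:, ::-1]
    print(name)
    for i in range(8):
        print('  Cluster {}: {}'.format(i, ' '.join(terms_v[t] for t in order_v[i, :5])))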
###Output
_____no_output_____
###Markdown
**Which one do you think works the best?** Harry Potter timeI have a scraped collection of Harry Potter fanfiction at https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip.I want you to read them in, vectorize them and cluster them. Use this process to find out **the two types of Harry Potter fanfiction**. What is your hypothesis?
###Code
!curl -LO https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip
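# --- Added sketch (not part of the original homework): one way to finish the
# exercise; the file layout inside hp.zip is an assumption ---
import glob, zipfile
with zipfile.ZipFile('hp.zip') as z:
    z.extractall('hp')
hp_texts = [open(p, errors='ignore').read() for p in glob.glob('hp/**/*.txt', recursive=True)]
hp_vec = TfidfVectorizer(stop_words='english', max_features=5000)
hp_X = hp_vec.fit_transform(hp_texts)
hp_km = KMeans(n_clusters=2, random_state=42).fit(hp_X)
# Inspect the top terms per cluster to form a hypothesis about the two types
hp_terms = hp_vec.get_feature_names()
hp_order = hp_km.cluster_centers_.argsort()[:, ::-1]
for i in range(2):
    print('Cluster {}: {}'.format(i, ' '.join(hp_terms[t] for t in hp_order[i, :10])))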
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 149 100 149 0 0 106 0 0:00:01 0:00:01 --:--:-- 106
100 9226k 100 9226k 0 0 2668k 0 0:00:03 0:00:03 --:--:-- 7714k
|
benchmarks/ig-en/jw300-baseline/IgboToEnglish.ipynb | ###Markdown
Masakhane - Reverse Machine Translation for African Languages (Using JoeyNMT) Igbo to English Group Members: *Dino Anastasopoulos (1900661)* *Jesse Bristow (1875955)* *Chloe Smith (1877342)* > NB > - The purpose of this Notebook is to build models that translate African languages (target language) *into* English (source language). This will allow us in future to make translations from one African language to the other. If you'd like to translate *from* English, please use [this](https://github.com/masakhane-io/masakhane-mt/blob/master/starter_notebook.ipynb) starter notebook instead. > - We call this reverse training because normally we build models that make translations from the source language (English) to the target language. But in this case we are doing the reverse: building models that make translations from the target language to the source (English). Note before beginning: - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running. - If you actually want to have a clue what you're doing, read the text and peek at the links. - With 100 epochs, it should take around 7 hours to run in Google Colab. - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected] - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) Retrieve your data & make a parallel corpus. If you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details. Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe.
###Code
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
if gpu_info.find('failed') >= 0:
print('Not connected to a GPU')
else:
print(gpu_info)
from psutil import virtual_memory
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
if ram_gb < 20:
print('Not using a high-RAM runtime')
else:
print('You are using a high-RAM runtime!')
from google.colab import drive
drive.mount('/content/drive')
# TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here:
# These will also become the suffix's of all vocab and corpus files used throughout
import os
source_language = "en"
target_language = "ig"
lc = False # If True, lowercase the data.
seed = 42 # Random seed for shuffling.
tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
os.environ["tag"] = tag
# This will save it to a folder in our gdrive instead!
!mkdir -p "/content/drive/My Drive/masakhane/$tgt-$src-$tag"
os.environ["gdrive_path"] = "/content/drive/My Drive/masakhane/%s-%s-%s" % (target_language, source_language, tag)
!echo $gdrive_path
# Install opus-tools
! pip install opustools-pkg
# Downloading our corpus
! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q
# extract the corpus file
! gunzip JW300_latest_xml_$src-$tgt.xml.gz
# Download the global test set.
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en
# And the specific test set for this language pair.
os.environ["trg"] = target_language
os.environ["src"] = source_language
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$tgt.en
print("FIRST WGET SUCCESS")
! mv test.en-$tgt.en test.en
print("FIRST MV SUCCESS")
! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$tgt.$tgt
print("SECOND WGET SUCCESS")
! mv test.en-$tgt.$tgt test.$tgt
print("SECOND MV SUCCESS")
# Read the test data to filter from train and dev splits.
# Store english portion in set for quick filtering checks.
en_test_sents = set()
filter_test_sents = "test.en-any.en"
j = 0
with open(filter_test_sents) as f:
for line in f:
en_test_sents.add(line.strip())
j += 1
print('Loaded {} global test sentences to filter from the training/dev data.'.format(j))
import pandas as pd
# TMX file to dataframe
source_file = 'jw300.' + source_language
target_file = 'jw300.' + target_language
source = []
target = []
skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion.
with open(source_file) as f:
for i, line in enumerate(f):
# Skip sentences that are contained in the test set.
if line.strip() not in en_test_sents:
source.append(line.strip())
else:
skip_lines.append(i)
with open(target_file) as f:
for j, line in enumerate(f):
# Only add to corpus if corresponding source was not skipped.
if j not in skip_lines:
target.append(line.strip())
print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i))
df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence'])
# if you get TypeError: data argument can't be an iterator is because of your zip version run this below
#df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence'])
df.head(3)
###Output
Loaded data and skipped 4513/471529 lines since contained in test set.
###Markdown
Pre-processing and export. It is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned. In addition we will split our data into dev/test/train and export to the filesystem.
###Code
# drop duplicate translations
df_pp = df.drop_duplicates()
# drop conflicting translations
# (this is optional and something that you might want to comment out
# depending on the size of your corpus)
df_pp.drop_duplicates(subset='source_sentence', inplace=True)
df_pp.drop_duplicates(subset='target_sentence', inplace=True)
# Shuffle the data to remove bias in dev set selection.
df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True)
# Install fuzzy wuzzy to remove "almost duplicate" sentences in the
# test and training sets.
! pip install fuzzywuzzy
! pip install python-Levenshtein
import time
from fuzzywuzzy import process
import numpy as np
from os import cpu_count
from functools import partial
from multiprocessing import Pool
# reset the index of the training set after previous filtering
df_pp.reset_index(drop=False, inplace=True)
# Remove samples from the training data set if they "almost overlap" with the
# samples in the test set.
# Filtering function. Adjust pad to narrow down the candidate matches to
# within a certain length of characters of the given sample.
def fuzzfilter(sample, candidates, pad):
candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad]
if len(candidates) > 0:
return process.extractOne(sample, candidates)[1]
else:
return np.nan
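# --- Added demo (not in the original notebook): fuzzfilter returns the best
# fuzzy-match score against candidates within +/- pad characters of the sample ---
print(fuzzfilter("this is a test sentence", ["this is a test sentance", "unrelated"], pad=5))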
#start_time = time.time()
# ### iterating over pandas dataframe rows is not recomended, let use multi processing to apply the function
#with Pool(cpu_count()-1) as pool:
#scores = pool.map(partial(fuzzfilter, candidates=list(en_test_sents), pad=5), df_pp['source_sentence'])
#hours, rem = divmod(time.time() - start_time, 3600)
#minutes, seconds = divmod(rem, 60)
#print("done in {}h:{}min:{}seconds".format(hours, minutes, seconds))
# # Filter out "almost overlapping samples"
#df_pp = df_pp.assign(scores=scores)
#df_pp = df_pp[df_pp['scores'] < 95]
# This section does the split between train/dev for the parallel corpora then saves them as separate files
# We use 1000 dev test and the given test set.
import csv
# Do the split between dev/train and create parallel corpora
num_dev_patterns = 1000
# Optional: lower case the corpora - this will make it easier to generalize, but without proper casing.
if lc: # Julia: making lowercasing optional
df_pp["source_sentence"] = df_pp["source_sentence"].str.lower()
df_pp["target_sentence"] = df_pp["target_sentence"].str.lower()
# Julia: test sets are already generated
dev = df_pp.tail(num_dev_patterns) # Herman: Error in original
stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index)
with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file:
for index, row in stripped.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file:
for index, row in dev.iterrows():
src_file.write(row["source_sentence"]+"\n")
trg_file.write(row["target_sentence"]+"\n")
#stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere
#stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks.
#dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False)
#dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False)
# Doublecheck the format below. There should be no extra quotation marks or weird characters.
! head train.*
! head dev.*
###Output
==> train.en <==
How can we apply this counsel ? — Prov . 15 : 28 ; 16 : 23 .
Consider what Jehovah will do for mankind .
Unscrupulous men slander Jehovah ’ s Witnesses , branding them “ a dangerous cult . ”
Nevertheless , we decided on a code that we would use to warn our spiritual brothers in case of trouble — 4711 , the name of a famous cologne .
“ I Have Found Real Purpose in Life . ” — MARCOS PAULO DE SOUSA
Jesus Saves — How ?
André Caquot , member of the French Institute , speaks of “ the Canaanite cultural substratum at the heart of Israelite religion . ”
Demands from your parents , friends , and teachers ; the physical and emotional changes of puberty ; or the feeling that you are a failure because of some minor shortcoming — all these things can leave you feeling melancholy and sad .
A number of Greeks , perhaps including some who were Jewish proselytes , also believed .
“ Clothe yourselves with the tender affections of compassion , kindness , lowliness of mind , mildness , and long-suffering . ” — Colossians 3 : 12 .
==> train.ig <==
Olee otú anyị nwere ike isi mee ihe a Baịbụl kwuru ? — Ilu 15 : 28 ; 16 : 23 .
Chegodị ihe Jehova ga - emere ụmụ mmadụ .
Mmadụ ndị na - enweghị ụkpụrụ na - ebo Ndịàmà Jehova ebubo ụgha , na - akpọ ha “ òtù nzuzo dị ize ndụ . ”
Ka o sina dị , anyị kpebiri ihe mgbaàmà anyị ga - eji na - agwa ụmụnna anyị mgbe e nwere nsogbu — 4711 , aha otu sent na - ewu ewu a na - agba n’ahụ́ .
“ Ndụ m enweela isi . ” — MAKOS PAULU DI SOZA
Jisọs Na - azọpụta — N’ụzọ Dị Aṅaa ?
André Caquot , bụ́ onye òtù French Institute , na - ekwu na “ okpukpe ndị Izrel dabeere n’ọdịbendị ndị Kenan . ”
Ihe ndị mụrụ gị , ndị enyi , na ndị nkụzi gị chọrọ gị n’aka ; mgbanwe anụ ahụ na nke mmetụta uche nke oge ịgba ajị ; ma ọ bụ gị iche na ị baghị n’ihe n’ihi ụmụ emezighị emezi ụfọdụ — ihe ndị a nile pụrụ ime ka ị daa mbà n’obi ma nọrọ ná mwute .
Ọtụtụ ndị Gris , nke nwere ike ịgụnye ndị nke na - eso ụzọ ndị Juu , ghọkwara ndị kwere ekwe .
“ Yikwasịnụ onwe unu mmetụta ndị dị nro bụ́ ọmịiko , obiọma , ịdị umeala n’obi , ịdị nwayọọ , na ogologo ntachi obi . ” — Ndị Kọlọsi 3 : 12 .
==> dev.en <==
Under such circumstances , some may feel that the only option is divorce. — Proverbs 13 : 12 .
Because as the Source of life , God sets out his laws and standards in the Bible “ for [ our ] good , ” or “ for [ our ] own well-being . ” — Deuteronomy 10 : 13 ; New Revised Standard Version .
Such a fatalistic view holds many back from improving their health and leading a more productive life .
God ’ s action against this wicked world will perfectly reflect both his justice and his love. — Ps . 92 : 7 ; Prov . 2 : 21 , 22 .
At the time , it seemed most unlikely that they would .
Because we often think best in pictures , illustrations can make concepts easier to grasp .
Locating the Spiritual Paradise
The Bible — Why So Many ?
Though our lives and assignments have changed , our friendships remain . ”
That spirit is Jehovah ’ s active force , and since it always acts in harmony with the will of the holy God , it is properly called “ holy spirit , ” or “ the spirit of holiness . ”
==> dev.ig <==
N’ọnọdụ ndị dị otú ahụ , ụfọdụ pụrụ iche na nanị ihe a ga - eme bụ ịgba alụkwaghịm . — Ilu 13 : 12 .
Ọ bụ n’ihi na Chineke , bụ́ Isi Iyi nke ndụ , nyere iwu na ụkpụrụ ndị dị na Baịbụl “ maka ọdịmma [ anyị ] , ” ma ọ bụkwanụ “ ka o wee dịrị [ anyị ] mma . ” — Diuterọnọmi 10 : 13 ; Bible Nsọ nke Union Version .
Iche echiche otú ahụ na - eme ka ọtụtụ ndị ghara ime ihe ga - enyere ha aka ka ahụ́ sie ha ike , ha enwee ike na - eme ihe dịịrị ha ná ndụ .
Ihe Chineke ga - eme ụwa ọjọọ a ga - egosi nnọọ na ọ na - ekpe ikpe ziri ezi nakwa na ọ hụrụ ndị mmadụ n’anya . — Ọma 92 : 7 ; Ilu 2 : 21 , 22 .
N’oge ahụ , o yiri ka ọ gaghị ekwe omume .
Ebe ọ bụ na anyị na - aghọtakarị ihe nke ọma mgbe anyị ji anya nke uche na - ahụ ya , ihe atụ pụrụ ime ka echiche ụfọdụ dị mfe nghọta .
Ịchọta Ebe Paradaịs Ime Mmụọ ahụ Dị
Gịnị Mere E Ji Nwee Baịbụl Dị Iche Iche ?
N’agbanyeghị na ndụ anyị na ozi anyị agbanweela , anyị ka bụkwa ezigbo enyi . ”
Mmụọ nsọ bụ ike Jehova nọ n’ọrụ , ebe ọ bụkwa na mmụọ a na - arụ ọrụ mgbe nile n’ụzọ kwekọrọ n’uche Chineke dị nsọ , a kpọrọ ya aha kwesịrị ekwesị nke bụ́ “ mmụọ nsọ ” ma ọ bụ “ mmụọ nke dị nsọ . ”
###Markdown
--- Installation of JoeyNMT. JoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io)
###Code
# Install JoeyNMT
! git clone https://github.com/joeynmt/joeynmt.git
! cd joeynmt; pip3 install .
# Install Pytorch with GPU support v1.7.1.
# ! pip install torch==1.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
###Output
fatal: destination path 'joeynmt' already exists and is not an empty directory.
Processing /content/joeynmt
[33m DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.
pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.[0m
Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (0.16.0)
Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (7.1.2)
Requirement already satisfied: numpy>=1.19.5 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (1.19.5)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (57.4.0)
Requirement already satisfied: torch>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (1.9.0+cu111)
Requirement already satisfied: tensorboard>=1.15 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (2.6.0)
Requirement already satisfied: torchtext>=0.10.0 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (0.10.0)
Requirement already satisfied: sacrebleu>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (2.0.0)
Requirement already satisfied: subword-nmt in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (0.3.7)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (3.2.2)
Requirement already satisfied: seaborn in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (0.11.2)
Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (6.0)
Requirement already satisfied: pylint>=2.9.6 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (2.11.1)
Requirement already satisfied: six==1.12 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (1.12.0)
Requirement already satisfied: wrapt==1.11.1 in /usr/local/lib/python3.7/dist-packages (from joeynmt==1.3) (1.11.1)
Requirement already satisfied: astroid<2.9,>=2.8.0 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (2.8.2)
Requirement already satisfied: toml>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (0.10.2)
Requirement already satisfied: isort<6,>=4.2.5 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (5.9.3)
Requirement already satisfied: platformdirs>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (2.4.0)
Requirement already satisfied: typing-extensions>=3.10.0 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (3.10.0.2)
Requirement already satisfied: mccabe<0.7,>=0.6 in /usr/local/lib/python3.7/dist-packages (from pylint>=2.9.6->joeynmt==1.3) (0.6.1)
Requirement already satisfied: lazy-object-proxy>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from astroid<2.9,>=2.8.0->pylint>=2.9.6->joeynmt==1.3) (1.6.0)
Requirement already satisfied: typed-ast<1.5,>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from astroid<2.9,>=2.8.0->pylint>=2.9.6->joeynmt==1.3) (1.4.3)
Requirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from sacrebleu>=2.0.0->joeynmt==1.3) (2019.12.20)
Requirement already satisfied: colorama in /usr/local/lib/python3.7/dist-packages (from sacrebleu>=2.0.0->joeynmt==1.3) (0.4.4)
Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.7/dist-packages (from sacrebleu>=2.0.0->joeynmt==1.3) (0.8.9)
Requirement already satisfied: portalocker in /usr/local/lib/python3.7/dist-packages (from sacrebleu>=2.0.0->joeynmt==1.3) (2.3.2)
Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (0.6.1)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (1.35.0)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (2.23.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (1.0.1)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (0.37.0)
Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (0.12.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (3.3.4)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (0.4.6)
Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (1.41.0)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (1.8.0)
Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=1.15->joeynmt==1.3) (3.17.3)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.15->joeynmt==1.3) (0.2.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.15->joeynmt==1.3) (4.2.4)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.15->joeynmt==1.3) (4.7.2)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=1.15->joeynmt==1.3) (1.3.0)
Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard>=1.15->joeynmt==1.3) (4.8.1)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard>=1.15->joeynmt==1.3) (0.4.8)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard>=1.15->joeynmt==1.3) (2021.5.30)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard>=1.15->joeynmt==1.3) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard>=1.15->joeynmt==1.3) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard>=1.15->joeynmt==1.3) (1.24.3)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=1.15->joeynmt==1.3) (3.1.1)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from torchtext>=0.10.0->joeynmt==1.3) (4.62.3)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->markdown>=2.6.8->tensorboard>=1.15->joeynmt==1.3) (3.6.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->joeynmt==1.3) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->joeynmt==1.3) (1.3.2)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->joeynmt==1.3) (2.4.7)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->joeynmt==1.3) (2.8.2)
Requirement already satisfied: scipy>=1.0 in /usr/local/lib/python3.7/dist-packages (from seaborn->joeynmt==1.3) (1.4.1)
Requirement already satisfied: pandas>=0.23 in /usr/local/lib/python3.7/dist-packages (from seaborn->joeynmt==1.3) (1.1.5)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.23->seaborn->joeynmt==1.3) (2018.9)
Building wheels for collected packages: joeynmt
Building wheel for joeynmt (setup.py) ... [?25l[?25hdone
Created wheel for joeynmt: filename=joeynmt-1.3-py3-none-any.whl size=86029 sha256=35b9e549369cabe794456e8e93516baea7b98d28efb61498317d76eeb2265e66
Stored in directory: /tmp/pip-ephem-wheel-cache-w537mjqg/wheels/0a/f4/bf/6c9d3b8efbfece6cd209f865be37382b02e7c3584df2e28ca4
Successfully built joeynmt
Installing collected packages: joeynmt
Attempting uninstall: joeynmt
Found existing installation: joeynmt 1.3
Uninstalling joeynmt-1.3:
Successfully uninstalled joeynmt-1.3
Successfully installed joeynmt-1.3
###Markdown
Preprocessing the Data into Subword BPE Tokens- One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909).- It was also shown that optimizing the number of BPE codes significantly improves results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)- Below we have the scripts for doing BPE tokenization of our data. We use 4000 BPE codes as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable.
###Code
# One of the big boosts in NMT performance came from changing the tokenization method:
# instead of tokenizing by words, a method called BPE (byte-pair encoding) gave substantial gains
# Do subword NMT
from os import path
os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts
os.environ["tgt"] = target_language
# Learn BPEs on the training data.
os.environ["data_path"] = path.join("joeynmt", "data",target_language + source_language ) # Herman!
! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt
# Apply BPE splits to the development and test data.
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src
! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt
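# A minimal sketch of what the BPE splits above do, via subword-nmt's Python API
# (subword_nmt.apply_bpe.BPE); 'bpe.codes.4000' is the codes file learned above,
# and the example segmentation shown in the comment is illustrative only.
import codecs
from subword_nmt.apply_bpe import BPE
with codecs.open('bpe.codes.4000', encoding='utf-8') as codes:
    bpe = BPE(codes)
print(bpe.process_line('Locating the Spiritual Paradise'))  # e.g. 'Loc@@ ating the Spiritual Para@@ dise'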
# Create directory, move everyone we care about to the correct location
! mkdir -p $data_path
! cp train.* $data_path
! cp test.* $data_path
! cp dev.* $data_path
! cp bpe.codes.4000 $data_path
! ls $data_path
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
# Create that vocab using build_vocab
! sudo chmod 777 joeynmt/scripts/build_vocab.py
! joeynmt/scripts/build_vocab.py joeynmt/data/$tgt$src/train.bpe.$src joeynmt/data/$tgt$src/train.bpe.$tgt --output_path joeynmt/data/$tgt$src/vocab.txt
# Some output
! echo "BPE English Sentences"
! tail -n 5 test.bpe.$tgt
! echo "Combined BPE Vocab"
! tail -n 10 joeynmt/data/$tgt$src/vocab.txt # Herman
# Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path
! cp train.* "$gdrive_path"
! cp test.* "$gdrive_path"
! cp dev.* "$gdrive_path"
! cp bpe.codes.4000 "$gdrive_path"
! ls "$gdrive_path"
###Output
bpe.codes.4000 dev.ig test.en test.en-any.en.3 train.bpe.en
dev.bpe.en models test.en-any.en test.en-any.en.4 train.bpe.ig
dev.bpe.ig test.bpe.en test.en-any.en.1 test.en-any.en.5 train.en
dev.en test.bpe.ig test.en-any.en.2 test.ig train.ig
###Markdown
Creating the JoeyNMT Config: JoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults with it, that you may play with!- We used the Transformer architecture - We set our dropout reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021))Things worth playing with:- The batch size (also recommended to change for low-resourced languages)- The number of epochs (set to 5 in this config so that it runs quickly, for testing purposes)- The decoder options (beam_size, alpha)- Evaluation metrics (BLEU versus chrF)
###Code
# This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update
# (You can of course play with all the parameters if you'd like!)
name = '%s%s' % (target_language, source_language)
# gdrive_path = os.environ["gdrive_path"]
# Create the config
config = """
name: "{target_language}{source_language}_reverse_transformer"
data:
src: "{target_language}"
trg: "{source_language}"
train: "data/{name}/train.bpe"
dev: "data/{name}/dev.bpe"
test: "data/{name}/test.bpe"
level: "bpe"
lowercase: False
max_sent_length: 100
src_vocab: "data/{name}/vocab.txt"
trg_vocab: "data/{name}/vocab.txt"
testing:
beam_size: 5
alpha: 1.0
training:
# load_model: "{gdrive_path}/models/{name}_reverse_transformer/39000.ckpt" # if uncommented, load a pre-trained model from this checkpoint
random_seed: 42
optimizer: "adam"
normalization: "tokens"
adam_betas: [0.9, 0.999]
scheduling: "Noam" # TODO: try switching from plateau to Noam scheduling
patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds.
learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer)
learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer)
decrease_factor: 0.7
loss: "crossentropy"
learning_rate: 0.0001
learning_rate_min: 0.00000001
weight_decay: 0.0
label_smoothing: 0.1
batch_size: 4096
batch_type: "token"
eval_batch_size: 3600
eval_batch_type: "token"
batch_multiplier: 1
early_stopping_metric: "ppl"
epochs: 5 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all
validation_freq: 1000 # TODO: Set to at least once per epoch.
logging_freq: 100
eval_metric: "bleu"
model_dir: "models/{name}_reverse_transformer"
overwrite: True # TODO: Set to True if you want to overwrite possibly existing models.
shuffle: True
use_cuda: True
max_output_length: 100
print_valid_sents: [0, 1, 2, 3]
keep_last_ckpts: 3
model:
initializer: "xavier"
bias_initializer: "zeros"
init_gain: 1.0
embed_initializer: "xavier"
embed_init_gain: 1.0
tied_embeddings: True
tied_softmax: True
encoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
decoder:
type: "transformer"
num_layers: 6
num_heads: 4 # TODO: Increase to 8 for larger data.
embeddings:
embedding_dim: 256 # TODO: Increase to 512 for larger data.
scale: True
dropout: 0.2
# typically ff_size = 4 x hidden_size
hidden_size: 256 # TODO: Increase to 512 for larger data.
ff_size: 1024 # TODO: Increase to 2048 for larger data.
dropout: 0.3
""".format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language)
with open("joeynmt/configs/transformer_reverse_{name}.yaml".format(name=name),'w') as f:
f.write(config)
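
# Optional sanity check (sketch): re-parse the config we just wrote with PyYAML
# (already installed as a JoeyNMT dependency) to catch YAML indentation mistakes early.
import yaml
with open("joeynmt/configs/transformer_reverse_{name}.yaml".format(name=name)) as f:
    parsed = yaml.safe_load(f)
print(parsed["training"]["epochs"], parsed["model"]["encoder"]["num_heads"])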
###Output
_____no_output_____
###Markdown
Train the Model: This single line of JoeyNMT runs the training using the config we made above
###Code
# Train the model
# You can press Ctrl-C to stop. And then run the next cell to save your checkpoints!
!cd joeynmt; python3 -m joeynmt train configs/transformer_reverse_$tgt$src.yaml
# Copy the created models from the notebook storage to google drive for persistent storage
!mkdir -p "$gdrive_path/models/${tgt}${src}_reverse_transformer/"
!cp -r joeynmt/models/${tgt}${src}_reverse_transformer/* "$gdrive_path/models/${tgt}${src}_reverse_transformer/"
# Output our validation accuracy
! cat "$gdrive_path/models/${tgt}${src}_reverse_transformer/validations.txt"
# Test our model
! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${tgt}${src}_reverse_transformer/config.yaml"
###Output
2021-10-14 17:20:42,347 - INFO - root - Hello! This is Joey-NMT (version 1.3).
2021-10-14 17:20:42,347 - INFO - joeynmt.data - Building vocabulary...
2021-10-14 17:20:42,667 - INFO - joeynmt.data - Loading dev data...
2021-10-14 17:20:42,678 - INFO - joeynmt.data - Loading test data...
2021-10-14 17:20:42,735 - INFO - joeynmt.data - Data loaded.
2021-10-14 17:20:42,753 - INFO - joeynmt.prediction - Process device: cuda, n_gpu: 1, batch_size per device: 3600
2021-10-14 17:20:42,753 - INFO - joeynmt.prediction - Loading model from models/igen_reverse_transformer/39000.ckpt
2021-10-14 17:20:45,771 - INFO - joeynmt.model - Building an encoder-decoder model...
2021-10-14 17:20:45,994 - INFO - joeynmt.model - Enc-dec model built.
2021-10-14 17:20:46,060 - INFO - joeynmt.prediction - Decoding on dev set (data/igen/dev.bpe.en)...
2021-10-14 17:21:07,709 - WARNING - sacrebleu - That's 100 lines that end in a tokenized period ('.')
2021-10-14 17:21:07,709 - WARNING - sacrebleu - It looks like you forgot to detokenize your test data, which may hurt your score.
2021-10-14 17:21:07,710 - WARNING - sacrebleu - If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.
2021-10-14 17:21:07,713 - INFO - joeynmt.prediction - dev bleu[13a]: 25.91 [Beam search decoding with beam size = 5 and alpha = 1.0]
2021-10-14 17:21:07,713 - INFO - joeynmt.prediction - Decoding on test set (data/igen/test.bpe.en)...
2021-10-14 17:21:46,034 - WARNING - sacrebleu - That's 100 lines that end in a tokenized period ('.')
2021-10-14 17:21:46,035 - WARNING - sacrebleu - It looks like you forgot to detokenize your test data, which may hurt your score.
2021-10-14 17:21:46,035 - WARNING - sacrebleu - If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.
2021-10-14 17:21:46,045 - INFO - joeynmt.prediction - test bleu[13a]: 27.67 [Beam search decoding with beam size = 5 and alpha = 1.0]
|
Simple Scraper Using Python.ipynb | ###Markdown
Amazon Product Scraper using BeautifulSoup
###Code
import requests
from bs4 import BeautifulSoup
#for database we will use sqlite3 library
import sqlite3
###Output
_____no_output_____
###Markdown
We need this User-Agent header so that Amazon serves the product page to our script instead of blocking the request as a bot
###Code
headers = {"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}
#Here we will add the url of the product that we want to parse
url = 'https://www.amazon.de/-/en/Fossil-FS4656IE-Chronograph-Quartz-Leather/dp/B07TCQRRWR/'
page = requests.get(url, headers=headers)
page
#If we want to see the page content we need to use .content
#page.content
soup = BeautifulSoup(page.content, features="lxml")
#prettify() returns the parsed HTML as a nicely indented string
#print(soup.prettify())
#Here we parse the name of the product; strip() removes the surrounding whitespace left over from the HTML layout
product_name = soup.find(id='productTitle').get_text().strip()
product_name
#Here we parse the product ASIN; it's a little tricky because the value sits in an unnamed list item, so we use a CSS selector
product_asin = soup.select_one("li:nth-child(5) .a-text-bold+ span").get_text().strip()
print('ASIN: ',product_asin)
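#A more defensive variant (sketch): find() and select_one() return None when an
#element is missing (e.g. when Amazon serves a CAPTCHA page instead of the product),
#so guard before calling get_text() to avoid an AttributeError
title_tag = soup.find(id='productTitle')
safe_product_name = title_tag.get_text().strip() if title_tag is not None else None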
#For the timestamp we have two options: 1) use the system date and time via Python's datetime, or 2) use the database's own date and time functions
from datetime import datetime
dateTimeObj = datetime.now()
print(dateTimeObj.year, '/', dateTimeObj.month, '/', dateTimeObj.day)
print(dateTimeObj.hour, ':', dateTimeObj.minute, ':', dateTimeObj.second)
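#The same timestamp in a single formatted string (sketch), using strftime
print(dateTimeObj.strftime('%Y/%m/%d %H:%M:%S'))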
###Output
2021 / 2 / 18
22 : 36 : 29
###Markdown
Database: For this project we are using the SQLite database
###Code
# Create a SQL connection to our SQLite database
con = sqlite3.connect('Amazon_db.sqlite')
#Create the table and name its columns
cur = con.cursor()
cur.execute('CREATE TABLE if not exists amazon (product_name VARCHAR, product_asin VARCHAR, timestamp CHAR(100))')
con.commit()
#Here we will insert the data and print all data
#For timestamp we are using DateTime() function of SQLite
con.execute("INSERT INTO amazon(product_name, product_asin, timestamp) values (?, ?, DateTime('now', 'localtime'))", (product_name, product_asin))
con.commit()
cur = con.cursor()
cur.execute("SELECT * FROM amazon")
rows = cur.fetchall()
for row in rows:
print(row)
#con.close()
#If you want to delete a record you can use a query like this
#cur.execute("DELETE FROM amazon WHERE product_asin='GARMIN'")
#We can print the selected data by using this query
con = sqlite3.connect('Amazon_db.sqlite')
cur = con.cursor()
cur.execute("SELECT product_name, timestamp FROM amazon WHERE product_asin='B07TCQRRWR'")
rows = cur.fetchall()
for row in rows:
print(row)
con.close()
###Output
("Fossil FS4656IE Men's Chronograph Quartz Watch with Leather Strap", '2021-02-18 22:01:40')
("Fossil FS4656IE Men's Chronograph Quartz Watch with Leather Strap", '2021-02-18 22:02:40')
("Fossil FS4656IE Men's Chronograph Quartz Watch with Leather Strap", '2021-02-18 22:04:38')
("Fossil FS4656IE Men's Chronograph Quartz Watch with Leather Strap", '2021-02-18 22:05:40')
("Fossil FS4656IE Men's Chronograph Quartz Watch with Leather Strap", '2021-02-18 22:36:29')
|
samples/notebooks (plus)/classifications.ipynb | ###Markdown
Classifications: An example of how to work with classifications in Azure Purview.
###Code
%env PURVIEW_NAME=pvdemofm2ie-pv
# Initialise Helper Method
import json
def getJSON(raw_output):
output = ''.join(raw_output)
json_obj = json.loads(output)
return json_obj
# Get Classifications
data = !pv types readTypeDefs --type "CLASSIFICATION"
data = getJSON(data)
# Number of Classifications by Creator (createdBy)
classificationsByCreator = {}
for classification in data['classificationDefs']:
createdBy = classification['createdBy']
if createdBy in classificationsByCreator:
classificationsByCreator[createdBy] += 1
else:
classificationsByCreator[createdBy] = 1
print(classificationsByCreator)
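# The same count in one step with collections.Counter (sketch):
from collections import Counter
print(Counter(c['createdBy'] for c in data['classificationDefs']))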
###Output
{'admin': 208, '095354ff-cae8-44ff-8120-22ec5a941b40': 2, 'f12b84af-9b2b-4bfc-8ed7-6d72f1669f78': 1}
|
model_creation_01.ipynb | ###Markdown
INFO:tensorflow:2017-12-04 03:28:26.086613: Step 499: Train accuracy = 100.0%
INFO:tensorflow:2017-12-04 03:28:26.086770: Step 499: Cross entropy = 0.001410
INFO:tensorflow:2017-12-04 03:28:26.131517: Step 499: Validation accuracy = 72.0% (N=100)
INFO:tensorflow:Final test accuracy = 95.2% (N=21)
INFO:tensorflow:Froze 2 variables.
###Code
image_url = 'imagenet/tf_files/coffee_companies/roses/2414954629_3708a1a04d.jpg'
%%bash
echo python -m scripts.label_image \
--graph='tf_files/retrained_graph.pb' \
--image='tf_files/test/1.jpg'
###Output
peets coffee 0.999745
starbucks 0.000184728
coffee bean 7.04684e-05
dunkin donuts 5.55596e-09
processing/process_sme_results.ipynb | ###Markdown
Creating FITS output for any given SME output. Author(s): Sven Buder (SB, WG4). History: 181011 SB Created.
###Code
# Preamble for notebook
# Compatibility with Python 3
from __future__ import (absolute_import, division, print_function)
try:
%matplotlib inline
%config InlineBackend.figure_format='retina'
except:
pass
# Basic packages
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
import os
import sys
import scipy
import scipy.interpolate
import collections
import glob
import pickle
import pandas
from astropy.stats import sigma_clipped_stats
# Packages to work with FITS and (IDL) SME.out files
import astropy.io.fits as pyfits
import astropy.table as table
from astropy.table import Table, hstack, vstack
from scipy.io.idl import readsav
from astropy.nddata import bitmask
# Matplotlib and associated packages for plotting
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.transforms import Bbox,TransformedBbox
from matplotlib.image import BboxImage
from matplotlib.legend_handler import HandlerBase
from matplotlib._png import read_png
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
import matplotlib.colors as colors
params = {
'font.family' : 'sans',
'font.size' : 17,
'axes.labelsize' : 20,
'ytick.labelsize' : 16,
'xtick.labelsize' : 16,
'legend.fontsize' : 20,
'text.usetex' : True,
'text.latex.preamble': [r'\usepackage{upgreek}', r'\usepackage{amsmath}'],
}
plt.rcParams.update(params)
_parula_data = [[0.2081, 0.1663, 0.5292],
[0.2116238095, 0.1897809524, 0.5776761905],
[0.212252381, 0.2137714286, 0.6269714286],
[0.2081, 0.2386, 0.6770857143],
[0.1959047619, 0.2644571429, 0.7279],
[0.1707285714, 0.2919380952, 0.779247619],
[0.1252714286, 0.3242428571, 0.8302714286],
[0.0591333333, 0.3598333333, 0.8683333333],
[0.0116952381, 0.3875095238, 0.8819571429],
[0.0059571429, 0.4086142857, 0.8828428571],
[0.0165142857, 0.4266, 0.8786333333],
[0.032852381, 0.4430428571, 0.8719571429],
[0.0498142857, 0.4585714286, 0.8640571429],
[0.0629333333, 0.4736904762, 0.8554380952],
[0.0722666667, 0.4886666667, 0.8467],
[0.0779428571, 0.5039857143, 0.8383714286],
[0.079347619, 0.5200238095, 0.8311809524],
[0.0749428571, 0.5375428571, 0.8262714286],
[0.0640571429, 0.5569857143, 0.8239571429],
[0.0487714286, 0.5772238095, 0.8228285714],
[0.0343428571, 0.5965809524, 0.819852381],
[0.0265, 0.6137, 0.8135],
[0.0238904762, 0.6286619048, 0.8037619048],
[0.0230904762, 0.6417857143, 0.7912666667],
[0.0227714286, 0.6534857143, 0.7767571429],
[0.0266619048, 0.6641952381, 0.7607190476],
[0.0383714286, 0.6742714286, 0.743552381],
[0.0589714286, 0.6837571429, 0.7253857143],
[0.0843, 0.6928333333, 0.7061666667],
[0.1132952381, 0.7015, 0.6858571429],
[0.1452714286, 0.7097571429, 0.6646285714],
[0.1801333333, 0.7176571429, 0.6424333333],
[0.2178285714, 0.7250428571, 0.6192619048],
[0.2586428571, 0.7317142857, 0.5954285714],
[0.3021714286, 0.7376047619, 0.5711857143],
[0.3481666667, 0.7424333333, 0.5472666667],
[0.3952571429, 0.7459, 0.5244428571],
[0.4420095238, 0.7480809524, 0.5033142857],
[0.4871238095, 0.7490619048, 0.4839761905],
[0.5300285714, 0.7491142857, 0.4661142857],
[0.5708571429, 0.7485190476, 0.4493904762],
[0.609852381, 0.7473142857, 0.4336857143],
[0.6473, 0.7456, 0.4188],
[0.6834190476, 0.7434761905, 0.4044333333],
[0.7184095238, 0.7411333333, 0.3904761905],
[0.7524857143, 0.7384, 0.3768142857],
[0.7858428571, 0.7355666667, 0.3632714286],
[0.8185047619, 0.7327333333, 0.3497904762],
[0.8506571429, 0.7299, 0.3360285714],
[0.8824333333, 0.7274333333, 0.3217],
[0.9139333333, 0.7257857143, 0.3062761905],
[0.9449571429, 0.7261142857, 0.2886428571],
[0.9738952381, 0.7313952381, 0.266647619],
[0.9937714286, 0.7454571429, 0.240347619],
[0.9990428571, 0.7653142857, 0.2164142857],
[0.9955333333, 0.7860571429, 0.196652381],
[0.988, 0.8066, 0.1793666667],
[0.9788571429, 0.8271428571, 0.1633142857],
[0.9697, 0.8481380952, 0.147452381],
[0.9625857143, 0.8705142857, 0.1309],
[0.9588714286, 0.8949, 0.1132428571],
[0.9598238095, 0.9218333333, 0.0948380952],
[0.9661, 0.9514428571, 0.0755333333],
[0.9763, 0.9831, 0.0538]]
parula = ListedColormap(_parula_data, name='parula')
parula_zero = _parula_data[0]
parula_0 = ListedColormap(_parula_data, name='parula_0')
parula_0.set_bad((1,1,1))
parula_r = ListedColormap(_parula_data[::-1], name='parula_r')
willi_blau = [0.0722666667, 0.4886666667, 0.8467]
class read_iso():
    def __init__(self, age):
        # age: array of isochrone ages, passed in explicitly so that the
        # class does not rely on an undefined global variable
        self.num_cols=4
        self.columns = ['M_Mo', 'logTeff', 'logG', 'logL_Lo', 'Ks']
        self.num_ages = len(age)
        self.ages = age
def fill_chemistry(self, m_h, fe_h, alpha_fe):
self.FeH = fe_h
self.Z = 10**m_h*0.0152
self.aFe = alpha_fe
def fill_iso(self, iso_input):
self.data = iso_input
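# Usage sketch with hypothetical values (read_iso is only a lightweight container):
# iso = read_iso(age=np.array([1.0, 2.0, 5.0]))      # ages in Gyr
# iso.fill_chemistry(m_h=0.0, fe_h=0.0, alpha_fe=0.0)
# iso.fill_iso(iso_input=my_isochrone_grid)          # my_isochrone_grid is hypothetical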
###Output
_____no_output_____
###Markdown
Function to gather and combine input data from SME, IRAF, Output structure & A(X)_sun
###Code
def get_product_information(product_name):
if product_name == 'GBS':
product_subsets = 'gbs'
product_input_data_path = '../validation/stellar_parameters/gaia_fgk_benchmark_stars/data/'
product_pipeline = 'lbol'
if product_name == 'seis':
product_subsets = 'seis'
product_input_data_path = '../input/asteroseismic_information/'
product_pipeline = 'lbol'
if product_name == 'OpenClusters':
product_subsets = ['Blanco 1','Pleiades','Hyades','NGC 1817','NGC 1901','ASCC 16','ASCC 20','ASCC 21','NGC 2112','NGC 2204','Berkeley 73','NGC 2232','NGC 2243','Berkeley 33','Berkeley 32','NGC 2516','NGC 2548','NGC 2632','M 67','IC 2602','Melotte 101','Trumpler 20','NGC 5460','NGC 6253','ASCC 89','IC 4665','NGC 6469','NGC 6568','NGC 6583','Ruprecht 145','Ruprecht 147']
product_input_data_path = '../validation/comparisons/comparison_clusters/sme_output_files/'
product_pipeline = 'lbol'
if product_name == 'GlobularClusters':
product_subsets = ['47 Tuc','NGC 288','NGC 362','NGC 1851','Omega Cen','NGC 6362','NGC 6397','NGC 7099']
product_input_data_path = '../validation/comparisons/comparison_clusters/sme_output_files/'
product_pipeline = 'lbol'
if product_name == 'random10000':
product_subsets = '10000'
product_input_data_path = '../validation/random10000/data/'
product_pipeline = 'lbol'
if product_name == 'ts_DR2':
product_subsets = 'ts_DR2'
product_input_data_path = '../validation/DR2_rerun/data/'
product_pipeline = 'lbol'
if product_name == 'high_vtot':
product_subsets = 'high_vtot'
product_input_data_path = '../science/high_vtot/data/'
product_pipeline = 'lbol'
if product_name == 'Li_rich_giants':
product_subsets = 'Li_rich'
product_input_data_path = '../science/Li_rich_giants/data/'
product_pipeline = 'lbol'
if product_name == 'Keller':
product_subsets = 'Keller'
product_input_data_path = '../science/Keller/'
product_pipeline = 'lbol'
if product_name == 'wide_binaries':
product_subsets = 'wide_binaries'
product_input_data_path = '../science/wide_binaries/data/'
product_pipeline = 'lbol'
if product_name[:4] == '10k_':
product_subsets = product_name
product_input_data_path = 'sme_result_files/'
product_pipeline = 'lbol_final'
if product_name[:4] == 'mric':
product_subsets = product_name
product_input_data_path = 'sme_result_files/'
product_pipeline = 'lbol'
if product_name[:2] == 'dn':
product_subsets = product_name
product_input_data_path = 'sme_result_files/'
product_pipeline = 'fixed'
if (
(product_name[:2] == '13') |
(product_name[:2] == '14') |
(product_name[:2] == '15') |
(product_name[:2] == '16') |
(product_name[:2] == '17') |
(product_name[:2] == '18') |
(product_name[:2] == '19') |
(product_name[:2] == '20')
):
product_subsets = product_name
product_input_data_path = 'sme_result_files/'
product_pipeline = 'lbol_final'
return (product_subsets, product_input_data_path, product_pipeline)
def get_input_data(product_subsets, product_input_data_path, product_pipeline):
dr3_output_structure = Table.read('galah_dr3_output_structure.fits',1)
if np.shape(product_subsets)!=():
joined_sme_data = Table.read(product_input_data_path+'/GALAH_'+product_subsets[0].replace(" ", "")+'_'+product_pipeline+'.fits', format='fits')
for each in range(1,len(product_subsets)):
joined_sme_data = vstack([joined_sme_data, Table.read(product_input_data_path+'/GALAH_'+product_subsets[each].replace(" ", "")+'_'+product_pipeline+'.fits', format='fits')])
else:
joined_sme_data = Table.read(product_input_data_path+'/GALAH_'+product_subsets.replace(" ", "")+'_'+product_pipeline+'.fits', format='fits')
all_iraf_data = pyfits.getdata('../input/sobject_iraf_53_2MASS_GaiaDR2_WISE_PanSTARRSDR1_BailerJones_K2seis_small.fits',1)
bad_irfm = all_iraf_data['irfm_teff'] < 0
all_iraf_data['irfm_teff'][bad_irfm] = np.nan
iraf_matched = []
for each in joined_sme_data['SOBJECT_ID']:
match = np.where(each == all_iraf_data['sobject_id'])[0]
if len(match)>0:
iraf_matched.append(match[0])
else:
print('No match found for '+str(each))
iraf_matched = np.array(iraf_matched)
iraf_data = all_iraf_data[iraf_matched]
#print('You are not adjusting the SP of 160419005101398')
print('You are adjusting the SP of 160419005101398')
if '10k_5' == product_subsets:
find_160419005101398 = np.where(joined_sme_data['SOBJECT_ID'] == 160419005101398)[0][0]
joined_sme_data['E_VEL'][find_160419005101398] = 1.5407
joined_sme_data['C_VEL'][find_160419005101398] = 0.029697875
joined_sme_data['TEFF'][find_160419005101398] = 4371.376
joined_sme_data['E_TEFF'][find_160419005101398] = 171.23157
joined_sme_data['C_TEFF'][find_160419005101398] = 7.060005
joined_sme_data['LOGG'][find_160419005101398] = 1.562197
joined_sme_data['FE_H'][find_160419005101398] = -0.80380595
joined_sme_data['E_FE_H'][find_160419005101398] = 0.30206105
joined_sme_data['C_FE_H'][find_160419005101398] = 0.22875078
joined_sme_data['FEH'][find_160419005101398] = -0.7256485
joined_sme_data['E_FEH'][find_160419005101398] = 0.22870451
joined_sme_data['C_FEH'][find_160419005101398] = 0.008607003
joined_sme_data['VMIC'][find_160419005101398] = 1.4966545
joined_sme_data['VMAC'][find_160419005101398] = 0.
joined_sme_data['VRAD'][find_160419005101398] = 0.027477598
joined_sme_data['E_VRAD'][find_160419005101398] = 1.5407
joined_sme_data['C_VRAD'][find_160419005101398] = 0.029697875
joined_sme_data['VSINI'][find_160419005101398] = 7.103247
joined_sme_data['E_VSINI'][find_160419005101398] = 4.623626
joined_sme_data['C_VSINI'][find_160419005101398] = 0.09662821
joined_sme_data[find_160419005101398]['CHI'][0] = 4.159691
joined_sme_data[find_160419005101398]['ITER'][0] = 4
joined_sme_data[find_160419005101398]['SN'][0] = 62.423534
joined_sme_data[find_160419005101398]['LI_VRAD'] = -8.639416
joined_sme_data[find_160419005101398]['SI6722'] = 0.20560789
joined_sme_data[find_160419005101398]['E_SI6722'] = 0.067370385
joined_sme_data[find_160419005101398]['C_SI6722'] = 0.039390706
joined_sme_data[find_160419005101398]['SI_VRAD'] = -2.1945698
abundance_zeropoints= Table.read('../validation/abundances/galahdr3_abundance_zeropoints.fits',1)
print('Got input data')
return dr3_output_structure, joined_sme_data, iraf_data, abundance_zeropoints
def compute_final_uncertainty(final_output_data, each_key):
"""
Combine the three uncertainty types for parameter X:
e_final^2 (X) = e_fit^2(X) + e_precision^2(X) + e_accuracy^2(X)
"""
uncertainty_accuracy = collections.OrderedDict()
uncertainty_accuracy['teff'] = 67
uncertainty_accuracy['logg'] = 0.12
uncertainty_accuracy['fe_h'] = 0.034
uncertainty_accuracy['fe_h_atmo'] = 0.059
uncertainty_accuracy['vbroad'] = 2.0
uncertainty_accuracy['rv_galah'] = 0.1
# This value is fixed now
# for more details see the validation/radial_velocities part
print('NB: We are leaving out logg for now, because we compute its final uncertainty later')
opt_parms = np.load('../validation/repeat_observations/reference_uncertainties/repeats_'+each_key+'.npy')
def snr_sigma_func(t, A, K, C):
return A * np.exp(- K * t) + C
snr_in = final_output_data['snr_c2_iraf'].clip(min=0)
scaling = dict()
scaling['teff'] = [3,7.5]
scaling['logg'] = [2,0]
scaling['fe_h'] = [4.,0.01]
scaling['fe_h_atmo'] = [2,0.0125]
scaling['vbroad'] = [1.75,0.3]
scaling['rv_galah'] = [2.0,0.15]
new_error = np.sqrt(
# SME fit uncertainty
(scaling[each_key][0] * final_output_data['cov_e_'+each_key] + scaling[each_key][1])**2.+
# Precision uncertainty, interpolated for the CCD2 SNR
(
snr_sigma_func(snr_in,*opt_parms)
)**2.+
# Accuracy uncertainty
(uncertainty_accuracy[each_key])**2.
)
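    # Worked example (sketch with made-up numbers) for teff, where cov_e_teff = 50 K
    # and the SNR-interpolated precision term is 30 K:
    # e_teff = sqrt((3*50 + 7.5)^2 + 30^2 + 67^2) = sqrt(24806 + 900 + 4489) ≈ 174 K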
new_error[new_error==0] = np.nan
final_output_data['e_'+each_key] = new_error
return(final_output_data)
def compute_final_abundance_uncertainty(final_output_data, final_output_abundances):
for each_key in final_output_abundances.keys():
if (each_key[:2] == 'A_') & (each_key[2:4] != 'Fe') & (each_key[2:] not in ['Ti6599','Cr4848']):
try:
opt_parms = np.load('../validation/repeat_observations/reference_uncertainties/repeats_'+each_key[2:]+'.npy')
except:
print('No reference repeat uncertainty for '+str(each_key[2:]))
opt_parms = np.array([0.,0.,0.])
def snr_sigma_func(t, A, K, C):
return A * np.exp(- K * t) + C
snr_in = final_output_data['snr_c2_iraf'].clip(min=0)
final_output_data['cov_e_'+each_key[2:]] = np.nanmax([
# SME covariance uncertainty
final_output_data['cov_e_'+each_key[2:]],
# Precision uncertainty, interpolated for the CCD2 SNR
(
snr_sigma_func(snr_in,*opt_parms)
)
], axis=0
)
return(final_output_data, final_output_abundances)
def compute_logg_uncertainty(final_output_data, mc_sampling=10000):
print('Sampling logg uncertainty')
np.random.seed(12)
mc_teff = np.random.normal(
loc = final_output_data['teff'],
# We do not trust the SME e_teff, and hence sample the max of e_teff=100 and 2% error of teff
# scale = np.array([np.max([100,0.02*final_output_data['teff'][x]]) for x in range(len(final_output_data['teff']))]),
scale = final_output_data['e_teff'],
size=(mc_sampling,len(final_output_data['teff']))
)
mc_mass = np.random.normal(
loc = final_output_data['mass'],
# We do not know the error on mass and hence assume it is 10%
scale = 0.1*final_output_data['mass'],
size=(mc_sampling,len(final_output_data['mass']))
)
mc_mass.clip(min=0., out=mc_mass)
mc_kmag = np.random.normal(
loc = final_output_data['ks_m'],
scale = final_output_data['ks_msigcom'],
size=(mc_sampling,len(final_output_data['ks_m']))
)
mc_bc = np.random.normal(
loc = final_output_data['bc_ks'],
scale = 0.1*np.ones(len(final_output_data['bc_ks'])),
size=(mc_sampling,len(final_output_data['bc_ks']))
)
sigma_dist_hi = abs(final_output_data['r_hi'] - final_output_data['r_est'])
sigma_dist_lo = abs(final_output_data['r_est'] - final_output_data['r_lo'])
mc_dist_lo = np.abs(np.random.normal(
loc = 0,
scale = sigma_dist_lo,
size=(mc_sampling,len(final_output_data['r_est'])))
)
mc_dist_hi = np.abs(np.random.normal(
loc = 0,
scale = sigma_dist_hi,
size=(mc_sampling,len(final_output_data['r_est'])))
)
#fraction_hi_lo = sigma_dist_hi / (sigma_dist_lo + sigma_dist_hi)
fraction_hi_lo = 0.5
select_dist_lo_hi = (np.random.uniform(0, 1, size=(mc_sampling,len(final_output_data['r_est']))) < fraction_hi_lo)
mc_dist = np.array(final_output_data['r_est']).T + select_dist_lo_hi*mc_dist_hi - (1-select_dist_lo_hi)*mc_dist_lo
mc_dist.clip(min=0.001, out=mc_dist)
e_a_ks = (final_output_data['e_a_ks']).clip(min=0.01)
mc_ak = np.random.normal(
loc = final_output_data['a_ks'],
scale = e_a_ks,
size=(mc_sampling,len(final_output_data['a_ks']))
)
mc_ak.clip(min=0., out=mc_ak)
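    # logg_function below evaluates the standard bolometric surface-gravity relation
    # log g = log g_Sun + 4 log10(Teff/Teff_Sun) + log10(M/M_Sun)
    #         + 0.4 * (Ks + BC_Ks - 5 log10(d/10pc) - A_Ks - Mbol_Sun)
    # with the solar reference values used in the code: log g_Sun = 4.438,
    # Teff_Sun = 5772 K and Mbol_Sun = 4.7554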
def logg_function(teff, mass, kmag, bc, dist, ak):
return(4.438 + 4*np.log10(teff/5772.) + np.log10(mass) + 0.4*(kmag + bc - 5.*np.log10(dist) + 5 - ak - 4.7554))
mc_logg = logg_function(mc_teff, mc_mass, mc_kmag, mc_bc, mc_dist, mc_ak)
logg_mean = np.array([np.nanmean(mc_logg[:,x]) for x in range(np.shape(mc_logg)[1])])
logg_std = np.array([np.nanstd(mc_logg[:,x]) for x in range(np.shape(mc_logg)[1])])
print('Done sampling logg uncertainty')
return(logg_std)
def combine_SME_IRAF_to_FINAL(output_filename, product_pipeline, sme_data, iraf_data, dr3_output_structure, abundance_zeropoints):
print('Combining information')
final_output_data = collections.OrderedDict()
final_output_abundances = collections.OrderedDict()
abundances_in_mode = np.array([(sme_data['MODE'][0,it]).replace(" ","") for it in range(len(sme_data['MODE'][0]))])
for each_key in dr3_output_structure.keys():
# Keys in output not matching input keys
if each_key=='star_id':
final_output_data[each_key]=np.array(iraf_data['tmass_id'])
final_output_abundances[each_key]=np.array(iraf_data['tmass_id'])
elif each_key in ['sobject_id']:
final_output_data[each_key]=np.array(sme_data[each_key.upper()])
final_output_abundances[each_key]=np.array(sme_data[each_key.upper()])
elif each_key in ['field_id','source_id','ra','ra_error','dec','dec_error','l','b','r_est','r_lo','r_hi','r_len','pmra','pmra_error','pmdec','pmdec_error','ra_dec_corr','ra_parallax_corr','ra_pmra_corr','ra_pmdec_corr','dec_parallax_corr','dec_pmra_corr','dec_pmdec_corr','parallax_pmra_corr','parallax_pmdec_corr','pmra_pmdec_corr','red_flag','ebv','snr_c1_iraf','snr_c2_iraf','snr_c3_iraf','snr_c4_iraf','flag_guess','rv_guess','e_rv_guess','teff_guess','logg_guess','feh_guess','j_m','j_msigcom','h_m','h_msigcom','ks_m','ks_msigcom','ph_qual_tmass','w2mpro','w2mpro_error','ph_qual_wise','parallax','parallax_error','visibility_periods_used','astrometric_chi2_al','astrometric_n_good_obs_al','ruwe','phot_g_mean_mag','bp_rp','irfm_teff','irfm_ebv','irfm_ebv_ref']:
final_output_data[each_key]=np.array(iraf_data[each_key])
elif each_key=='wg4_field':
if product_pipeline in ['lbol','lbol_final']:
final_output_data[each_key]=np.array([sme_data['FIELD'][x][:-5] for x in range(len(sme_data['FIELD']))])
final_output_data['wg4_pipeline']=np.array(['lbol' for x in range(len(sme_data['FIELD']))])
elif product_pipeline == 'seis':
final_output_data[each_key]=np.array([sme_data['FIELD'][x][:-5] for x in range(len(sme_data['FIELD']))])
final_output_data['wg4_pipeline']=np.array(['seis' for x in range(len(sme_data['FIELD']))])
else:
final_output_data[each_key]=np.array(sme_data['FIELD'])
final_output_data['wg4_pipeline']=np.array(['free' for x in range(len(sme_data['FIELD']))])
elif each_key in ['flag_sp','teff', 'e_teff', 'logg', 'e_logg', 'vmic', 'e_vmic', 'mass', 'lbol', 'age']:
final_output_data[each_key]=np.array(sme_data[each_key.upper()])
elif each_key[:-3] in ['alpha','LiI','CI','OI','NaI','MgI',
'AlI','SiI','KI','CaI','ScI','ScII',
'TiI','TiII','VI','CrI','CrII','MnI',
#'FeI','FeII',
'CoI','NiI','CuI','ZnI',
'RbI','SrI','YII','ZrI','MoI','RuI',
'BaII','LaII','CeII','NdII','SmII','EuII']:
final_output_data[each_key] = np.array([np.NaN for x in range(len(sme_data['FIELD']))])
final_output_data['e_'+each_key] = np.array([np.NaN for x in range(len(sme_data['FIELD']))])
final_output_data['nr_'+each_key] = np.zeros(len(sme_data['FIELD']))
final_output_data['flag_'+each_key] = np.zeros(len(sme_data['FIELD']))
elif each_key == 'rv_5854':
final_output_data[each_key]=np.array(sme_data['BA_VRAD'])
elif each_key == 'rv_6708':
final_output_data[each_key]=np.array(sme_data['LI_VRAD'])
elif each_key == 'rv_6722':
final_output_data[each_key]=np.array(sme_data['SI_VRAD'])
elif each_key == 'bc_ks':
final_output_data[each_key]=np.array(sme_data['BC_K'])
elif each_key == 'fe_h_atmo':
final_output_data[each_key]=np.array(sme_data['FEH'])
final_output_abundances[each_key]=np.array(sme_data['FEH'])
elif each_key == 'e_fe_h_atmo':
final_output_data[each_key]=np.array(sme_data['E_FEH'])
final_output_abundances[each_key]=np.array(sme_data['E_FEH'])
elif each_key == 'cov_e_fe_h_atmo':
final_output_data[each_key]=np.array(sme_data['C_FEH'])
final_output_abundances[each_key]=np.array(sme_data['C_FEH'])
elif each_key[:6]=='cov_e_':
if each_key[6:] in ['teff', 'logg']:
final_output_data[each_key]=np.array(sme_data['C_'+each_key[6:].upper()])
elif each_key in ['init_teff','init_logg']:
final_output_data[each_key]=np.array(sme_data['S'+each_key[5:].upper()][:,0])
elif each_key == 'init_fe_h_atmo':
final_output_data[each_key]=np.array(sme_data['SFEH'][:,0])
elif each_key == 'init_vbroad':
final_output_data[each_key]=np.array(sme_data['SVSINI'][:,0])
elif each_key=='fe_h':
final_output_data['fe_h']=np.array(sme_data['A_ABUND'][:,1] - abundance_zeropoints['A_Fe'][0])
final_output_data['e_fe_h']=np.array(sme_data['E_ABUND'][:,1])
final_output_data['cov_e_fe_h']=np.array(sme_data['C_ABUND'][:,1])
final_output_data['flag_fe_h']=np.array(sme_data['AFLAG'][:,1])
final_output_abundances['fe_h']=np.array(sme_data['A_ABUND'][:,1] - abundance_zeropoints['A_Fe'][0])
final_output_abundances['e_fe_h']=np.array(sme_data['E_ABUND'][:,1])
final_output_abundances['cov_e_fe_h']=np.array(sme_data['C_ABUND'][:,1])
final_output_abundances['flag_fe_h']=np.array(sme_data['AFLAG'][:,1])
elif each_key=='vbroad':
final_output_data[each_key]=np.array(sme_data['VSINI'])
final_output_data['e_'+each_key]=np.array(sme_data['E_VSINI'])
final_output_data['cov_e_'+each_key]=np.array(sme_data['C_VSINI'])
elif each_key=='rv_galah':
final_output_data[each_key]=np.array(sme_data['VEL'])
final_output_data['e_'+each_key]=np.array(sme_data['E_VEL'])
final_output_data['cov_e_'+each_key]=np.array(sme_data['C_VEL'])
elif each_key=='rv_gaia':
final_output_data[each_key]=np.array(iraf_data['radial_velocity'])
final_output_data['e_'+each_key]=np.array(iraf_data['radial_velocity_error'])
elif each_key=='chi2_sp':
final_output_data[each_key]=np.array(sme_data['CHI'][:,0])
elif each_key=='a_ks':
# Apply RJCE method
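            # RJCE = Rayleigh-Jeans Colour Excess method (Majewski et al. 2011),
            # which estimates A(Ks) from the H - W2 colour excess as 0.918*(H - W2 - 0.08)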
rjce_ak = np.array(0.918*(iraf_data['h_m'] - iraf_data['w2mpro'] - 0.08))
e_rjce_ak = 0.918*np.sqrt(iraf_data['h_msigcom']**2. + iraf_data['w2mpro_error']**2.)
rjce_ak.clip(min=0.0, out=rjce_ak)
# check if 2MASS Hmag and WISE W2mag have good quality
tmass_adjusted = np.array(iraf_data['ph_qual_tmass'])
tmass_adjusted[np.where(tmass_adjusted==' ')[0]] = 'UUU'
tmass_adjusted[np.where(tmass_adjusted=='')[0]] = 'UUU'
wise_adjusted = np.array(iraf_data['ph_qual_wise'])
wise_adjusted[np.where(wise_adjusted==' ')[0]] = 'UUUU'
wise_adjusted[np.where(wise_adjusted=='')[0]] = 'UUUU'
h_m_qual = np.array([tmass_adjusted[x][1] == 'A' for x in range(len(tmass_adjusted))])
w2_qual = np.array([wise_adjusted[x][1] == 'A' for x in range(len(wise_adjusted))])
# if photometry bad, exchange a_k by ebv approximation
bad_rjce = np.isnan(iraf_data['h_m']) | np.isnan(iraf_data['w2mpro']) | (h_m_qual==False) | (w2_qual==False)
ebv_ak = 0.36*iraf_data['ebv']
rjce_ak[bad_rjce] = ebv_ak[bad_rjce]
e_rjce_ak[bad_rjce] = ebv_ak[bad_rjce]
final_output_data['a_ks']=rjce_ak
final_output_data['e_a_ks']=e_rjce_ak
# adjust too large E(B-V) if photometry good
bad_ebv = np.where(0.36*iraf_data['ebv'] > 3*rjce_ak)[0]
if len(bad_ebv) > 0:
final_output_data['ebv'][bad_ebv] = 2.78*rjce_ak[bad_ebv]
# nearby stars with ebv=0 and a_ks=0
nearby = np.where(iraf_data['r_est'] <= 100.)[0]
final_output_data['ebv'][nearby] = 0.
final_output_data['a_ks'][nearby] = 0.
final_output_data['e_a_ks'][nearby] = 0.
# Abundance keys
elif (each_key[0:2]=='A_') & (each_key != 'A_Ks'):
element = each_key[2:]
element_in_sme_data = np.where(element == abundances_in_mode)[0]
if np.shape(element_in_sme_data)[0] != 1:
# print('Exchange values for '+element)
if element == 'K':
element_in_sme_data = np.where('K7699' == abundances_in_mode)[0]
if element == 'Ca':
element_in_sme_data = np.where('Ca5862' == abundances_in_mode)[0]
if element == 'Cu':
element_in_sme_data = np.where('Cu5782' == abundances_in_mode)[0]
if element == 'Ba':
element_in_sme_data = np.where('Ba5854' == abundances_in_mode)[0]
# only if the element exists in the SME data
elif np.shape(element_in_sme_data)[0] == 1:
element_in_sme_data=element_in_sme_data[0]
final_output_abundances[each_key]=np.array(sme_data['A_ABUND'][:,element_in_sme_data])
if element=='Li':
final_output_data[each_key]=np.array(sme_data['A_ABUND'][:,element_in_sme_data])
if element in ['Fe', 'Li']:
final_output_data['flux_'+each_key]=np.array(sme_data['LINEFLUX'][:,element_in_sme_data])
final_output_data['chi_'+each_key]=np.array(sme_data['CHI'][:,element_in_sme_data])
if element!='Fe':
try:
final_output_data[element+'_fe']=np.array(
# [X/Fe] = [X/H] - [Fe/H] = A(X) - A(X)_sun - (A(Fe) - A(Fe)_sun)
sme_data['A_ABUND'][:,element_in_sme_data]- abundance_zeropoints[each_key][0]
- (final_output_abundances['A_Fe'] - abundance_zeropoints['A_Fe'][0])
)
except:
print('You are using A(Fe) = 7.45 + fe_h_atmo!')
final_output_data[element+'_fe']=np.array(
# [X/Fe] = [X/H] - [Fe/H] = A(X) - A(X)_sun - (A(Fe) - A(Fe)_sun)
sme_data['A_ABUND'][:,element_in_sme_data]- abundance_zeropoints[each_key][0]
- (7.45 + final_output_data['fe_h_atmo'] - abundance_zeropoints['A_Fe'][0])
)
#final_output_data[element+'_fe']=np.array(sme_data['ABUND'][:,element_in_sme_data])
#final_output_data['e_'+element+'_fe']=np.array(sme_data['E_ABUND'][:,element_in_sme_data])
#final_output_data['e_'+element]=np.array(sme_data['E_ABUND'][:,element_in_sme_data])
final_output_data['cov_e_'+element]=np.array(sme_data['C_ABUND'][:,element_in_sme_data])
final_output_data['flag_'+element]=np.array(sme_data['AFLAG'][:,element_in_sme_data])
final_output_abundances['cov_e_'+element]=np.array(sme_data['C_ABUND'][:,element_in_sme_data])
final_output_abundances['flag_'+element]=np.array(sme_data['AFLAG'][:,element_in_sme_data])
final_output_abundances['flux_'+each_key]=np.array(sme_data['LINEFLUX'][:,element_in_sme_data])
final_output_abundances['chi_'+each_key]=np.array(sme_data['CHI'][:,element_in_sme_data])
else:
print('Could not find element '+element+' in SME data')
# Already filled
elif ((each_key in ['wg4_pipeline', 'e_fe_h', 'e_rv_galah', 'c_rv_galah', 'e_rv_gaia', 'e_vbroad', 'e_a_ks']) | (each_key[:4]=='e_A_') | (each_key[:7]=='flag_A_') | (each_key[:7]=='flux_A_') | (each_key[:7]=='chi2_A_') | (each_key[-3:]=='_fe')):
pass
# Placeholder
elif each_key in ['e_mass', 'e_lbol', 'e_age', 'e_bc_ks']:
final_output_data[each_key] = np.array([np.NaN for x in range(len(sme_data['FIELD']))])
else:
print('No match for '+each_key)
return(final_output_data, final_output_abundances)
def combine_line_by_line(
final_output_data,
final_output_abundances,
abundance_zeropoints,
abundance_uncertainty_limit = 0.005,
clip_outlier_sigma = 2,
debug = False
):
"""
We combine all line measurements
for a given element and species
as outlined in the converter dictionary,
e.g. OI is a combination of O7772, O7774, O7775
    Abundance uncertainties are floored
    at abundance_uncertainty_limit.
    Only measurements within clip_outlier_sigma of the
    error-weighted mean are kept, and the mean is then recomputed.
"""
print('Only using A(X) with flags == 0 if available, otherwise == 1')
print('Setting uncertainty floor for A(X) to >= '+str(abundance_uncertainty_limit))
converter = collections.OrderedDict()
"""
before Karin ran combined
converter['LiI'] = ['Li']
converter['CI'] = ['C6588']
converter['OI'] = ['O7772','O7774','O7775']
converter['NaI'] = ['Na5683','Na5688'] # leaving out 'Na4752'
converter['MgI'] = ['Mg4730','Mg5711','Mg7692'] # leaving out 'Mg7722', 'Mg7759', 'Mg7811'
converter['AlI'] = ['Al6696','Al6699','Al7835','Al7836']
converter['SiI'] = ['Si5684','Si5690','Si5701','Si5772','Si5793'] # leaving out 'Si5666','Si6722','Si7680'
converter['KI'] = ['K5802','K7699']
converter['CaI'] = ['Ca5857','Ca5868','Ca6494','Ca6500'] # leaving out Ca6509
converter['ScI'] = ['Sc4744','Sc4753','Sc5672','Sc5687','Sc5724']
converter['ScII'] = ['Sc5658','Sc5667','Sc5684','Sc6605']
converter['TiI'] = ['Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti6717'] # 'Ti5866', 'Ti6599','Ti7853'
converter['TiII'] = ['Ti4720','Ti4765','Ti4799','Ti4866','Ti4874'] #,'Ti4849'
converter['VI'] = ['V4747','V4784','V4797','V4832']
converter['CrI'] = ['Cr4775','Cr4789','Cr4801','Cr5702','Cr5720','Cr5788','Cr5845','Cr6630']
converter['CrII'] = ['Cr4848']
converter['MnI'] = ['Mn4739','Mn4762','Mn4766','Mn4783']
#converter['FeI'] = ['Fe4789','Fe4793','Fe4794','Fe4803','Fe4808','Fe4876','Fe4890','Fe4891','Fe5651','Fe5652','Fe5661','Fe5663','Fe5679','Fe5680','Fe5696','Fe5702','Fe5705','Fe5731','Fe5732','Fe5742','Fe5775','Fe5778','Fe5807','Fe5809','Fe5812','Fe5815','Fe5850','Fe5853','Fe5855','Fe5859','Fe6482','Fe6495','Fe6499','Fe6518','Fe6546','Fe6593','Fe6594','Fe6598','Fe6609','Fe6628','Fe6648','Fe6678','Fe6699','Fe6704','Fe6714','Fe6725','Fe6733','Fe7710','Fe7723','Fe7748']
#converter['FeII'] = ['Fe4720','Fe4731','Fe4833','Fe6516','Fe7712']
converter['CoI'] = ['Co5647','Co6632','Co7713','Co7838']
converter['NiI'] = ['Ni5847','Ni6586']
converter['CuI'] = ['Cu5700','Cu5782']
converter['ZnI'] = ['Zn4722','Zn4811']
converter['RbI'] = ['Rb7800']
converter['SrI'] = ['Sr6550']
converter['YII'] = ['Y4855','Y4884','Y5663']
converter['ZrI'] = ['Zr4739','Zr4772','Zr4806','Zr4828']
converter['MoI'] = ['Mo5751','Mo5858']
converter['RuI'] = ['Ru4869']
converter['BaII'] = ['Ba5854','Ba6497']
converter['LaII'] = ['La4716','La4749','La4804','La5806']
converter['CeII'] = ['Ce4774']
converter['NdII'] = ['Nd4811','Nd5741','Nd5770','Nd5812','Nd5842']
converter['SmII'] = ['Sm4837','Sm4854']
converter['EuII'] = ['Eu5819','Eu6645']
converter['alpha'] = [
'Mg4730','Mg5711','Mg7692',
'Si5666','Si5684','Si5690','Si5701','Si5772','Si5793','Si6722','Si7680',
'Ca5857','Ca5868','Ca6494','Ca6500',
'Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti6717',
'Ti4720','Ti4765','Ti4799','Ti4866','Ti4874'
]
"""
converter['LiI'] = ['Li6708']
converter['CI'] = ['C6588']
converter['OI'] = ['O']
converter['NaI'] = ['Na']
converter['MgI'] = ['Mg5711']
# run with old NLTE grid
# converter['SiI'] = ['Si5684','Si5690','Si5701','Si5772','Si5793'] # leaving out 'Si5666','Si6722','Si7680'
converter['SiI'] = ['Si']
converter['AlI'] = ['Al']
converter['KI'] = ['K7699']
# run with old NLTE grid
# converter['CaI'] = ['Ca5857','Ca5868','Ca6494','Ca6500'] # leaving out Ca6509
converter['CaI'] = ['Ca']
converter['ScI'] = ['Sc']# Sc4744','Sc4753','Sc5672','Sc5687','Sc5724']
#converter['ScII'] = ['Sc5667','Sc5684','Sc6605']
#converter['TiI'] = ['Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti6717'] # 'Ti5866', 'Ti6599','Ti7853'
#converter['TiII'] = ['Ti4720','Ti4765','Ti4799','Ti4866','Ti4874'] #,'Ti4849'
converter['TiI'] = ['Ti4758','Ti4759','Ti4782','Ti4802','Ti4820','Ti5739']
converter['TiII'] = ['Ti4720','Ti4765','Ti4799','Ti4866']
#converter['VI'] = ['V4747','V4784','V4797']
converter['VI'] = ['V4797','V4832'] #'V4784',
#converter['CrI'] = ['Cr4775','Cr4789','Cr4801','Cr5702','Cr5720','Cr5788','Cr5845','Cr6630']
#converter['CrII'] = ['Cr4848']
converter['CrI'] = ['Cr']
converter['MnI'] = ['Mn']
converter['CoI'] = ['Co5647','Co6490','Co6632','Co7713'] #maybe Def NO 'Co4781','Co4900','Co6551','Co7838'
converter['NiI'] = ['Ni5847','Ni6586']
converter['CuI'] = ['Cu5700','Cu5782']
converter['ZnI'] = ['Zn4722','Zn4811']
converter['RbI'] = ['Rb7800']
converter['SrI'] = ['Sr6550']
converter['YII'] = ['Y4855','Y4884'] # leaving out 'Y4820','Y5663' 'Y5729'
#converter['YII'] = ['Y']
converter['ZrI'] = ['Zr4739','Zr4772','Zr4806','Zr4828','Zr5681']
converter['MoI'] = ['Mo5858','Mo6619']#'Mo5689','Mo5751'
converter['RuI'] = ['Ru4869','Ru5699']#'Ru4758'
converter['BaII'] = ['Ba'] # not using 'Ba5854','Ba6497'
converter['LaII'] = ['La4749','La4804','La5806'] # not using 'La4716',
converter['CeII'] = ['Ce4774']
converter['NdII'] = ['Nd4811','Nd5812'] # leaving out 'Nd5741','Nd5843','Nd6740','Nd5770','Nd5842'
converter['SmII'] = ['Sm4720','Sm4848'] # leaving out 'Sm4792','Sm4837','Sm4854'
converter['EuII'] = ['Eu6645'] # leaving out 'Eu5819',
converter['alpha'] = [
'Mg5711',
'Si',
'Ca',
'Ti4758','Ti4759','Ti4782','Ti4802','Ti4820','Ti5739'
]
# Until 191209
#converter['alpha'] = [
# 'Mg5711',
# 'Si',
# 'Ca',
# 'Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti6717',
# 'Ti4720','Ti4765','Ti4799','Ti4866','Ti4874'
#]
# Until 191105 (before using Karin's NLTE results)
# converter['alpha'] = [
# 'Mg5711',
# 'Si5666','Si5684','Si5690','Si5701','Si5772','Si5793','Si6722','Si7680',
# 'Ca5857','Ca5868','Ca6494','Ca6500',
# 'Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti6717',
# 'Ti4720','Ti4765','Ti4799','Ti4866','Ti4874'
# ]
# 'conservative_alpha'
#converter['alpha'] = [
# 'Mg4730','Mg5711',
# 'Si5684','Si5690','Si5701','Si5772',
# 'Ca5868',
# 'Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820',
# 'Ti4765','Ti4799','Ti4874'
#]
# Until 190519:
#[
# 'Mg4730','Mg5711','Mg7692',
# 'Si5666','Si5684','Si5690','Si5701','Si5772','Si5793',
# 'Ca5857','Ca5868','Ca6494','Ca6500',
# 'Ti4758','Ti4759','Ti4778','Ti4782','Ti4798','Ti4802','Ti4820','Ti5689','Ti5716','Ti5720','Ti5739','Ti5866','Ti6599','Ti6717','Ti7853'
#]
"""
ii=where(mode eq 'Mg' or mode eq 'Si' or mode eq 'Ti')
for i=0,n_elements(object)-1 do begin
j=where(finite(res[i].abund[ii]) and res[i].aflag[ii] eq 0,jc)
if jc ne 0 then begin
res[i].alpha_fe = total(res[i].abund[ii[j]]/res[i].e_abund[ii[j]]^2)/total(1./res[i].e_abund[ii[j]]^2)
res[i].e_alpha_fe = sqrt(1./total(1./res[i].e_abund[ii[j]]^2))
;print,res[i].alpha_fe,res[i].e_alpha_fe,res[i].e_teff
endif
endfor
"""
def combine_line_measurements_for_element(each_element, final_output_data, final_output_abundances, data_index,debug=debug):
"""
Combine the individual measurements
Figure out which lines are not flagged
1a) If more than 1 line is useful:
Check for outliers with sigma-clipping
        For large sigma (>0.3, with exception of Li) or alpha: be more strict (keep only -1.5 < [X/Fe] < 2.5)
2a) If still more than 1 line is useful:
simple error-weighted mean combination
2b) elif 1 line useful (and element is not alpha_fe):
give back that 1 line measurement
2c) else:
return nan value
1b) elif 1 line useful (and element is not alpha_fe):
give back that 1 line measurement
1c) else:
return nan values
"""
useful_line_bitmask = []
useful_line_measurements = []
useful_line_uncertainties = []
if debug:
print('Starting going through converter')
for each_index, each_line in enumerate(converter[each_element]):
if debug:
print('index, line:',each_index,each_line)
# if line measurement flag == 0
if (
(final_output_data['flag_'+each_line][data_index] == 0) &
np.isfinite(final_output_abundances['A_'+each_line][data_index])
):
useful_line_bitmask.append(2**each_index)
useful_line_measurements.append(final_output_abundances['A_'+each_line][data_index] - abundance_zeropoints['A_'+each_line][0] - final_output_abundances['fe_h'][data_index])
useful_line_uncertainties.append(final_output_data['cov_e_'+each_line][data_index])
useful_line_bitmask=np.array(useful_line_bitmask)
useful_line_measurements=np.array(useful_line_measurements)
useful_line_uncertainties=np.array(useful_line_uncertainties)
if debug:
print('useful_line_bitmask',useful_line_bitmask)
print('useful_line_measurements',useful_line_measurements)
print('useful_line_uncertainties',useful_line_uncertainties)
if len(useful_line_measurements) > 1:
sigma_clipped_mean, sigma_clipped_median, sigma_clipped_std = sigma_clipped_stats(useful_line_measurements, sigma=clip_outlier_sigma)
sigma_outliers = np.abs(sigma_clipped_mean - useful_line_measurements) > clip_outlier_sigma*sigma_clipped_std
if ((sigma_clipped_std) > 0.3) | (each_element == 'alpha'): # Some outliers are causing problems
    if each_element != 'LiI':
        more_robust = (useful_line_measurements > -1.5) & (useful_line_measurements < 2.5)
        sigma_clipped_mean, sigma_clipped_median, sigma_clipped_std = sigma_clipped_stats(useful_line_measurements[more_robust], sigma=clip_outlier_sigma)
        # recompute the outlier mask with the more robust clipped statistics
        sigma_outliers = (~more_robust) | (np.abs(sigma_clipped_mean - useful_line_measurements) > clip_outlier_sigma*sigma_clipped_std)
if len(useful_line_measurements[~sigma_outliers]) > 1:
return(
np.sum(useful_line_measurements[~sigma_outliers]/useful_line_uncertainties[~sigma_outliers]**2)/np.sum(1./useful_line_uncertainties[~sigma_outliers]**2),
np.sqrt(1./np.sum(1./useful_line_uncertainties[~sigma_outliers]**2)),
np.sum(useful_line_bitmask[~sigma_outliers]),
0)
#np.sum(useful_line_measurements[~sigma_outliers]/useful_line_uncertainties[~sigma_outliers]**2)/np.sum(1./useful_line_uncertainties[~sigma_outliers]**2),
#np.sqrt(1./np.sum(1./useful_line_uncertainties[~sigma_outliers]**2) + np.var(useful_line_measurements[~sigma_outliers])),
#np.sum(useful_line_bitmask[~sigma_outliers],
#0)
elif (len(useful_line_measurements[~sigma_outliers]) == 1) & (each_element != 'alpha'):
return(useful_line_measurements[~sigma_outliers],useful_line_uncertainties[~sigma_outliers],useful_line_bitmask[~sigma_outliers],0)
else:
return(np.nan,np.nan,np.nan,np.nan)
elif (len(useful_line_measurements) == 1) & (each_element != 'alpha'):
return(useful_line_measurements[0],useful_line_uncertainties[0],useful_line_bitmask[0],0)
else:
    # No unflagged detection available: scan the same lines again for
    # possible upper limits (flagged measurements with sufficient line flux)
    useful_line_bitmask = []
    useful_line_measurements = []
    useful_line_uncertainties = []
    for each_index, each_line in enumerate(converter[each_element]):
        if (
            (final_output_data['flag_'+each_line][data_index] == 1) &
            (final_output_abundances['flux_A_'+each_line][data_index] > 0.03) &
            (final_output_abundances['A_'+each_line][data_index] > -3)
        ):
            useful_line_bitmask.append(2**each_index)
            useful_line_measurements.append(final_output_abundances['A_'+each_line][data_index] - abundance_zeropoints['A_'+each_line][0] - final_output_abundances['fe_h'][data_index])
            useful_line_uncertainties.append(final_output_data['cov_e_'+each_line][data_index])
    useful_line_bitmask=np.array(useful_line_bitmask)
    useful_line_measurements=np.array(useful_line_measurements)
    useful_line_uncertainties=np.array(useful_line_uncertainties)
    if len(useful_line_measurements) >= 1:
        # report the lowest available measurement as the upper limit
        upper_limit = np.where(np.min(useful_line_measurements) == useful_line_measurements)[0][0]
        return(useful_line_measurements[upper_limit],useful_line_uncertainties[upper_limit],useful_line_bitmask[upper_limit],1)
    else:
        return(np.nan,np.nan,np.nan,np.nan)
for each_element in converter.keys():
(final_output_data[each_element+'_fe'], final_output_data['e_'+each_element+'_fe'], final_output_data['nr_'+each_element+'_fe'], final_output_data['flag_'+each_element+'_fe']) = np.array([combine_line_measurements_for_element(each_element, final_output_data, final_output_abundances, data_index) for data_index in range(len(final_output_abundances['sobject_id']))]).T
print('Uncertainty is calculated from sqrt(1/sum(1/unc**2)) only, but neglecting the line-by-line variance for now!')
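# Worked example of the error-weighted mean above: two unflagged lines with
# values [0.10, 0.30] dex and uncertainties [0.05, 0.10] dex combine to
# (0.10/0.05**2 + 0.30/0.10**2)/(1/0.05**2 + 1/0.10**2) = 70/500 = 0.14 dex,
# with combined uncertainty sqrt(1/(1/0.05**2 + 1/0.10**2)) = sqrt(1/500) ~ 0.045 dex.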
return(final_output_data, final_output_abundances)
def photometric_binarity_cool_outlier_flag(final_output_data):
"""
This function loads isochrones for each [Fe/H] and checks
if the estimated Teff is cooler than the one for the oldest, most metal-rich one.
The flag is useful to identify MS binaries with logg>4
or unreliably cool stars for logg<4
"""
try:
isos = np.load('VAC_ages/Parsec_isochrones.npy')
except:
isos = np.load('VAC_ages/Parsec_isochrones.npy',fix_imports=True,allow_pickle=True,encoding='latin1')
is_photometric_binary = np.zeros(len(final_output_data['fe_h']),dtype=bool)
for each_iso_index, each_iso_feh in enumerate([isos[i].FeH for i in range(len(isos)-1)]):
# We only employ this model for the non-metal-poor stars, because
# for the metal-poor stars, we have already used the adjusted oldest isochrone
if each_iso_feh >= -1.0:
in_eiso_feh_bin = (final_output_data['fe_h'] > isos[each_iso_index].FeH) & (final_output_data['fe_h'] <= isos[each_iso_index+1].FeH)
if each_iso_index == 0:
in_eiso_feh_bin = (final_output_data['fe_h'] <= each_iso_feh)
# the enumerated FeH list has len(isos)-1 entries, so its last index is len(isos)-2
if each_iso_index == len(isos)-2:
in_eiso_feh_bin = (final_output_data['fe_h'] > each_iso_feh)
# find the RGB tip of each isochrone by finding the first EEP below logg~2dex where the logg value of the next EEP is larger
rgb_tip = np.where(
(isos[each_iso_index].data[-1]['logG'][:-1] < 2) &
(isos[each_iso_index].data[-1]['logG'][:-1] < isos[each_iso_index].data[-1]['logG'][1:]))[0][0]
binary_identification_function = scipy.interpolate.interp1d(
isos[each_iso_index].data[-1]['logG'][:rgb_tip],
10**isos[each_iso_index].data[-1]['logTeff'][:rgb_tip],
bounds_error=False,
fill_value = (10**isos[each_iso_index].data[-1]['logTeff'][rgb_tip],10**isos[each_iso_index].data[-1]['logTeff'][0])
)
binary_identification_teff_lower = binary_identification_function(
np.array(final_output_data['logg'][in_eiso_feh_bin]-0.15)
)
binary_identification_teff_upper = binary_identification_function(
np.array(final_output_data['logg'][in_eiso_feh_bin]+0.15)
)
identified_binary_lower = final_output_data['teff'][in_eiso_feh_bin]+150 < binary_identification_teff_lower
identified_binary_upper = final_output_data['teff'][in_eiso_feh_bin]+150 < binary_identification_teff_upper
identified_binary = np.array(identified_binary_lower & identified_binary_upper)
arange = np.arange(len(final_output_data['teff']))
arange_identified_binary = (arange[in_eiso_feh_bin])[identified_binary]
is_photometric_binary[arange_identified_binary] = True
return is_photometric_binary
def cool_outlier_flag(final_output_data):
# Here we read in a cutoff isochrone to decide which metal-poor stars are not reliable
try:
cutoff_isochrone = np.loadtxt('../input/MP_cutoff_isochrone.txt')
except:
isos = np.load('VAC_ages/Parsec_isochrones.npy')
most_luminous_index = np.where(isos[0].data[-1]['logG'] == np.min(isos[0].data[-1]['logG']))[0]
up_to_rgb_tip = np.arange(most_luminous_index[0])
iso_change_1 = 28 # points from oldest, most metal-poor isochrone with teff-500 and logg-0.3
iso_change_2 = 42 # points from oldest, most metal-poor isochrone with teff-500 and logg+0.3
cutoff_isochrone = np.concatenate(([
np.array([[10**isos[0].data[-1]['logTeff'][0]-1000,
isos[0].data[-1]['logG'][0]]]),
np.array([10**isos[0].data[-1]['logTeff'][up_to_rgb_tip[:iso_change_1]]-500,
isos[0].data[-1]['logG'][up_to_rgb_tip[:iso_change_1]]-0.3]).T,
np.array([10**isos[0].data[-1]['logTeff'][up_to_rgb_tip[iso_change_2:]]-500,
isos[0].data[-1]['logG'][up_to_rgb_tip[iso_change_2:]]+0.3]).T,
np.array([[10**isos[0].data[-1]['logTeff'][up_to_rgb_tip[-1]]-500,
isos[0].data[-1]['logG'][up_to_rgb_tip[-1]]-1.3]])
]))
np.savetxt('../input/MP_cutoff_isochrone.txt',cutoff_isochrone,fmt='%s')
cutoff_isochrone_function = scipy.interpolate.interp1d(cutoff_isochrone[:,1],cutoff_isochrone[:,0])
cutoff_teff = cutoff_isochrone_function(np.array(final_output_data['logg']))
cool_outlier = photometric_binarity_cool_outlier_flag(final_output_data)
unreliable_cool_mp_stars = (
((final_output_data['teff'] < cutoff_teff) & (final_output_data['fe_h'] < -1.2)) |
(cool_outlier & (final_output_data['logg'] < 4.0))
)
return(unreliable_cool_mp_stars)
def apply_sp_flags(final_output_data, final_output_abundances):
get_bin = lambda x, n: format(int(x), 'b').zfill(n)
sme_idl_bitmask = np.array([get_bin(x, 10) for x in final_output_data['flag_sp']])
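# e.g. get_bin(5, 10) returns '0000000101', i.e. bits 1 and 4 are raised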
# 0 : alright
#+ 1 : convergence == non-finite SPs
#+ 2 : grid limit reached
#+ 4 : Gaussian RV fit failed
#+ 8 : ELLI mass estimated failed
#+16 : Timeout on ISAAC
red_bitmask = np.array([get_bin(x, 10) for x in final_output_data['red_flag']])
# 0 : for no flags,
#+ 1 : for bad wavelength solution in ccd_1,
#+ 2 : for bad wavelength solution in ccd_2,
#+ 4 : for bad wavelength solution in ccd_3,
#+ 8 : for bad wavelength solution in ccd_4,
#+16 : for molecfit fail in ccd_3,
#+32 : for molecfit fail in ccd_4,
#+64 : if the object is actually a twilight flat.
# tsne_old is a combination of tsne_class_1_0.fits and tsne_classification_dr52_2018_04_09.csv
tsne_old = Table.read('../input/tSNE/tsne_flags_combined.fits')
# CMP giants
# HaHb emission
# binary
# hot stars
# mol. abs. bands
# problematic - ccd2 continuum
# problematic - ccd3 continuum
# problematic - ccd3 spikes
# problematic - ccd4
# problematic - ccd4 oversubtraction
# problematic - ccd4 spikes
# problematic - ccd4 strong spike
# problematic - neg. flux
# problematic - osc. cont.
# Dictionary:
#
# BIN - Binary
# HAE - Halpha emission
# HBE - Hbeta emission
# WRD - weird (very general)
# MAB - Molecular absorption bands
# CMP - Cool metal poor giants
# HOT - Hot stars:)
# HFR - Hot fast rotators
# TAB - Telluric absorption
# TEM - Telluric emission
# SPI - Spikes
# NTR - Nothing to report
# CEH - Carbon-enhanced (SWAN bands)
#
binary_old = (
(tsne_old['class_norm'] == 'binary') |
(tsne_old['class_red'] == 'binary') |
(tsne_old['tsne'] == 'BIN')
)
emission_old = (
(tsne_old['class_red'] == 'HaHb emission') |
(tsne_old['class_norm'] == 'HaHb emission') |
(tsne_old['tsne'] == 'HAE') |
(tsne_old['tsne'] == 'HBE')
)
reduction_old = (
(tsne_old['class_red'] == 'problematic') |
(tsne_old['class_norm'] == 'problematic - ccd2 continuum') |
(tsne_old['class_norm'] == 'problematic - ccd3 continuum') |
(tsne_old['class_norm'] == 'problematic - ccd3 spikes') |
(tsne_old['class_norm'] == 'problematic - ccd4') |
(tsne_old['class_norm'] == 'problematic - ccd4 oversubtraction') |
(tsne_old['class_norm'] == 'problematic - ccd4 spikes') |
(tsne_old['class_norm'] == 'problematic - ccd4 strong spike') |
(tsne_old['class_norm'] == 'problematic - neg. flux') |
(tsne_old['class_norm'] == 'problematic - osc. cont.') |
(tsne_old['tsne'] == 'WRD') |
(tsne_old['tsne'] == 'TAB') |
(tsne_old['tsne'] == 'TEM') |
(tsne_old['tsne'] == 'SPI')
)
tsne_new = Table.read('../input/tSNE/tsne_classification_dr53_2018_12_30.csv')
bins = (tsne_new['classification'] == 'BIN')
emission = (
(tsne_new['classification'] == 'HAE') |
(tsne_new['classification'] == 'HBE')
)
reduction = (
(tsne_new['classification'] == 'WRD') |
(tsne_new['classification'] == 'TAB') |
(tsne_new['classification'] == 'TEM') |
(tsne_new['classification'] == 'SPI')
)
tsne_hot = Table.read('../input/tSNE/hot_stars_binary_emission_2020_02_07.csv')
bins_hot = (tsne_hot['classification'] == 'BIN')
emission_hot = (tsne_hot['classification'] == 'EMISSION')
np.savetxt('../input/tSNE/tsne_binaries.txt', np.unique(np.concatenate((np.array(tsne_old['sobject_id'][binary_old]),np.array(tsne_new['sobject_id'][bins]),np.array(tsne_hot['sobject_id'][bins_hot])))), fmt='%s')
tsne_binary = np.loadtxt('../input/tSNE/tsne_binaries.txt',dtype=int)
np.savetxt('../input/tSNE/tsne_emission.txt', np.unique(np.concatenate((np.array(tsne_old['sobject_id'][emission_old]),np.array(tsne_new['sobject_id'][emission]),np.array(tsne_hot['sobject_id'][emission_hot])))), fmt='%s')
tsne_emission = np.loadtxt('../input/tSNE/tsne_emission.txt',dtype=int)
np.savetxt('../input/tSNE/tsne_reduction_issues.txt', np.unique(np.concatenate((np.array(tsne_old['sobject_id'][reduction_old]),np.array(tsne_new['sobject_id'][reduction])))), fmt='%s')
tsne_reduction = np.loadtxt('../input/tSNE/tsne_reduction_issues.txt',dtype=int)
bitmask_sp = np.zeros(len(final_output_data['sobject_id']),dtype=int)
print('Applying the following flags:')
def raise_bitmask(bit, position):
#print(position)
bitmask_sp[position] += bit
# Raise bitmask 1
print(' 1: RUWE > 1.4 (bad astrometric solution)')
raise_bitmask(1, final_output_data['ruwe'] > 1.4)
# Raise bitmask 2
print(' 2: unreliable broadening')
raise_bitmask(2, (
(final_output_data['vbroad'] <= 3.) |
(final_output_data['vbroad'] >= 100.))
)
# Raise bitmask 4
print('    4: Low S/N (snr_c2_iraf <= 10, using CCD2 as the S/N tracer)')
raise_bitmask(4, final_output_data['snr_c2_iraf'] <= 10)
# Raise bitmask 8
print(' 8: reduction issue:')
print(' a) Wavelength solution (propagating of red_flag)')
print(' b) t-SNE projected reduction issues: Negative/positive fluxes, spikes, etc.')
raise_bitmask(8, np.array([(x[-1]=='1') | (x[-2]=='1') | (x[-3]=='1') | (x[-4]=='1') | (y in tsne_reduction) for (x, y) in zip(red_bitmask, final_output_data['sobject_id'])]))
# Raise bitmask 16
print(' 16: t-SNE projected emission features')
raise_bitmask(16, np.array([each in tsne_emission for each in final_output_data['sobject_id']]))
# Raise bitmask 32
print(' 32: t-SNE projected binaries')
raise_bitmask(32, np.array([each in tsne_binary for each in final_output_data['sobject_id']]))
# Raise bitmask 64
print(' 64: Photometric binarity flag')
is_photometric_binary = photometric_binarity_cool_outlier_flag(final_output_data)
raise_bitmask(64, ((is_photometric_binary == True) & (final_output_data['logg'] >= 4.0)))
# Raise bitmask 128
print(' 128: SNR-dependent high SME chi2 (bad fit) / FYI: median chi2_sp is 0.748')
raise_bitmask(128, final_output_data['chi2_sp'] > np.exp(0.08*final_output_data['snr_c2_iraf'])+0.1*final_output_data['snr_c2_iraf'])
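# e.g. at snr_c2_iraf = 50 this cut is exp(0.08*50) + 0.1*50 = exp(4) + 5 ~ 59.6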
"""
Karin's suggestion:
if not using chi2_limit(snr, teff), go only for selecting the definite outliers
raise_bitmask(16, final_output_data['chi2_sp'] > np.exp(0.08*final_output_data['snr_c2_iraf'])+0.1*final_output_data['snr_c2_iraf'])
My suggestion:
raise_bitmask(16, final_output_data['chi2_sp'] / 0.35 > 3. * np.exp(1/90.*final_output_data['snr_c2_iraf']))
which cuts away most cool giants...
Morgan's suggestion:
(chi2_sp - (0.75 - 1) / 10 * ln(snr_c2_iraf)) / (((.005)/60.) * snr_c2_iraf * snr_c2_iraf + 0.3) > 3. * chi2_sp
"""
"""
t-SNE flags as taken from "tsne_classification_dr52_2018_04_09.csv"
TO BE UPDATED WITH TRAVEN'S NEW DR5.3 FLAGS
"""
# Raise bitmask 256
print('  256: Problems with Fe lines: line flux not between 0.03 and 1.00, Teff <= 4100 K, or fe_h non-finite or > 1.0')
raise_bitmask(256,
(
(final_output_data['teff'] <= 4100) |
(final_output_abundances['flux_A_Fe'] <= 0.03) |
(final_output_abundances['flux_A_Fe'] > 1.00)
) |
np.isnan(final_output_data['fe_h']) |
(final_output_data['fe_h'] > 1.0)
)
# Raise bitmask 512
print('  512: sme did not finish')
print('    a) no convergence == non-finite SPs')
print('    b) Gaussian RV fit failed')
print('    c) ELLI mass estimate failed')
print('    d) Timeout on ISAAC')
raise_bitmask(512, np.array([((x[-1]=='1') | (x[-3]=='1') | (x[-4]=='1') | (x[-5]=='1')) for x in sme_idl_bitmask]))
unreliable_cool_mp_stars = cool_outlier_flag(final_output_data)
# Raise bitmask 1024
print('1024: MARCS grid limit reached or outside of reasonable parameter range')
raise_bitmask(1024, (
np.array([x[-2]=='1' for x in sme_idl_bitmask]) |
(final_output_data['fe_h_atmo'] > 1.0) |
(final_output_data['fe_h'] > 1.0) |
(final_output_data['teff'] <=3000) |
# This is the clump of metal-poor stars below Teff~4000 K
((final_output_data['teff'] <=4000) & (final_output_data['fe_h'] <=-1.0)) |
# The aforementioned unreliable cool MP stars
(unreliable_cool_mp_stars == True)
)
)
adjust_fe_h = (final_output_data['fe_h'] >= 1.0)
final_output_data['fe_h'][adjust_fe_h] = 1.0
adjust_teff = (final_output_data['teff'] < 3000.)
final_output_data['teff'][adjust_teff] = 3000.
final_output_data['flag_sp'] = bitmask_sp
return(final_output_data, final_output_abundances)
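# Hypothetical downstream usage (illustration only, not part of the pipeline):
# individual flag_sp bits can be tested with a bitwise AND, e.g. selecting
# t-SNE projected binaries via
# is_tsne_binary = (np.asarray(final_output_data['flag_sp']) & 32) > 0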
def apply_ab_flags(final_output_data, final_output_abundances):
get_bin = lambda x, n: format(int(x), 'b').zfill(n)
for each_key in final_output_abundances.keys():
if each_key[0:2]=='A_':
each_element = each_key[2:]
if each_element == 'Fe':
element = 'fe_h'
print('Applying the following flags:')
print(' 1: Upper limit')
print(' 2: Bad chi2 fit')
print(' 4: Saturation')
print(' 8: Bad wavelength solution / rv for Li6708')
print('16: Bad stellar parameter flag (>= 128)')
print('32: No abundance value at all')
else:
element = each_element
old_bitmask = np.array([get_bin(x, 10) for x in final_output_data['flag_'+element]])
bitmask_element = np.zeros(len(final_output_data['sobject_id']),dtype=int)
def raise_bitmask(bit, position):
#print(position)
bitmask_element[position] += bit
# Raise bitmask 1
# - upper limit
raise_bitmask(1, np.array([x[-1]=='1' for x in old_bitmask]))
# Raise bitmask 2
# - bad chi2 fit
raise_bitmask(2, np.array([x[-2]=='1' for x in old_bitmask]))
# Raise bitmask 4
# - saturation
try:
opt_parms = np.load('../validation/repeat_observations/reference_uncertainties/repeats_'+element+'.npy')
except:
print('No reference repeat uncertainty for '+str(element))
opt_parms = np.array([0.,0.,0.])
def snr_sigma_func(t, A, K, C):
return np.abs(A) * np.exp(- K * t) + np.abs(C)
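# Illustration with hypothetical fit parameters A=0.3, K=0.05, C=0.02:
# snr_sigma_func gives ~0.13 dex at snr=20 and ~0.02 dex at snr=200,
# i.e. the reference scatter decays exponentially towards a noise floor C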
snr_in = final_output_data['snr_c2_iraf'].clip(min=0)
raise_bitmask(4, np.where(
(final_output_data['cov_e_'+element] >= 0.3) |
(final_output_data['cov_e_'+element] >= 2*snr_sigma_func(snr_in,*opt_parms))
)
)
# Old version used for GALAH DR3 190330
# raise_bitmask(4, np.where(
# (final_output_data['cov_e_'+element] >= 0.3) |
# (final_output_data['cov_e_'+element] >= 3./final_output_data['snr_c2_iraf'] + 0.01)
# )
#)
if element == 'Li':
# Raise bitmask 8
# - bad wavelength solution / rv for Li6708
# - cool dwarfs with subsolar A(Li)
raise_bitmask(8,
(
(np.abs(final_output_data['rv_6708']) > 10) |
(
(final_output_data['teff'] < 4500) &
(final_output_data['logg'] > 4.0) &
(final_output_data['Li6708_fe'] + final_output_data['fe_h'] < 0.0)
)
)
)
# Raise bitmask 16
# - flag_sp above 128
raise_bitmask(16, np.where(
(final_output_data['flag_sp'] >= 128)
)
)
if element != 'fe_h':
# Raise bitmask 32
# - no abundance value at all
raise_bitmask(32, np.isnan(final_output_abundances['A_'+element]))
final_output_data['flag_'+element] = bitmask_element
return(final_output_data, final_output_abundances)
def adjust_unreliable_detections(final_output_data):
def apply_nanning(element, adjust_detection):
adjust_detection_plus = (
adjust_detection
)
final_output_data[element+'_fe'][adjust_detection_plus] = np.nan
final_output_data['e_'+element+'_fe'][adjust_detection_plus] = np.nan
final_output_data['flag_'+element+'_fe'][adjust_detection_plus] = 32
for elem in ['alpha','LiI','CI','OI','NaI','MgI','AlI','SiI','KI','CaI','ScI','TiI','TiII','VI','CrI','MnI','CoI','NiI','CuI','ZnI','RbI','SrI','YII','ZrI','MoI','RuI','BaII','LaII','CeII','NdII','SmII','EuII']:
adjust_detection = (
~(np.isfinite(final_output_data[elem+'_fe']))
)
apply_nanning(elem,adjust_detection)
adjust_detection = (
(
(final_output_data['flag_'+elem+'_fe']==32) |
(final_output_data['flag_'+elem+'_fe']==64)
)
)
apply_nanning(elem,adjust_detection)
#Li
#adjust_detection = (
# ~((final_output_data['LiI_fe'] < 5.))
#)
#apply_nanning('LiI',adjust_detection)
#C
adjust_detection = (
~((final_output_data['CI_fe'] < 3.))
)
apply_nanning('CI',adjust_detection)
#O
adjust_detection = (
~((final_output_data['OI_fe'] < 2.5))
)
apply_nanning('OI',adjust_detection)
#Na
adjust_detection = (
~((final_output_data['NaI_fe'] > -1.5) & (final_output_data['NaI_fe'] < 2.))
)
apply_nanning('NaI',adjust_detection)
#Mg
adjust_detection = (
~((final_output_data['MgI_fe'] > -1.5) & (final_output_data['MgI_fe'] < 1.5))
)
apply_nanning('MgI',adjust_detection)
#Al
adjust_detection = (
~((final_output_data['AlI_fe'] < 2.5))
)
apply_nanning('AlI',adjust_detection)
#Si
adjust_detection = (
~(
(final_output_data['SiI_fe'] < 1.5) &
(final_output_data['SiI_fe'] > -1.)
)
)
apply_nanning('SiI',adjust_detection)
#K
#Ca
adjust_detection = (
~(
(final_output_data['CaI_fe'] < 2.25) &
(final_output_data['CaI_fe'] > -1.9)
)
)
apply_nanning('CaI',adjust_detection)
#Sc
adjust_detection = (
~(
(final_output_data['ScI_fe'] < 1.5) &
(final_output_data['ScI_fe'] > -1.25)
)
)
apply_nanning('ScI',adjust_detection)
#Ti
adjust_detection = (
~((final_output_data['TiI_fe'] < 2.5))
)
apply_nanning('TiI',adjust_detection)
#Ti2
adjust_detection = (
~((final_output_data['TiII_fe'] < 1.5))
)
apply_nanning('TiII',adjust_detection)
#V
adjust_detection = (
~((final_output_data['VI_fe'] < 2.0))
)
apply_nanning('VI',adjust_detection)
#Cr
adjust_detection = (
~(
(final_output_data['CrI_fe'] < 1.5)&
(final_output_data['CrI_fe'] > -2.0)
)
)
apply_nanning('CrI',adjust_detection)
#Mn
adjust_detection = (
~((final_output_data['MnI_fe'] < 1.75))
)
apply_nanning('MnI',adjust_detection)
#Co
adjust_detection = (
~((final_output_data['CoI_fe'] < 4.))
)
apply_nanning('CoI',adjust_detection)
#Ni
adjust_detection = (
~(
(final_output_data['NiI_fe'] < 1.5) &
(final_output_data['NiI_fe'] > -1.5)
)
)
apply_nanning('NiI',adjust_detection)
#Cu
adjust_detection = (
~(
(final_output_data['CuI_fe'] < 1.5) &
(final_output_data['CuI_fe'] > -1.5)
)
)
apply_nanning('CuI',adjust_detection)
#Zn
adjust_detection = (
~(
(final_output_data['ZnI_fe'] < 1.5) &
(final_output_data['ZnI_fe'] > -1.5)
)
)
apply_nanning('ZnI',adjust_detection)
#Rb
adjust_detection = (
~(
(final_output_data['RbI_fe'] < 2.0) &
(final_output_data['RbI_fe'] > -1.0)
)
)
apply_nanning('RbI',adjust_detection)
#Sr
adjust_detection = (
~((final_output_data['SrI_fe'] < 2.0))
)
apply_nanning('SrI',adjust_detection)
#Y
adjust_detection = (
~((final_output_data['YII_fe'] < 2.5))
)
apply_nanning('YII',adjust_detection)
#Zr
adjust_detection = (
~((final_output_data['ZrI_fe'] < 2.5))
)
apply_nanning('ZrI',adjust_detection)
#Mo
adjust_detection = (
~((final_output_data['MoI_fe'] < 3.))
)
apply_nanning('MoI',adjust_detection)
#Ru
adjust_detection = (
~((final_output_data['RuI_fe'] < 2.))
)
apply_nanning('RuI',adjust_detection)
#Ba
adjust_detection = (
~((final_output_data['BaII_fe'] < 2.75) & (final_output_data['BaII_fe'] > -2.))
)
apply_nanning('BaII',adjust_detection)
#La
adjust_detection = (
# only giants and subdwarfs
~((final_output_data['LaII_fe'] < 2.5))
)
apply_nanning('LaII',adjust_detection)
#Ce
adjust_detection = (
# only cooler than 5500 K
~((final_output_data['CeII_fe'] < 2.5))
)
apply_nanning('CeII',adjust_detection)
#Nd
adjust_detection = (
# only giants
~((final_output_data['NdII_fe'] < 3.))
)
apply_nanning('NdII',adjust_detection)
#Sm
adjust_detection = (
# only giants and subdwarfs
~((final_output_data['SmII_fe'] < 2.25))
)
apply_nanning('SmII',adjust_detection)
#Eu
adjust_detection = (
# only giants and subdwarfs
~((final_output_data['EuII_fe'] < 2.5))
)
apply_nanning('EuII',adjust_detection)
return(final_output_data)
def adjust_unreliable_upper_limits(final_output_data):
"""
These adjustments are based on outcome of
GALAH_DR3/validation/upper_limits/dr3_upper_limit_validation.ipynb
subgroups = dict()
subgroups['Coolest_MS'] = np.array([3000,4500, 4.00,5.25])
subgroups['Cool_MS'] = np.array([4500,5500, 4.00,5.25])
subgroups['Warm_MS'] = np.array([5500,6600, 3.50,5.25])
subgroups['Hot_MS'] = np.array([6600,8000, 3.50,5.25])
subgroups['Hot_Lum1'] = np.array([5500,6600,-0.50,3.50])
subgroups['Hot_Lum2'] = np.array([6600,8000,-0.50,3.50])
subgroups['Subgiants'] = np.array([4250,5500, 3.50,4.00])
subgroups['RGB_tip'] = np.array([4250,5500, 3.00,3.50])
subgroups['1st_RC'] = np.array([4250,5500, 2.20,2.60])
subgroups['RGB_L1'] = np.array([4250,5500, -0.5,2.20])
subgroups['RGB_L2'] = np.array([4000,4250, -0.5,2.20])
subgroups['RGB_L3'] = np.array([3750,4000, -0.5,2.20])
subgroups['RGB_L4'] = np.array([3000,3750, -0.5,2.20])
subgroups['Bump'] = np.array([4250,4900, 2.60,3.00])
subgroups['2nd_RC'] = np.array([4900,5500, 2.60,3.00])
subgroups['PMS'] = np.array([3000,4250, 2.20,4.00])
"""
def apply_upper_limit(element, adjust_upper_limit):
adjust_upper_limit_plus = (
adjust_upper_limit &
bitmask.bitfield_to_boolean_mask(np.array(final_output_data['flag_'+element+'_fe'],dtype=int))
)
final_output_data[element+'_fe'][adjust_upper_limit_plus] = np.nan
final_output_data['e_'+element+'_fe'][adjust_upper_limit_plus] = np.nan
final_output_data['flag_'+element+'_fe'][adjust_upper_limit_plus] = 32
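# Note: bitmask.bitfield_to_boolean_mask is assumed here to be astropy's
# astropy.nddata.bitmask helper; by default it returns True wherever any
# flag bit is raised, e.g. np.array([0, 1, 32]) -> [False, True, True]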
#Li
#Adjusted flag_li_fe for subsolar A(Li) for Coolest_MS
#Li
adjust_upper_limit = (
~((final_output_data['LiI_fe'] < 5.))
)
apply_upper_limit('LiI',adjust_upper_limit)
#C
adjust_upper_limit = (
~(
(
(final_output_data['teff'] > 5250) |
(
(final_output_data['logg'] > 3) &
(final_output_data['teff'] > 5000)
)
) &
(final_output_data['CI_fe'] < 3.)
)
)
apply_upper_limit('CI',adjust_upper_limit)
#O
adjust_upper_limit = (
~(
(
(final_output_data['teff'] > 4250) |
(final_output_data['logg'] > 2.5)
) &
(final_output_data['OI_fe'] < 2.5)
)
)
apply_upper_limit('OI',adjust_upper_limit)
#Na
adjust_upper_limit = (
~(
(final_output_data['teff'] < 6600) &
(final_output_data['NaI_fe'] > -1.5) &
(final_output_data['NaI_fe'] < 2.)
)
)
apply_upper_limit('NaI',adjust_upper_limit)
#Mg
adjust_upper_limit = (
~(
(
(final_output_data['teff'] < 6600) &
(final_output_data['MgI_fe'] < -final_output_data['fe_h'] - 0.5)
) &
(final_output_data['MgI_fe'] > -1.5) &
(final_output_data['MgI_fe'] < 1.5)
)
)
apply_upper_limit('MgI',adjust_upper_limit)
#Al
adjust_upper_limit = (
~(
(final_output_data['teff'] < 6600) &
(final_output_data['AlI_fe'] < 2.5)
)
)
apply_upper_limit('AlI',adjust_upper_limit)
#Si
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['SiI_fe'] < -0.9*final_output_data['fe_h'] - 0.5) &
(final_output_data['SiI_fe'] < 1.5)
)
)
apply_upper_limit('SiI',adjust_upper_limit)
#K
adjust_upper_limit = (
~(
(final_output_data['KI_fe'] < -1.)
)
)
apply_upper_limit('KI',adjust_upper_limit)
#Ca
adjust_upper_limit = (
~(
(final_output_data['teff'] < 6600)
)
)
apply_upper_limit('CaI',adjust_upper_limit)
#Sc
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['ScI_fe'] < -final_output_data['fe_h'] - 0.5) &
(final_output_data['ScI_fe'] < 1.5)
)
)
apply_upper_limit('ScI',adjust_upper_limit)
#Ti
adjust_upper_limit = (
~(
(final_output_data['TiI_fe'] < 2.5)
)
)
apply_upper_limit('TiI',adjust_upper_limit)
#Ti2
adjust_upper_limit = (
~(
(final_output_data['TiII_fe'] < 1.5)
)
)
apply_upper_limit('TiII',adjust_upper_limit)
#V
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 5500) &
(final_output_data['logg'] < 3.5) &
(final_output_data['VI_fe'] < -final_output_data['fe_h']) &
(final_output_data['VI_fe'] < 2.0) &
(
(final_output_data['fe_h'] < -0.75) |
(final_output_data['VI_fe'] < 1.)
)
)
)
apply_upper_limit('VI',adjust_upper_limit)
#Cr
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['CrI_fe'] < -final_output_data['fe_h'] - 0.5) &
(final_output_data['CrI_fe'] < 1.5) &
(final_output_data['CrI_fe'] > -2.0)
)
)
apply_upper_limit('CrI',adjust_upper_limit)
#Mn
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['MnI_fe'] < 1.75) &
(
(final_output_data['fe_h'] < -0.75) &
(final_output_data['MnI_fe'] < -final_output_data['fe_h'] - 0.5)
)
)
)
apply_upper_limit('MnI',adjust_upper_limit)
#Co
adjust_upper_limit = (
~(
(final_output_data['teff'] < 6600) &
(final_output_data['CoI_fe'] < 4.)
)
)
apply_upper_limit('CoI',adjust_upper_limit)
#Ni
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['NiI_fe'] < 1.) &
(final_output_data['NiI_fe'] > -1.5) &
(final_output_data['NiI_fe'] < -final_output_data['fe_h'] - 0.5)
)
)
apply_upper_limit('NiI',adjust_upper_limit)
#Cu
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['CuI_fe'] < 1.5) &
(final_output_data['CuI_fe'] < -final_output_data['fe_h'] - 0.5)
)
)
apply_upper_limit('CuI',adjust_upper_limit)
#Zn
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
# MP or MSTO star (neither giant nor subgiant)
(
(final_output_data['teff'] > 5500) |
(final_output_data['logg'] > 4.0) |
(final_output_data['fe_h'] < -0.75)
) &
(final_output_data['ZnI_fe'] < 1.5) &
(final_output_data['ZnI_fe'] < -final_output_data['fe_h'] + 0.5))
)
apply_upper_limit('ZnI',adjust_upper_limit)
#Rb
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 5500) &
(final_output_data['RbI_fe'] < 2.0)
)
)
apply_upper_limit('RbI',adjust_upper_limit)
#Sr
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
(final_output_data['SrI_fe'] < 2.0) &
(final_output_data['SrI_fe'] < -final_output_data['fe_h'] + 0.5)
)
)
apply_upper_limit('SrI',adjust_upper_limit)
#Y
adjust_upper_limit = (
~(
(final_output_data['teff'] > 4250) &
(final_output_data['teff'] < 6600) &
# dwarf
(
(final_output_data['teff'] > 5500) |
(final_output_data['logg'] > 4.0)
) &
(final_output_data['YII_fe'] < 2.5) &
(final_output_data['YII_fe'] < -final_output_data['fe_h'])
)
)
apply_upper_limit('YII',adjust_upper_limit)
#Zr
adjust_upper_limit = (
~(
(final_output_data['teff'] < 4250) &
(final_output_data['ZrI_fe'] < 2.5)
)
)
apply_upper_limit('ZrI',adjust_upper_limit)
#Mo
adjust_upper_limit = (
~(
(final_output_data['teff'] < 4250) &
(final_output_data['MoI_fe'] < 3.)
)
)
apply_upper_limit('MoI',adjust_upper_limit)
#Ru
adjust_upper_limit = (
~(
(final_output_data['teff'] < 4250) &
(final_output_data['RuI_fe'] < 2.)
)
)
apply_upper_limit('RuI',adjust_upper_limit)
#Ba
adjust_upper_limit = (
~(
(final_output_data['teff'] < 6600) &
(final_output_data['BaII_fe'] < 2.75) &
(final_output_data['BaII_fe'] > -2.)
)
)
apply_upper_limit('BaII',adjust_upper_limit)
#La
adjust_upper_limit = (
# only giants and subdwarfs
~(
(final_output_data['teff'] < 5500) &
(final_output_data['logg'] < 4.0) &
(final_output_data['LaII_fe'] < 2.5)
)
)
apply_upper_limit('LaII',adjust_upper_limit)
#Ce
adjust_upper_limit = (
# only cooler than 5500 K
~(
(final_output_data['teff'] < 5500) &
(final_output_data['CeII_fe'] < 2.5)
)
)
apply_upper_limit('CeII',adjust_upper_limit)
#Nd
adjust_upper_limit = (
# only giants
~(
(final_output_data['teff'] < 5500) &
(final_output_data['logg'] < 3.5) &
(final_output_data['NdII_fe'] < 3.)
)
)
apply_upper_limit('NdII',adjust_upper_limit)
#Sm
adjust_upper_limit = (
# only giants and subdwarfs
~(
(final_output_data['teff'] < 5500) &
(final_output_data['logg'] < 4.0) &
(final_output_data['SmII_fe'] < 2.25)
)
)
apply_upper_limit('SmII',adjust_upper_limit)
#Eu
adjust_upper_limit = (
# only giants and subdwarfs
~(
(final_output_data['teff'] < 5500) &
(final_output_data['logg'] < 4.0) &
(final_output_data['EuII_fe'] < 2.5)
)
)
apply_upper_limit('EuII',adjust_upper_limit)
return(final_output_data)
def write_to_fits(final_output_data, final_output_abundances,output_filename):
output_pandas = pandas.DataFrame(final_output_data,columns=final_output_data.keys())
output_astropy = Table.from_pandas(output_pandas)
output_astropy.write('processed_files/GALAH_iDR3_'+output_filename+'.fits',overwrite=True)
output_a_pandas = pandas.DataFrame(final_output_abundances,columns=final_output_abundances.keys())
output_a_astropy = Table.from_pandas(output_a_pandas)
output_a_astropy.write('processed_files/GALAH_iDR3_'+output_filename+'_abund.fits',overwrite=True)
return output_astropy, output_a_astropy
###Output
_____no_output_____
###Markdown
Execute
###Code
product_list = []
for each_subset in np.arange(0,66):
if np.shape(glob.glob('sme_result_files/GALAH_10k_'+str(each_subset)+'_lbol_final.fits'))[0] > 0:
product_list.append('10k_'+str(each_subset))
product_list = np.array(product_list)
#product_list = ['10k_59','10k_58','10k_57','10k_56','10k_55','10k_54','10k_53','10k_52','10k_51','10k_50']
product_list2 = np.array([
'180602','180603','180604','180620','180621','180622','180623','180625','180628',
'181221','181222','181223','181224','181225','181226',
'190206','190207','190209','190210','190211','190212','190223','190224','190225'
])
product_list = np.concatenate((product_list,product_list2))
product_list = ['dn']
#product_list = product_list2
#product_list = ['10k_5']
#product_list = ['GBS','seis','OpenClusters','GlobularClusters','random10000','ts_DR2','high_vtot','Li_rich_giants']
print(product_list,len(product_list))
def create_data_product(product_name):
print(product_name)
product_subsets, product_input_data_path, product_pipeline = get_product_information(product_name)
dr3_output_structure, sme_data, iraf_data, abundance_zeropoints = get_input_data(
product_subsets,
product_input_data_path,
product_pipeline)
(final_output_data, final_output_abundances) = combine_SME_IRAF_to_FINAL(
output_filename = product_name,
product_pipeline=product_pipeline,
sme_data=sme_data,
iraf_data=iraf_data,
dr3_output_structure=dr3_output_structure,
abundance_zeropoints=abundance_zeropoints);
for each_key in ['teff','vbroad','fe_h','fe_h_atmo','rv_galah']:
final_output_data = compute_final_uncertainty(final_output_data, each_key)
# MC sample the uncertainties for logg and compute final logg uncertainty
final_output_data['cov_e_logg'] = compute_logg_uncertainty(final_output_data)
final_output_data = compute_final_uncertainty(final_output_data, 'logg')
(final_output_data, final_output_abundances) = apply_sp_flags(final_output_data, final_output_abundances)
(final_output_data, final_output_abundances) = apply_ab_flags(final_output_data, final_output_abundances)
(final_output_data, final_output_abundances) = compute_final_abundance_uncertainty(final_output_data, final_output_abundances)
(final_output_data,final_output_abundances) = combine_line_by_line(final_output_data, final_output_abundances, abundance_zeropoints)
final_output_data = adjust_unreliable_detections(final_output_data)
final_output_data = adjust_unreliable_upper_limits(final_output_data)
fits_data = write_to_fits(
final_output_data,
final_output_abundances,
output_filename = product_name)
multi = True
if multi == True:
import multiprocessing
from multiprocessing import Pool
if __name__ == '__main__':
with Pool(3) as p:
p.map(create_data_product, product_list)
else:
for product_name in product_list:
create_data_product(product_name)
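# Note: Pool(3) runs three create_data_product calls concurrently, which is
# why the log messages in the output below appear interleaved across products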
###Output
10k_0
10k_8
10k_16
You are adjusting the SP of 160419005101398
You are adjusting the SP of 160419005101398
Got input data
Got input data
Combining information
Combining information
You are adjusting the SP of 160419005101398
Got input data
Combining information
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
Sampling logg uncertainty
NB: We are leaving out logg for now, because we compute its final uncertainty later
Sampling logg uncertainty
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
NB: We are leaving out logg for now, because we compute its final uncertainty later
Sampling logg uncertainty
|
docs/tutorials/enzyme_modules.ipynb | ###Markdown
Enzyme Modules

An "enzyme module" is defined as a mechanistic description of a reaction consisting of mass action rate laws for all known reaction steps (Du et al., 2016). In **MASSpy**, enzyme modules are represented by the `EnzymeModule` object. To demonstrate the utility of an `EnzymeModule` object and how it aids in constructing mechanistic models of enzyme behavior, an `EnzymeModule` of hexokinase$^{1, 2}$ is constructed and then merged with a model of glycolysis$^{3}$ for verification.

Constructing Enzyme Modules

In order to construct the `EnzymeModule` of hexokinase, the following information is necessary:

1. The enzyme is a monomer.
2. The enzyme binding of substrates follows a random sequential mechanism.
3. The enzyme experiences product inhibition and is competitively inhibited by 23DPG when complexed with D-glucose.

Total HEX1 Concentration$^2$: $\text{[HEX1]}_{total} = 24\ \text{nM} = 0.000024\ \text{mM}$.
###Code
from operator import attrgetter
from mass import MassMetabolite
from mass.enzyme_modules import EnzymeModule
from mass.example_data import create_example_model
# Load the glycolysis and hemoglobin models, then merge them
glycolysis = create_example_model("Glycolysis")
hemoglobin = create_example_model("Hemoglobin")
glyc_hb = glycolysis.merge(hemoglobin, inplace=False)
###Output
Set parameter Username
###Markdown
The `EnzymeModule` is a subclass of the `MassModel`, meaning that it inherits the methods and behaviors of the `MassModel` object. Like a `MassModel`, an `EnzymeModule` object requires a unique identifier in order to be created. Optionally, the `name` and `subsystem` attributes are set during initialization.
###Code
HEX1 = EnzymeModule("HEX1", name="Hexokinase (D-glucose:ATP)",
subsystem="Glycolysis")
###Output
_____no_output_____
###Markdown
Defining the enzyme ligands

The ligands that interact with the enzyme (e.g. as the substrates, activators, and inhibitors) are created as `MassMetabolite` objects and added to the model.
###Code
glc__D_c = MassMetabolite(
"glc__D_c",
name="D-Glucose",
formula="C6H12O6",
charge=0,
compartment="c")
g6p_c = MassMetabolite(
"g6p_c",
name="D-Glucose 6-phosphate",
formula="C6H11O9P",
charge=-2,
compartment="c")
atp_c = MassMetabolite(
"atp_c",
name="ATP",
formula="C10H12N5O13P3",
charge=-4,
compartment="c")
adp_c = MassMetabolite(
"adp_c",
name="ADP",
formula="C10H12N5O10P2",
charge=-3,
compartment="c")
_23dpg_c = MassMetabolite(
"_23dpg_c",
name="2,3-Disphospho-D-glycerate",
formula="C3H3O10P2",
charge=-5,
compartment="c")
h_c = MassMetabolite(
"h_c",
name="H+",
formula="H",
charge=1,
compartment="c")
HEX1.add_metabolites([glc__D_c, g6p_c, atp_c, adp_c, _23dpg_c, h_c])
###Output
_____no_output_____
###Markdown
Once added to the `EnzymeModule`, ligands can be accessed using the `enzyme_module_ligands` attribute.
###Code
HEX1.enzyme_module_ligands
###Output
_____no_output_____
###Markdown
To keep track of the roles played by various ligands in the module, the `enzyme_module_ligands_categorized` attribute is set. The attribute takes a `dict`, with categories as keys and relevant `MassMetabolite` objects as values. Note that an object can be a part of multiple categories.
###Code
HEX1.enzyme_module_ligands_categorized = {
"substrates": glc__D_c,
"cofactors": atp_c,
"inhibitors": _23dpg_c,
"products": [adp_c, g6p_c, h_c]}
HEX1.enzyme_module_ligands_categorized
###Output
_____no_output_____
###Markdown
For each category, a `cobra.Group` is created containing the relevant objects. Once set, the attribute returns a `cobra.DictList` that contains the categorized groups. The groups and their members are printed as follows:
###Code
for group in HEX1.enzyme_module_ligands_categorized:
print("{0}: {1}".format(
group.id, str(sorted([m.id for m in group.members]))))
###Output
substrates: ['glc__D_c']
cofactors: ['atp_c']
inhibitors: ['_23dpg_c']
products: ['adp_c', 'g6p_c', 'h_c']
###Markdown
Defining the enzyme module forms

After adding `MassMetabolite` objects of ligands to the model, the various forms of the enzyme must be defined. These forms are represented by `EnzymeModuleForm` objects. The `EnzymeModuleForm` object inherits from the `MassMetabolite` and is treated like any other metabolite in the model. However, the `EnzymeModuleForm` object contains the additional `bound_metabolites` attribute to assist in tracking metabolites bound to the enzyme form. The `EnzymeModule.make_enzyme_module_form()` method allows for the creation of an `EnzymeModuleForm` object while assigning categories for the `EnzymeModuleForm` in the process. Using `make_enzyme_module_form()` also adds the species to the module upon creation, accessible via the `EnzymeModule.enzyme_module_forms` attribute.
###Code
hex1_c = HEX1.make_enzyme_module_form(
"hex1_c",
name="automatic",
categories="Active",
compartment="c")
hex1_A_c = HEX1.make_enzyme_module_form(
"hex1_A_c", # A stands complexted with ATP
name="automatic",
categories="Active",
bound_metabolites={atp_c: 1},
compartment="c")
hex1_G_c = HEX1.make_enzyme_module_form(
"hex1_G_c", # G stands for complexed with Glucose
name="automatic",
categories="Active",
bound_metabolites={glc__D_c: 1},
compartment="c")
hex1_AG_c = HEX1.make_enzyme_module_form(
"hex1_AG_c",
name="automatic",
categories="Active",
bound_metabolites={glc__D_c: 1, atp_c: 1},
compartment="c")
hex1_G_CI_c = HEX1.make_enzyme_module_form(
"hex1_G_CI_c", # CI stands for competitive inhibition
name="automatic",
categories="Inhibited",
bound_metabolites={glc__D_c: 1, _23dpg_c: 1},
compartment="c")
hex1_A_PI_c = HEX1.make_enzyme_module_form(
"hex1_A_PI_c", # PI stands for competitive inhibition
name="automatic",
categories="Inhibited",
bound_metabolites={adp_c: 1},
compartment="c")
hex1_G_PI_c = HEX1.make_enzyme_module_form(
"hex1_G_PI_c", # PI stands for competitive inhibition
name="automatic",
categories="Inhibited",
bound_metabolites={g6p_c: 1},
compartment="c")
HEX1.enzyme_module_forms
###Output
_____no_output_____
###Markdown
The `bound_metabolites` attribute represents the ligands bound to the site(s) of the enzyme.
###Code
# Print automatically generated names
for enzyme_form in HEX1.enzyme_module_forms:
print("Bound to sites of {0}:\n{1}\n".format(
enzyme_form.id, {
ligand.id: coeff
for ligand, coeff in enzyme_form.bound_metabolites.items()}))
###Output
Bound to sites of hex1_c:
{}
Bound to sites of hex1_A_c:
{'atp_c': 1}
Bound to sites of hex1_G_c:
{'glc__D_c': 1}
Bound to sites of hex1_AG_c:
{'glc__D_c': 1, 'atp_c': 1}
Bound to sites of hex1_G_CI_c:
{'glc__D_c': 1, '_23dpg_c': 1}
Bound to sites of hex1_A_PI_c:
{'adp_c': 1}
Bound to sites of hex1_G_PI_c:
{'g6p_c': 1}
###Markdown
Setting the `bound_metabolites` attribute upon creation allows the `formula` and `charge` attributes of the various forms to be set as well, while ensuring that mass and charge balancing is maintained. Note that the enzyme is represented as a moiety, and the ligands bound to the enzyme are represented in the chemical formula.
###Code
# Get the elemental matrix for the enzyme
df = HEX1.get_elemental_matrix(array_type="DataFrame")
# Use iloc to only look at EnzymeModuleForms
df.iloc[:, 6:]
###Output
_____no_output_____
###Markdown
Setting the `name` argument as "automatic" in the `EnzymeModule.make_enzyme_module_form()` method causes a name for the `EnzymeModuleForm` to be generated based on the metabolites in the `bound_metabolites` attribute.
###Code
# Print automatically generated names
for enzyme_form in HEX1.enzyme_module_forms:
print(enzyme_form.name)
###Output
HEX1
HEX1-atp complex
HEX1-glc__D complex
HEX1-glc__D-atp complex
HEX1-glc__D-_23dpg complex
HEX1-adp complex
HEX1-g6p complex
###Markdown
The `categories` argument allows for `EnzymeModuleForm` objects to be placed into `cobra.Group` objects representing those categories. As with the ligands, the categorized enzyme module forms are returned in a `DictList` of `Group` objects by the `enzyme_module_forms_categorized` attribute.
###Code
for group in HEX1.enzyme_module_forms_categorized:
print("{0}: {1}".format(
group.id, str(sorted([m.id for m in group.members]))))
###Output
Active: ['hex1_AG_c', 'hex1_A_c', 'hex1_G_c', 'hex1_c']
Inhibited: ['hex1_A_PI_c', 'hex1_G_CI_c', 'hex1_G_PI_c']
###Markdown
Alternatively, the `enzyme_module_forms_categorized` attribute can be set using a `dict`:
###Code
HEX1.enzyme_module_forms_categorized = {
"competitively_inhibited": hex1_G_CI_c}
for group in HEX1.enzyme_module_forms_categorized:
print("{0}: {1}".format(
group.id, str(sorted([m.id for m in group.members]))))
###Output
Active: ['hex1_AG_c', 'hex1_A_c', 'hex1_G_c', 'hex1_c']
Inhibited: ['hex1_A_PI_c', 'hex1_G_CI_c', 'hex1_G_PI_c']
competitively_inhibited: ['hex1_G_CI_c']
###Markdown
Defining enzyme module reactions

The next step is to define all of the reaction steps that represent the catalytic mechanism and regulation of the enzyme module. These reactions are represented as `EnzymeModuleReaction` objects. The `EnzymeModuleReaction` object inherits from the `MassReaction` and is treated like any other reaction in the model. Like the `make_enzyme_module_form()` method, the `make_enzyme_module_reaction()` method allows for the creation of an `EnzymeModuleReaction` object while assigning categories for the `EnzymeModuleReaction` in the process.

Species that exist in the model can also be added to the reaction by providing a dictionary of metabolites and their stoichiometric coefficients to the `metabolites_to_add` argument.
###Code
HEX1_1 = HEX1.make_enzyme_module_reaction(
"HEX1_1",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="product_inhibition",
metabolites_to_add={
"hex1_c": -1,
"adp_c": -1,
"hex1_A_PI_c": 1})
HEX1_2 = HEX1.make_enzyme_module_reaction(
"HEX1_2",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="product_inhibition",
metabolites_to_add={
"hex1_c": -1,
"g6p_c": -1,
"hex1_G_PI_c": 1})
HEX1_3 = HEX1.make_enzyme_module_reaction(
"HEX1_3",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="glc__D_c_binding",
metabolites_to_add={
"hex1_c": -1,
"glc__D_c": -1,
"hex1_G_c": 1})
HEX1_4 = HEX1.make_enzyme_module_reaction(
"HEX1_4",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="atp_c_binding",
metabolites_to_add={
"hex1_c": -1,
"atp_c": -1,
"hex1_A_c": 1})
HEX1_5 = HEX1.make_enzyme_module_reaction(
"HEX1_5",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="competitive_inhibition",
metabolites_to_add={
"hex1_G_c": -1,
"_23dpg_c": -1,
"hex1_G_CI_c": 1})
HEX1_6 = HEX1.make_enzyme_module_reaction(
"HEX1_6",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="atp_c_binding",
metabolites_to_add={
"hex1_G_c": -1,
"atp_c": -1,
"hex1_AG_c": 1})
HEX1_7 = HEX1.make_enzyme_module_reaction(
"HEX1_7",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="glc__D_c_binding",
metabolites_to_add={
"hex1_A_c": -1,
"glc__D_c": -1,
"hex1_AG_c": 1})
HEX1_8 = HEX1.make_enzyme_module_reaction(
"HEX1_8",
name="Automatic",
subsystem="Glycolysis",
reversible=True,
categories="catalyzation",
metabolites_to_add={
"hex1_AG_c": -1,
"hex1_c": 1,
"adp_c": 1,
"g6p_c": 1,
"h_c": 1})
for reaction in HEX1.enzyme_module_reactions:
print(reaction)
###Output
HEX1_1: adp_c + hex1_c <=> hex1_A_PI_c
HEX1_2: g6p_c + hex1_c <=> hex1_G_PI_c
HEX1_3: glc__D_c + hex1_c <=> hex1_G_c
HEX1_4: atp_c + hex1_c <=> hex1_A_c
HEX1_5: _23dpg_c + hex1_G_c <=> hex1_G_CI_c
HEX1_6: atp_c + hex1_G_c <=> hex1_AG_c
HEX1_7: glc__D_c + hex1_A_c <=> hex1_AG_c
HEX1_8: hex1_AG_c <=> adp_c + g6p_c + h_c + hex1_c
###Markdown
The `categories` argument allows for `EnzymeModuleReaction` objects to be placed into `cobra.Group` objects representing those categories. As with the ligands and enzyme forms, a `DictList` of the relevant groups is returned by the `enzyme_module_reactions_categorized` attribute.
###Code
HEX1.enzyme_module_reactions_categorized
###Output
_____no_output_____
###Markdown
Unifying rate parameters

For this `EnzymeModule`, the reactions representing glucose binding to the enzyme and ATP binding to the enzyme have the same forward rate and equilibrium constants. Instead of defining the parameter values for each individual reaction, the `unify_rate_parameters()` method can be used to create custom rate laws for the given reactions that all depend on the same rate parameters.

The `unify_rate_parameters()` method takes a list of reactions and an identifier to use for the unified parameter. The `enzyme_prefix` flag can be set to `True` to prefix the new parameter identifier with the identifier of the `EnzymeModule`, ensuring that any existing custom parameters are not overwritten.
###Code
for ligand, pid in zip([glc__D_c, atp_c],["G", "A"]):
# Get the group of reactions corresponding to the ligand
category = "_".join((ligand.id, "binding"))
group = HEX1.enzyme_module_reactions_categorized.get_by_id(category)
# Unify the parameters
HEX1.unify_rate_parameters(
group.members, new_parameter_id=pid, enzyme_prefix=True)
# Print the new reaction rates
print("\n" + category + "\n" + "-" * len(category))
for reaction in sorted(group.members, key=attrgetter("id")):
print(reaction.id + ": " + str(reaction.rate))
###Output
glc__D_c_binding
----------------
HEX1_3: kf_HEX1_G*(glc__D_c(t)*hex1_c(t) - hex1_G_c(t)/Keq_HEX1_G)
HEX1_7: kf_HEX1_G*(glc__D_c(t)*hex1_A_c(t) - hex1_AG_c(t)/Keq_HEX1_G)
atp_c_binding
-------------
HEX1_4: kf_HEX1_A*(atp_c(t)*hex1_c(t) - hex1_A_c(t)/Keq_HEX1_A)
HEX1_6: kf_HEX1_A*(atp_c(t)*hex1_G_c(t) - hex1_AG_c(t)/Keq_HEX1_A)
###Markdown
Determining Enzyme Form Concentrations and Rate Constants

The next step is to solve for the steady state concentrations for the various forms of the enzyme symbolically using **SymPy**. Because the numerical values for the dissociation constants have been defined, these equations are solved in terms of the rate constants. The rate constants can be approximated using the total enzyme concentration as a constraint and substituted back into the equations to calculate the numerical values of the steady state concentrations.
###Code
from sympy import Eq, Symbol, lambdify, simplify, solveset
from mass import strip_time
from mass.util.matrix import matrix_rank
###Output
_____no_output_____
###Markdown
Solving steady state concentrations symbolically

To get the symbolic solutions for the individual enzyme forms, the ODEs are first collected in a `dict`. Keys are the enzyme forms, and values are their ODEs with the time dependency stripped via the `strip_time` function.
###Code
ode_dict = {
enzyme_form.id: Eq(strip_time(enzyme_form.ode), 0)
for enzyme_form in HEX1.enzyme_module_forms}
# Matrix rank of enzyme stoichiometric matrix without substrates
rank = matrix_rank(HEX1.S[6:])
print("Rank Deficiency: {0}".format(len(ode_dict) - rank))
###Output
Rank Deficiency: 1
###Markdown
Because the stoichiometric matrix (without ligands) has a rank deficiency of one, there is a dependent variable in the system unless another equation is added. Therefore, the completely free enzyme form is treated as the dependent variable, and all of the enzyme forms are solved in terms of the free enzyme form.
###Code
enzyme_solutions = {}
for enzyme_form in HEX1.enzyme_module_forms:
# Skip dependent variable
if enzyme_form.id == "hex1_c":
continue
# Get the ODE for the enzyme form from the ODE dict
equation = ode_dict[enzyme_form.id]
# Solve the equation for the enzyme form, substituting
# previously found enzyme form solutions into the equation
solution = solveset(equation.subs(enzyme_solutions),
enzyme_form.id)
# Store the solution
enzyme_solutions[enzyme_form.id] = list(solution)[0]
# Substitute the new solution into existing solutions
enzyme_solutions.update({
enzyme_form: sol.subs(enzyme_solutions)
for enzyme_form, sol in enzyme_solutions.items()})
args = set()
for solution in enzyme_solutions.values():
args.update(solution.atoms(Symbol))
###Output
_____no_output_____
###Markdown
Defining the Rate Equation

To make up for the rank deficiency, an additional equation is needed. Typically, the rate of the enzyme is the summation of the rates for the catalyzation reaction step(s) of the enzyme. The `make_enzyme_rate_equation()` method can be used to create the rate equation from a list of reactions. If `use_rates=True`, the rate expressions of the reactions are added together. If `update_enzyme=True`, the rate equation is set as a symbolic expression for the `enzyme_rate_equation` attribute.
###Code
# Get the catalyzation reactions
catalyzation_group = HEX1.enzyme_module_reactions_categorized.get_by_id(
"catalyzation")
HEX1.make_enzyme_rate_equation(catalyzation_group.members,
use_rates=True,
update_enzyme=True)
print(HEX1.enzyme_rate_equation)
###Output
kf_HEX1_8*(Keq_HEX1_8*hex1_AG_c(t) - adp_c(t)*g6p_c(t)*hex1_c(t))/Keq_HEX1_8
###Markdown
With the rate equation defined, the `enzyme_rate_error()` method is used to get the equation as the difference between the flux value and the rate equation.
###Code
enzyme_rate_equation = strip_time(HEX1.enzyme_rate_error(use_values=False))
print(enzyme_rate_equation)
###Output
v_HEX1 - kf_HEX1_8*(Keq_HEX1_8*hex1_AG_c - adp_c*g6p_c*hex1_c)/Keq_HEX1_8
###Markdown
The solutions for the enzyme forms are substituted into the rate equation, and the equation is solved for the free enzyme form. The solutions are subsequently updated, resulting in symbolic equations that do not depend on any enzyme form.
###Code
# Solve for last unknown concentration symbolically
solution = solveset(enzyme_rate_equation.subs(enzyme_solutions),
"hex1_c")
# Update solution dictionary with the new solution
enzyme_solutions["hex1_c"] = list(solution)[0]
# Update solutions with free variable solutions
enzyme_solutions = {
enzyme_form: simplify(solution.subs(enzyme_solutions))
for enzyme_form, solution in enzyme_solutions.items()}
args = set()
for solution in enzyme_solutions.values():
args.update(solution.atoms(Symbol))
print(args)
###Output
{Keq_HEX1_5, adp_c, kf_HEX1_8, _23dpg_c, g6p_c, Keq_HEX1_2, Keq_HEX1_8, kf_HEX1_G, v_HEX1, Keq_HEX1_A, Keq_HEX1_1, Keq_HEX1_G, glc__D_c, atp_c, kf_HEX1_A}
###Markdown
Numerical values for known quantities are substituted into the equations. For this `EnzymeModule` of Hexokinase, the following dissociation constants are used:

$$\begin{align}
K_{d,\ \text{GLC-D}} &= 0.038\ \text{mM} \\
K_{d,\ \text{ATP}} &= 2.06\ \text{mM} \\
K_{i,\ \text{23DPG}} &= 5.5\ \text{mM} \\
K_{i,\ \text{ADP}} &= 1\ \text{mM} \\
K_{i,\ \text{G6P}} &= 66.67\ \text{mM}
\end{align}$$

A value of $K_{\text{HEX1}} = 313.12$ is used for the catalyzation step. Note that the inverse of the dissociation constant is used for reactions that form complexes.
###Code
numerical_values = {
"Keq_HEX1_1": 1,
"Keq_HEX1_2": 1 / 66.67,
"Keq_HEX1_G": 1 / 0.038,
"Keq_HEX1_A": 1 / 2.06,
"Keq_HEX1_5": 1 / 5.5,
"Keq_HEX1_8": 313.12}
# Update the model with the parameters
HEX1.update_parameters(numerical_values)
###Output
_____no_output_____
###Markdown
The ligand concentrations and the rate for the enzyme are extracted from the merged glycolysis and hemoglobin model.
###Code
# Get steady state flux for EnzymeModule
HEX1.enzyme_rate = glyc_hb.reactions.get_by_id("HEX1").steady_state_flux
numerical_values[HEX1.enzyme_flux_symbol_str] = HEX1.enzyme_rate
# Get the ligand concentrations
for met in HEX1.enzyme_module_ligands:
concentration = glyc_hb.metabolites.get_by_id(met.id).initial_condition
    # Set the ligand initial condition and add to numerical values dictionary
met.initial_condition = concentration
numerical_values[met.id] = concentration
###Output
_____no_output_____
###Markdown
The numerical values are substituted into the symbolic equations, resulting in the steady state concentrations that depend only on the rate constants.
###Code
enzyme_solutions = {
enzyme_form: simplify(sol.subs(numerical_values))
for enzyme_form, sol in enzyme_solutions.items()}
args = set()
for solution in enzyme_solutions.values():
args.update(solution.atoms(Symbol))
print(args)
###Output
{kf_HEX1_G, kf_HEX1_8, kf_HEX1_A}
###Markdown
Approximating Rate ConstantsTo determine the set of rate constants for the enzyme module, the absolute error between the total hexokinase concentration value (found in literature) and the computed hexokinase concentration is minimized. For this example, the `minimize()` function of the **SciPy** package is utilized to find a feasible set of rate constants.
###Code
from scipy.optimize import minimize
###Output
_____no_output_____
###Markdown
The objective function for the minimization is first made symbolically. The `enzyme_total_symbol_str` property can be used to represent the total enzyme concentration, while the `enzyme_concentration_total_equation` property creates a symbolic expression for the sum of all enzyme forms.
###Code
enzyme_total_error = abs(
Symbol(HEX1.enzyme_total_symbol_str)
- strip_time(HEX1.enzyme_concentration_total_equation))
print(enzyme_total_error)
###Output
Abs(-HEX1_Total + hex1_AG_c + hex1_A_PI_c + hex1_A_c + hex1_G_CI_c + hex1_G_PI_c + hex1_G_c + hex1_c)
###Markdown
The `enzyme_concentration_total` attribute stores the total amount of enzyme in the model and is substituted into the expression. The total HEX1 concentration is $24 \times 10^{-6}\ \text{mM}$.
###Code
HEX1.enzyme_concentration_total = 24e-6
enzyme_total_error = enzyme_total_error.subs({
HEX1.enzyme_total_symbol_str: HEX1.enzyme_concentration_total})
print(enzyme_total_error)
###Output
Abs(hex1_AG_c + hex1_A_PI_c + hex1_A_c + hex1_G_CI_c + hex1_G_PI_c + hex1_G_c + hex1_c - 2.4e-5)
###Markdown
Finally, the symbolic equations for the enzyme forms are substituted into the enzyme total error equation, resulting in an expression that represents the objective function with the only unknown variables being rate constants. The `lambdify()` function of the **SymPy** package converts the symbolic objective into a lambda function that can be used with the `minimize()` function of **SciPy**.
###Code
enzyme_total_error = simplify(enzyme_total_error.subs(enzyme_solutions))
# Sort the arguments to ensure input format remains consistent
args = sorted(list(map(str, args)))
# Use lambdify to make objective function as a lambda function
obj_fun = lambda x: lambdify(args, enzyme_total_error)(*x)
###Output
_____no_output_____
###Markdown
The `minimize()` function is now used to approximate the rate constants. The optimization problems for enzyme rate constants are typically nonlinear, and require nonlinear optimization routines to find feasible solutions.
###Code
# Minimize the objective function, initial guess based on publication values
initial_guess = [1e8, 9376585, 52001]
variable_bounds = ((0, 1e9), (0, 1e9), (0, 1e9))
solution = minimize(obj_fun, x0=initial_guess,
method="trust-constr",
bounds=variable_bounds)
# Map solution array to variables
rate_constants = dict(zip(args, solution.x))
print(rate_constants)
###Output
{'kf_HEX1_8': 100000000.0025878, 'kf_HEX1_A': 9376585.030755484, 'kf_HEX1_G': 52006.59981223971}
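###Markdown
As a quick sanity check (an illustrative cell), the objective can be re-evaluated at the fitted rate constants; a value near zero indicates that the total-concentration constraint is essentially satisfied:
###Code
# Evaluate the minimized objective at the optimized rate constants
print("Objective at solution: {0:e}".format(obj_fun(solution.x)))
###Output
_____no_output_____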
###Markdown
Because the rate constants associated with the inhibition of the enzyme forms are not necessary for computing the concentrations, a rapid binding assumption is made for the inhibition reactions. Therefore, a large number is set for the rate constants. The parameters are set using the `update_parameters()` method.
###Code
rate_constants["kf_HEX1_1"] = 1e6
rate_constants["kf_HEX1_2"] = 1e6
rate_constants["kf_HEX1_5"] = 1e6
HEX1.update_parameters(rate_constants)
###Output
_____no_output_____
###Markdown
Calculating numerical values for concentrationsOnce the rate constants have been estimated, they are substituted back into the symbolic concentration equations in order to obtain their numerical values.
###Code
for enzyme_form, solution in enzyme_solutions.items():
# Get the enzyme form object, determine the steady state concentration
enzyme_form = HEX1.enzyme_module_forms.get_by_id(enzyme_form)
enzyme_form.initial_condition = float(solution.subs(rate_constants))
print("{0}: {1:e}".format(enzyme_form.id,
enzyme_form.initial_condition))
###Output
hex1_A_c: 9.401421e-06
hex1_G_c: 5.718872e-08
hex1_AG_c: 1.174630e-08
hex1_G_CI_c: 3.223364e-08
hex1_A_PI_c: 3.519706e-06
hex1_G_PI_c: 8.847367e-09
hex1_c: 1.213692e-05
###Markdown
Error valuesAs a quality assurance check, the `enzyme_concentration_total_error()` method can be used to get the error between the `enzyme_concentration_total` attribute and the sum of the enzyme form concentrations. A positive value indicates the `enzyme_concentration_total` attribute is greater than the sum of the individual enzyme form concentrations that were computed.
###Code
print("Total Enzyme Concentration Error: {0}".format(
HEX1.enzyme_concentration_total_error(use_values=True)))
###Output
Total Enzyme Concentration Error: -1.1680622689093244e-06
###Markdown
Similarly, the error between the `enzyme_rate` attribute and the computed value from the `enzyme_rate_equation` can also be checked using the `enzyme_rate_error()` method, in which a positive value indicates that the `enzyme_rate` attribute is greater than the value computed when using the rate equation.
###Code
print("Enzyme Rate Error: {0}".format(
HEX1.enzyme_rate_error(use_values=True)))
###Output
Enzyme Rate Error: 4.440892098500626e-16
###Markdown
Adding EnzymeModules to ModelsWith the `EnzymeModule` built, it can be integrated into a larger network and simulated. To add an `EnzymeModule` to an existing `MassModel`, the `merge()` method is used. After merging, the `remove_reactions()` method is used to remove the reaction that the enzyme module replaces. The `EnzymeModule` should always be merged into the `MassModel` as demonstrated below:
###Code
glyc_hb_HEX1 = glyc_hb.merge(HEX1, inplace=False)
glyc_hb_HEX1.remove_reactions([
glyc_hb_HEX1.reactions.get_by_id("HEX1")])
###Output
_____no_output_____
###Markdown
All objects, numerical values, and certain attributes of the `EnzymeModule` are transferred into the `MassModel` upon merging. This includes all enzyme forms, reactions steps, initial conditions, rate parameters, and category groups.
###Code
glyc_hb_HEX1
###Output
_____no_output_____
###Markdown
The EnzymeModuleDict objectDuring the merge process, an `EnzymeModuleDict` is created from the `EnzymeModule` and added to the `MassModel.enzyme_modules` attribute.
###Code
print(glyc_hb_HEX1.enzyme_modules)
HEX1_dict = glyc_hb_HEX1.enzyme_modules.get_by_id("HEX1")
HEX1_dict
###Output
[<EnzymeModuleDict HEX1 at 0x7fc1f08e3dc0>]
###Markdown
The `EnzymeModuleDict` inherits from an `OrderedDict`, thereby inheriting ordered dictionary methods such as `keys()`:
###Code
print("\n".join(HEX1_dict.keys()))
###Output
id
name
subsystem
enzyme_module_ligands
enzyme_module_forms
enzyme_module_reactions
enzyme_module_ligands_categorized
enzyme_module_forms_categorized
enzyme_module_reactions_categorized
enzyme_concentration_total
enzyme_rate
enzyme_concentration_total_equation
enzyme_rate_equation
S
model
###Markdown
The `EnzymeModuleDict` stores several of the enzyme-specific attributes so that they are still accessible after integrating the enzyme module into a larger network. The keys of the `EnzymeModuleDict` can also be treated as attribute accessors:
###Code
print("Enzyme Rate:\n{0} = {1}".format(
HEX1_dict["enzyme_rate"], # Returned using dict key
HEX1_dict.enzyme_rate_equation # Returned using attribute accessor
))
###Output
Enzyme Rate:
1.12 = kf_HEX1_8*(Keq_HEX1_8*hex1_AG_c(t) - adp_c(t)*g6p_c(t)*hex1_c(t))/Keq_HEX1_8
###Markdown
Steady State ValidationThe last step is to ensure that a steady state is reached with the completed enzyme module within a larger network context.
###Code
import matplotlib.pyplot as plt
from mass import Simulation
from mass.visualization import plot_time_profile
###Output
_____no_output_____
###Markdown
Here, the model is simulated, and the enzyme's ability to reach steady state is graphically verified:
###Code
# Setup simulation object
sim = Simulation(glyc_hb_HEX1, verbose=True)
# Simulate from 0 to 1000 with 10001 points in the output
conc_sol, flux_sol = sim.simulate(
    glyc_hb_HEX1, time=(0, 1e3, 1e4 + 1))
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
plot_time_profile(
conc_sol, observable=HEX1_dict.enzyme_module_forms, ax=ax,
xlim=(1e-3, 1e3),
legend="right outside", plot_function="loglog",
xlabel="Time [hr]", ylabel="Concentration [mM]",
    title="Time profile of Concentrations for Enzyme Forms");
###Output
Successfully loaded MassModel 'Glycolysis_Hemoglobin_HEX1' into RoadRunner.
|
analysis/simulation/pvalue.ipynb | ###Markdown
Hypothesis testing validationCalibration checks and power analysis for DE (differential mean expression), DV (differential variability), and DC (differential correlation)
###Code
import pandas as pd
import matplotlib.pyplot as plt
import scanpy as sc
import scipy as sp
import itertools
import numpy as np
import scipy.stats as stats
from scipy.integrate import dblquad
import seaborn as sns
from statsmodels.stats.multitest import fdrcorrection
import imp
pd.options.display.max_rows = 999
pd.set_option('display.max_colwidth', -1)
import pickle as pkl
import time
import string
from sklearn.datasets import make_spd_matrix
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'medium',
'axes.labelsize': 'medium',
'axes.titlesize':'medium',
'figure.titlesize':'medium',
'xtick.labelsize':'small',
'ytick.labelsize':'small'}
pylab.rcParams.update(params)
import sys
sys.path.append('/data/home/Github/scrna-parameter-estimation/schypo')
import estimator, simulate, schypo, bootstrap, util, hypothesis_test
data_path = '/data/parameter_estimation/'
fig_path = '/data/home/Github/scrna-parameter-estimation/figures/fig3/'
###Output
_____no_output_____
###Markdown
Read the interferon data
###Code
ifn_adata = sc.read(data_path + 'interferon_filtered.h5ad')
###Output
_____no_output_____
###Markdown
Calibration on real data
###Code
q = 0.07
cal_adata = ifn_adata[ifn_adata.obs.cell == 'CD4 T cells'].copy()
# cal_adata.obs['stim'] = np.random.choice(['stim', 'ctrl'], cal_adata.shape[0])
cal_adata.shape
schypo.create_groups(
cal_adata,
q=q,
label_columns=['stim', 'ind'],
inplace=True)
schypo.compute_1d_moments(
cal_adata,
inplace=True,
filter_genes=True,
residual_var=True,
filter_mean_thresh=0.05,
min_perc_group=.9)
print(cal_adata.shape)
schypo.ht_1d_moments(
cal_adata,
formula_like='1 + stim',
cov_column='stim',
num_boot=1000,
num_cpus=6,
verbose=3)
plt.hist(cal_adata.uns['schypo']['1d_ht']['var_asl'], bins=20);
plt.hist(cal_adata.uns['schypo']['1d_ht']['mean_asl'], bins=20);
###Output
_____no_output_____
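###Markdown
If the test is calibrated, these p-value distributions should be approximately uniform. A quantitative check (an illustrative addition; the choice of test is a judgment call) is a Kolmogorov-Smirnov test against Uniform(0, 1):
###Code
# KS test of the ASLs against Uniform(0, 1); a large p-value supports calibration
for key in ['mean_asl', 'var_asl']:
    asl = np.asarray(cal_adata.uns['schypo']['1d_ht'][key])
    asl = asl[~np.isnan(asl)]
    ks_stat, ks_pval = stats.kstest(asl, 'uniform')
    print('{0}: KS statistic = {1:.3f}, p-value = {2:.3f}'.format(key, ks_stat, ks_pval))
###Output
_____no_output_____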
###Markdown
Power calculationParameters are fit from real data; in each simulated comparison the first 500 genes are perturbed (a mean shift for DE, a variance shift for DV), and for DC a subset of gene-pair correlations is increased
###Code
adata = ifn_adata[ifn_adata.obs.cell_type == 'CD4 T cells - ctrl']
# data = adata.X.copy()
# relative_data = data.toarray()/data.sum(axis=1)
n_cells = 10000
q=0.07
x_param, z_param, Nc, good_idx = simulate.extract_parameters(adata.X, q=q)
def simulate_two_datasets(x_param, Nc, n_cells, q, diff='mean'):
log_means_1, log_variances_1 = np.log(x_param[0]), np.log(x_param[1])
log_means_2, log_variances_2 = log_means_1.copy(), log_variances_1.copy()
if diff == 'null':
norm_cov_1, norm_cov_2 = 'indep', 'indep'
if diff == 'mean':
log_means_2[:500] += 0.5
norm_cov_1, norm_cov_2 = 'indep', 'indep'
if diff == 'variability':
log_variances_2[:500] += 0.5
norm_cov_1, norm_cov_2 = 'indep', 'indep'
if diff == 'correlation':
norm_cov_1 = make_spd_matrix(log_means_1.shape[0])
norm_corr_1 = norm_cov_1/np.outer(np.sqrt(np.diag(norm_cov_1)), np.sqrt(np.diag(norm_cov_1)))
norm_corr_subset = norm_corr_1[:100, :100].copy()
change_indices = np.where(norm_corr_subset < 0.5)
change_indices = (change_indices[0][:150], change_indices[1][:150])
norm_corr_subset[change_indices] += 0.5
norm_corr_2 = norm_corr_1.copy()
norm_corr_2[:100, :100] = norm_corr_subset
norm_cov_2 = norm_corr_2 * np.outer(np.sqrt(np.diag(norm_cov_1)), np.sqrt(np.diag(norm_cov_1)))
data_1 = simulate.simulate_transcriptomes(
n_cells=n_cells,
means=np.exp(log_means_1)*Nc.mean(),
variances=(np.exp(log_variances_1) + np.exp(log_means_1)**2)*(Nc**2).mean() - np.exp(log_means_1)**2*Nc.mean()**2,
Nc=Nc,
norm_cov=norm_cov_1)
data_2 = simulate.simulate_transcriptomes(
n_cells=n_cells,
means=np.exp(log_means_2)*Nc.mean(),
variances=(np.exp(log_variances_2) + np.exp(log_means_2)**2)*(Nc**2).mean() - np.exp(log_means_2)**2*Nc.mean()**2,
Nc=Nc,
norm_cov=norm_cov_2)
true_data = np.vstack([data_1, data_2])
_, hyper_captured = simulate.capture_sampling(true_data, q=q, process='hyper')
anndata = sc.AnnData(sp.sparse.csr_matrix(hyper_captured))
anndata.obs['ct_real'] = ['A' for i in range(n_cells)] + ['B' for i in range(n_cells)]
anndata.obs['ct_shuffled'] = np.random.choice(['A', 'B'], anndata.shape[0])
if diff == 'correlation':
return anndata, change_indices
else:
return anndata, None
def calculate_power(n_cells, test='mean', test_null=False):
sim_adata, change_indices = simulate_two_datasets(x_param, Nc, n_cells, q=q, diff=test if not test_null else 'null')
schypo.create_groups(
sim_adata,
q=q,
label_columns=['ct_real'],
inplace=True)
schypo.compute_1d_moments(
sim_adata,
inplace=True,
filter_genes=False,
residual_var=True,
filter_mean_thresh=0.0,
min_perc_group=.9)
if test == 'mean':
schypo.ht_1d_moments(
sim_adata,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
power = (sim_adata.uns['schypo']['1d_ht']['mean_asl'][:500] < 0.05).mean()
pvals = sim_adata.uns['schypo']['1d_ht']['mean_asl']
if test == 'variability':
schypo.ht_1d_moments(
sim_adata,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
power = (sim_adata.uns['schypo']['1d_ht']['var_asl'][:500] < 0.05).mean()
pvals = sim_adata.uns['schypo']['1d_ht']['var_asl']
if test == 'correlation':
schypo.compute_2d_moments(
sim_adata,
inplace=True,
gene_1=np.arange(50).astype(str).tolist(),
gene_2=np.arange(50).astype(str).tolist())
schypo.ht_2d_moments(
sim_adata,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
c_pv = sim_adata.uns['schypo']['2d_ht']['corr_asl'][:50, :50]
power = (c_pv[change_indices] < 0.05).mean()
pvals = sim_adata.uns['schypo']['2d_ht']['corr_asl']
return power, pvals, sim_adata
mean_power, var_power, corr_power = [], [], []
n_cells_list = np.logspace(2, 5, 25).astype(int)
for n_cells in n_cells_list:
print(n_cells)
mean_power.append(calculate_power(n_cells, test='mean')[0])
var_power.append(calculate_power(n_cells, test='variability')[0])
corr_power.append(calculate_power(n_cells, test='correlation')[0])
with open('temp.pkl', 'wb') as f:
pkl.dump([mean_power, var_power, corr_power], f)
n_cells_list = np.logspace(2, 5, 25).astype(int)
with open('/data/parameter_estimation/simulation/power_analysis.pkl', 'rb') as f:
mean_power, var_power, corr_power = pkl.load(f)
plt.figure(figsize=(2, 2))
plt.plot(n_cells_list[:23], mean_power, '-o')
plt.plot(n_cells_list[:23], var_power, '-o')
plt.plot(n_cells_list[:22], corr_power, '-o')
plt.xscale('log')
plt.xlabel('Number of cells in each group')
plt.ylabel('Power')
plt.legend(['DE', 'DV', 'DC'], loc='lower right', bbox_to_anchor=(0.56, -0.05, 0.5, 0.5), frameon=False)
plt.savefig(fig_path + 'power.pdf', bbox_inches='tight')
# with open('temp.pkl', 'wb') as f:
# pkl.dump([mean_power, var_power, corr_power], f)
###Output
_____no_output_____
###Markdown
Generate p-value histograms
###Code
adata = ifn_adata[ifn_adata.obs.cell_type == 'CD4 T cells - ctrl']
# data = adata.X.copy()
# relative_data = data.toarray()/data.sum(axis=1)
n_cells = 5000
q=0.07
x_param, z_param, Nc, good_idx = simulate.extract_parameters(adata.X, q=q, min_mean=0.001)
sim_adata_mean, _ = simulate_two_datasets(x_param, Nc, n_cells, q=q, diff='mean')
print(sim_adata_mean.shape)
schypo.create_groups(
sim_adata_mean,
q=q,
label_columns=['ct_real'],
inplace=True)
schypo.compute_1d_moments(
sim_adata_mean,
inplace=True,
filter_genes=True,
residual_var=True,
filter_mean_thresh=0.00001,
min_perc_group=.9)
schypo.ht_1d_moments(
sim_adata_mean,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
sim_adata_mean.write('/data/parameter_estimation/simulation/sim_mean.h5ad')
sim_adata_var, _ = simulate_two_datasets(x_param, Nc, n_cells, q=q, diff='variability')
schypo.create_groups(
sim_adata_var,
q=q,
label_columns=['ct_real'],
inplace=True)
schypo.compute_1d_moments(
sim_adata_var,
inplace=True,
filter_genes=True,
residual_var=True,
filter_mean_thresh=0.05,
min_perc_group=.9)
schypo.ht_1d_moments(
sim_adata_var,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
sim_adata_var.write('/data/parameter_estimation/simulation/sim_var.h5ad')
x_param, z_param, Nc, good_idx = simulate.extract_parameters(adata.X, q=q, min_mean=0.05)
sim_adata_corr, change_indices = simulate_two_datasets(x_param, Nc, n_cells, q=q, diff='correlation')
schypo.create_groups(
sim_adata_corr,
q=q,
label_columns=['ct_real'],
inplace=True)
schypo.compute_1d_moments(
sim_adata_corr,
inplace=True,
filter_genes=True,
residual_var=True,
filter_mean_thresh=0.0,
min_perc_group=.9)
schypo.compute_2d_moments(
sim_adata_corr,
inplace=True,
gene_1=np.arange(100).astype(str).tolist(),
gene_2=np.arange(100).astype(str).tolist())
schypo.ht_2d_moments(
sim_adata_corr,
formula_like='1 + ct_real',
cov_column='ct_real',
num_boot=5000,
num_cpus=6,
verbose=3)
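# `corr_asl_null` (used below) is reconstructed here under an assumption:
# the null p-values come from running the identical 2D pipeline on a null
# simulation (diff='null'), mirroring the `test_null` path of calculate_power.
sim_adata_null, _ = simulate_two_datasets(x_param, Nc, n_cells, q=q, diff='null')
schypo.create_groups(
    sim_adata_null,
    q=q,
    label_columns=['ct_real'],
    inplace=True)
schypo.compute_1d_moments(
    sim_adata_null,
    inplace=True,
    filter_genes=True,
    residual_var=True,
    filter_mean_thresh=0.0,
    min_perc_group=.9)
schypo.compute_2d_moments(
    sim_adata_null,
    inplace=True,
    gene_1=np.arange(100).astype(str).tolist(),
    gene_2=np.arange(100).astype(str).tolist())
schypo.ht_2d_moments(
    sim_adata_null,
    formula_like='1 + ct_real',
    cov_column='ct_real',
    num_boot=5000,
    num_cpus=6,
    verbose=3)
corr_asl_null = sim_adata_null.uns['schypo']['2d_ht']['corr_asl']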
sim_adata_corr.uns['null'] = corr_asl_null
sim_adata_corr.write('/data/parameter_estimation/simulation/sim_corr.h5ad')
###Output
... storing 'ct_real' as categorical
... storing 'ct_shuffled' as categorical
... storing 'schypo_group' as categorical
###Markdown
Plot p-value histograms
###Code
sim_adata_var = sc.read('/data/parameter_estimation/simulation/sim_var.h5ad')
var_asl = sim_adata_var.uns['schypo']['1d_ht']['var_asl']
sim_adata_mean = sc.read('/data/parameter_estimation/simulation/sim_mean.h5ad')
mean_asl = sim_adata_mean.uns['schypo']['1d_ht']['mean_asl']
sim_adata_corr = sc.read('/data/parameter_estimation/simulation/sim_corr.h5ad')
corr_asl = sim_adata_corr.uns['schypo']['2d_ht']['corr_asl'].ravel()
corr_null_asl = sim_adata_corr.uns['null'].ravel()
plt.figure(figsize=(6, 2));
plt.subplots_adjust(wspace=0.3)
plt.subplot(1, 3, 1);
plt.hist(mean_asl, bins=20, density=True, color='grey', label='DE');
plt.hist(mean_asl[500:], bins=5, density=True, histtype='step', linewidth=3, color='black', label='null');
plt.legend(frameon=False)
plt.subplot(1, 3, 2);
plt.hist(var_asl, bins=20, density=True, color='grey', label='DV');
plt.hist(var_asl[500:], bins=5, density=True, histtype='step', linewidth=3, color='black', label='null');
plt.legend(frameon=False)
plt.subplot(1, 3, 3);
plt.hist(corr_asl.ravel(), density=True, color='grey', bins=20, label='DC');
plt.hist(corr_null_asl, bins=5, density=True, color='black', label='null', histtype='step', linewidth=3);
plt.legend(frameon=False)
plt.savefig(fig_path + 'calibration_histograms.pdf', bbox_inches='tight')
###Output
_____no_output_____ |
notebook/animal-predictor.ipynb | ###Markdown
Animal Predictor - (Flowers for now) Imports
###Code
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
###Output
_____no_output_____
###Markdown
Sourcing data
###Code
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
PIL.Image.open(str(roses[1]))
tulips = list(data_dir.glob('tulips/*'))
PIL.Image.open(str(tulips[0]))
PIL.Image.open(str(tulips[1]))
###Output
_____no_output_____
###Markdown
Preprocessing images
###Code
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
###Output
_____no_output_____
###Markdown
Visualisation by class
###Code
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(1, 10):
ax = plt.subplot(3, 3, i)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
###Output
_____no_output_____
###Markdown
Configuring dataset for performance
###Code
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
AUTOTUNE = tf.data.AUTOTUNE
# Cache decoded images after the first epoch, shuffle the training set, and
# prefetch batches so preprocessing overlaps with model execution
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice the pixel values are now in `[0,1]`.
print(np.min(first_image), np.max(first_image))
###Output
_____no_output_____
###Markdown
Creating the model
###Code
num_classes = 5
model = Sequential([
layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Getting a model summary
###Code
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
###Code
epochs=10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
###Output
_____no_output_____
###Markdown
Visualise the training results
###Code
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='upper right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
###Output
_____no_output_____
###Markdown
What does the above mean? The graphs above indicate that we have suffered from overfitting: training accuracy keeps improving while validation accuracy plateaus and validation loss rises. The difference between a good fit and an overfitted one is easy to view in graphical form
###Code
plt.figure(figsize=(15, 8))
x, y = range(5), [1, 2, 4, 4.5, 6.5]
m, b = np.polyfit(x, y, 1)
mx_plus_b = [m * i + b for i in x]
plt.subplot(1, 2, 1)
plt.plot(x, y, label='Dataset', marker='o', linestyle= "")
plt.plot(x, mx_plus_b, label='Good ML fit', color='green', )
plt.legend(loc='upper right')
plt.title('Good ML')
plt.subplot(1, 2, 2)
plt.plot(x, y, label='Dataset', marker='o', linestyle= "")
plt.plot(x, y, label='Bad ML fit (overfitted)')
plt.legend(loc='upper right')
plt.title('Bad ML')
plt.show()
###Output
_____no_output_____
###Markdown
Data augmentation
###Code
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal",
input_shape=(img_height,
img_width,
3)),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
###Output
_____no_output_____
###Markdown
Dropout
###Code
model = Sequential([
data_augmentation,
layers.experimental.preprocessing.Rescaling(1./255),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
###Output
_____no_output_____
###Markdown
New model with a dropout layer
###Code
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
epochs = 15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
###Output
_____no_output_____
###Markdown
Visualise the training results
###Code
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)
img = keras.preprocessing.image.load_img(
sunflower_path, target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
"This image most likely belongs to {} with a {:.2f} percent confidence."
.format(class_names[np.argmax(score)], 100 * np.max(score))
)
###Output
_____no_output_____ |
Coursera Deeplearning Specialization/c4 - CNN/c4wk4b Face Recognition.ipynb | ###Markdown
Face Recognition for the Happy HouseWelcome to the first assignment of week 4! Here you will build a face recognition system. Many of the ideas presented here are from [FaceNet](https://arxiv.org/pdf/1503.03832.pdf). In lecture, we also talked about [DeepFace](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf). Face recognition problems commonly fall into two categories: - **Face Verification** - "is this the claimed person?". For example, at some airports, you can pass through customs by letting a system scan your passport and then verifying that you (the person carrying the passport) are the correct person. A mobile phone that unlocks using your face is also using face verification. This is a 1:1 matching problem. - **Face Recognition** - "who is this person?". For example, the video lecture showed a face recognition video (https://www.youtube.com/watch?v=wr4rx0Spihs) of Baidu employees entering the office without needing to otherwise identify themselves. This is a 1:K matching problem. FaceNet learns a neural network that encodes a face image into a vector of 128 numbers. By comparing two such vectors, you can then determine if two pictures are of the same person. **In this assignment, you will:**- Implement the triplet loss function- Use a pretrained model to map face images into 128-dimensional encodings- Use these encodings to perform face verification and face recognitionIn this exercise, we will be using a pre-trained model which represents ConvNet activations using a "channels first" convention, as opposed to the "channels last" convention used in lecture and previous programming assignments. In other words, a batch of images will be of shape $(m, n_C, n_H, n_W)$ instead of $(m, n_H, n_W, n_C)$. Both of these conventions have a reasonable amount of traction among open-source implementations; there isn't a uniform standard yet within the deep learning community. Let's load the required packages.
###Code
from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
K.set_image_data_format('channels_first')
import cv2
import os
import numpy as np
from numpy import genfromtxt
import pandas as pd
import tensorflow as tf
from fr_utils import *
from inception_blocks_v2 import *
%matplotlib inline
%load_ext autoreload
%autoreload 2
np.set_printoptions(threshold=np.nan)
###Output
Using TensorFlow backend.
###Markdown
0 - Naive Face VerificationIn Face Verification, you're given two images and you have to tell if they are of the same person. The simplest way to do this is to compare the two images pixel-by-pixel. If the distance between the raw images is less than a chosen threshold, it may be the same person! **Figure 1** Of course, this algorithm performs really poorly, since the pixel values change dramatically due to variations in lighting, orientation of the person's face, even minor changes in head position, and so on. You'll see that rather than using the raw image, you can learn an encoding $f(img)$ so that element-wise comparisons of this encoding give more accurate judgements as to whether two pictures are of the same person. 1 - Encoding face images into a 128-dimensional vector 1.1 - Using a ConvNet to compute encodingsThe FaceNet model takes a lot of data and a long time to train. So following common practice in applied deep learning settings, let's just load weights that someone else has already trained. The network architecture follows the Inception model from [Szegedy *et al.*](https://arxiv.org/abs/1409.4842). We have provided an inception network implementation. You can look in the file `inception_blocks.py` to see how it is implemented (do so by going to "File->Open..." at the top of the Jupyter notebook). The key things you need to know are:- This network uses 96x96 dimensional RGB images as its input. Specifically, it takes a face image (or batch of $m$ face images) as a tensor of shape $(m, n_C, n_H, n_W) = (m, 3, 96, 96)$ - It outputs a matrix of shape $(m, 128)$ that encodes each input face image into a 128-dimensional vector
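###Code
# A minimal sketch (an aside, not part of the graded assignment) of the naive
# pixel-distance verification described above; `img1`, `img2` and the
# threshold value are hypothetical placeholders.
import numpy as np

def naive_verify(img1, img2, threshold=100.0):
    """Return the raw-pixel L2 distance and whether it falls under the threshold."""
    dist = np.linalg.norm(img1.astype(float) - img2.astype(float))
    return dist, dist < threshold
###Output
_____no_output_____
###Markdown
Run the cell below to create the model for face images.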
###Code
FRmodel = faceRecoModel(input_shape=(3, 96, 96))
print("Total Params:", FRmodel.count_params())
###Output
Total Params: 3743280
###Markdown
** Expected Output **Total Params: 3743280 By using a 128-neuron fully connected layer as its last layer, the model ensures that the output is an encoding vector of size 128. You then use the encodings to compare two face images as follows: **Figure 2**: By computing a distance between two encodings and thresholding, you can determine if the two pictures represent the same personSo, an encoding is a good one if: - The encodings of two images of the same person are quite similar to each other - The encodings of two images of different persons are very differentThe triplet loss function formalizes this, and tries to "push" the encodings of two images of the same person (Anchor and Positive) closer together, while "pulling" the encodings of two images of different persons (Anchor, Negative) further apart. **Figure 3**: In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N) 1.2 - The Triplet LossFor an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.<!--We will also add a normalization step at the end of our model so that $\mid \mid f(x) \mid \mid_2 = 1$ (means the vector of encoding should be of norm 1).!-->Training will use triplets of images $(A, P, N)$: - A is an "Anchor" image--a picture of a person. - P is a "Positive" image--a picture of the same person as the Anchor image.- N is a "Negative" image--a picture of a different person than the Anchor image.These triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example. You'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$ by at least a margin $\alpha$:$$\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 + \alpha < \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$$You would thus like to minimize the following "triplet cost":$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \underbrace{\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2}_\text{(1)} - \underbrace{\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2}_\text{(2)} + \alpha \large ] \small_+ \tag{3}$$Here, we are using the notation "$[z]_+$" to denote $\max(z,0)$. Notes:- The term (1) is the squared distance between the anchor "A" and the positive "P" for a given triplet; you want this to be small. - The term (2) is the squared distance between the anchor "A" and the negative "N" for a given triplet, you want this to be relatively large, so it thus makes sense to have a minus sign preceding it. - $\alpha$ is called the margin. It is a hyperparameter that you should pick manually. We will use $\alpha = 0.2$. Most implementations also normalize the encoding vectors to have norm equal one (i.e., $\mid \mid f(img)\mid \mid_2$=1); you won't have to worry about that here.**Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:1. Compute the distance between the encodings of "anchor" and "positive": $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$2. Compute the distance between the encodings of "anchor" and "negative": $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$3. Compute the formula per training example: $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2 + \alpha$4. 
Compute the full formula by taking the max with zero and summing over the training examples:$$\mathcal{J} = \sum^{m}_{i=1} \large[ \small \mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2 - \mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2+ \alpha \large ] \small_+ \tag{3}$$Useful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.For steps 1 and 2, you will need to sum over the entries of $\mid \mid f(A^{(i)}) - f(P^{(i)}) \mid \mid_2^2$ and $\mid \mid f(A^{(i)}) - f(N^{(i)}) \mid \mid_2^2$ while for step 4 you will need to sum over the training examples.
###Code
# GRADED FUNCTION: triplet_loss
def triplet_loss(y_true, y_pred, alpha = 0.2):
"""
Implementation of the triplet loss as defined by formula (3)
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
"""
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
### START CODE HERE ### (≈ 4 lines)
# Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis = -1)
# Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis = -1)
# Step 3: subtract the two previous distances and add alpha.
basic_loss = tf.subtract(pos_dist, neg_dist) + alpha
# Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
loss = tf.reduce_sum( tf.maximum(basic_loss, 0.00) )
### END CODE HERE ###
return loss
with tf.Session() as test:
tf.set_random_seed(1)
y_true = (None, None, None)
y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),
tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),
tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))
loss = triplet_loss(y_true, y_pred)
print("loss = " + str(loss.eval()))
###Output
loss = 528.143
###Markdown
**Expected Output**: **loss** 528.143 2 - Loading the trained modelFaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run.
###Code
FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
load_weights_from_FaceNet(FRmodel)
###Output
_____no_output_____
###Markdown
Here're some examples of distances between the encodings of three individuals: **Figure 4**: Example of distance outputs between three individuals' encodingsLet's now use this model to perform face verification and face recognition! 3 - Applying the model Back to the Happy House! Residents are living blissfully since you implemented happiness recognition for the house in an earlier assignment. However, several issues keep coming up: The Happy House became so happy that every happy person in the neighborhood is coming to hang out in your living room. It is getting really crowded, which is having a negative impact on the residents of the house. All these random happy people are also eating all your food. So, you decide to change the door entry policy, and not just let random happy people enter anymore, even if they are happy! Instead, you'd like to build a **Face verification** system so as to only let people from a specified list come in. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the door. The face recognition system then checks that they are who they claim to be. 3.1 - Face VerificationLet's build a database containing one encoding vector for each person allowed to enter the happy house. To generate the encoding we use `img_to_encoding(image_path, model)` which basically runs the forward propagation of the model on the specified image. Run the following code to build the database (represented as a python dictionary). This database maps each person's name to a 128-dimensional encoding of their face.
###Code
database = {}
database["danielle"] = img_to_encoding("images/danielle.png", FRmodel)
database["younes"] = img_to_encoding("images/younes.jpg", FRmodel)
database["tian"] = img_to_encoding("images/tian.jpg", FRmodel)
database["andrew"] = img_to_encoding("images/andrew.jpg", FRmodel)
database["kian"] = img_to_encoding("images/kian.jpg", FRmodel)
database["dan"] = img_to_encoding("images/dan.jpg", FRmodel)
database["sebastiano"] = img_to_encoding("images/sebastiano.jpg", FRmodel)
database["bertrand"] = img_to_encoding("images/bertrand.jpg", FRmodel)
database["kevin"] = img_to_encoding("images/kevin.jpg", FRmodel)
database["felix"] = img_to_encoding("images/felix.jpg", FRmodel)
database["benoit"] = img_to_encoding("images/benoit.jpg", FRmodel)
database["arnaud"] = img_to_encoding("images/arnaud.jpg", FRmodel)
###Output
_____no_output_____
###Markdown
Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.**Exercise**: Implement the verify() function which checks if the front-door camera picture (`image_path`) is actually the person called "identity". You will have to go through the following steps:1. Compute the encoding of the image from image_path2. Compute the distance between this encoding and the encoding of the identity image stored in the database3. Open the door if the distance is less than 0.7, else do not open.As presented above, you should use the L2 distance (np.linalg.norm). (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.)
###Code
# GRADED FUNCTION: verify
def verify(image_path, identity, database, model):
"""
Function that verifies if the person on the "image_path" image is "identity".
Arguments:
image_path -- path to an image
identity -- string, name of the person you'd like to verify the identity. Has to be a resident of the Happy house.
database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).
model -- your Inception model instance in Keras
Returns:
dist -- distance between the image_path and the image of "identity" in the database.
door_open -- True, if the door should open. False otherwise.
"""
### START CODE HERE ###
# Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)
encoding = img_to_encoding(image_path, model)
# Step 2: Compute distance with identity's image (≈ 1 line)
dist = np.linalg.norm( (encoding - database[identity]) )
# Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)
if dist < 0.7:
print("It's " + str(identity) + ", welcome home!")
door_open = True
else:
print("It's not " + str(identity) + ", please go away")
door_open = False
### END CODE HERE ###
return dist, door_open
###Output
_____no_output_____
###Markdown
Younes is trying to enter the Happy House and the camera takes a picture of him ("images/camera_0.jpg"). Let's run your verification algorithm on this picture:
###Code
verify("images/camera_0.jpg", "younes", database, FRmodel)
###Output
It's younes, welcome home!
###Markdown
**Expected Output**: **It's younes, welcome home!** (0.65939283, True) Benoit, who broke the aquarium last weekend, has been banned from the house and removed from the database. He stole Kian's ID card and came back to the house to try to present himself as Kian. The front-door camera took a picture of Benoit ("images/camera_2.jpg). Let's run the verification algorithm to check if benoit can enter.
###Code
verify("images/camera_2.jpg", "kian", database, FRmodel)
###Output
It's not kian, please go away
###Markdown
**Expected Output**: **It's not kian, please go away** (0.86224014, False) 3.2 - Face RecognitionYour face verification system is mostly working well. But since Kian got his ID card stolen, when he came back to the house that evening he couldn't get in! To reduce such shenanigans, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the house, and the front door will unlock for them! You'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as another input. **Exercise**: Implement `who_is_it()`. You will have to go through the following steps:1. Compute the target encoding of the image from image_path2. Find the encoding from the database that has smallest distance with the target encoding. - Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding. - Loop over the database dictionary's names and encodings. To loop use `for (name, db_enc) in database.items()`. - Compute L2 distance between the target "encoding" and the current "encoding" from the database. - If this distance is less than the min_dist, then set min_dist to dist, and identity to name.
###Code
# GRADED FUNCTION: who_is_it
def who_is_it(image_path, database, model):
"""
Implements face recognition for the happy house by finding who is the person on the image_path image.
Arguments:
image_path -- path to an image
database -- database containing image encodings along with the name of the person on the image
model -- your Inception model instance in Keras
Returns:
min_dist -- the minimum distance between image_path encoding and the encodings from the database
identity -- string, the name prediction for the person on image_path
"""
### START CODE HERE ###
## Step 1: Compute the target "encoding" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)
encoding = img_to_encoding(image_path, model)
## Step 2: Find the closest encoding ##
# Initialize "min_dist" to a large value, say 100 (≈1 line)
min_dist = 100
# Loop over the database dictionary's names and encodings.
    # The starter notebook asked for tuple unpacking, `for (name, db_enc) in database.items()`,
    # but that raised an unpacking error here, so we iterate over the names and look up each encoding by key
for name in database:
# Compute L2 distance between the target "encoding" and the current "emb" from the database. (≈ 1 line)
dist = np.linalg.norm( (encoding - database[name]))
# If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)
if dist < min_dist:
min_dist = dist
identity = name
### END CODE HERE ###
if min_dist > 0.7:
print("Not in the database.")
else:
print ("it's " + str(identity) + ", the distance is " + str(min_dist))
return min_dist, identity
###Output
_____no_output_____
###Markdown
Younes is at the front-door and the camera takes a picture of him ("images/camera_0.jpg"). Let's see if your who_is_it() algorithm identifies Younes.
###Code
who_is_it("images/camera_0.jpg", database, FRmodel)
###Output
it's younes, the distance is 0.659393
|
GeoData/peek_geojson.ipynb | ###Markdown
Peek the Geo json File
###Code
import os
import json
# import urllib
import numpy as np
import geopandas as gpd
import pandas as pd
from urllib.request import urlopen
###Output
_____no_output_____
###Markdown
The Geo data is downloaded from Aliyun. Thanks to the website [https://geo.datav.aliyun.com/areas_v2]
###Code
local_dir = 'C:\\Sync\\GeoData'
def full_url(adcode):
# Make full URL of the [adcode]
json_name = f'{adcode}_full.json'
return f'https://geo.datav.aliyun.com/areas_v2/bound/{json_name}'
def fetch_adcode(adcode=100000):
    # Fetch the geojson of [adcode] from the website, using a local file cache
url = full_url(adcode)
local = os.path.join(local_dir, os.path.basename(url))
if os.path.isfile(local):
print(f'Found file in {local}')
return json.load(open(local))
with urlopen(url) as response:
        print(f'Requesting json from {url}')
obj = json.load(response)
with open(local, 'w') as f:
json.dump(obj, f)
return obj
def parse_features(geojson):
    # Parse the 'features' section of [geojson];
    # it is expected to contain the Geo features
features = gpd.GeoDataFrame.from_features(geojson['features'])
features = pd.DataFrame(features)
features['geometry'] = '--'
return features
###Output
_____no_output_____
###Markdown
Building a quick-to-check DataFrame of Chinese Geo DataThe DataFrame main_df will store the features of every available province, city and district in China.The DataFrame will start from the collection of provinces, and will be iteratively filled based on the features.
###Code
geojson = fetch_adcode()
main_df = parse_features(geojson)
main_df
geojson = fetch_adcode()
main_df = parse_features(geojson)
known_adcodes = set()
while len(main_df) > len(known_adcodes):
for i in range(len(main_df)):
se = main_df.iloc[i]
adcode = se['adcode']
if adcode in known_adcodes:
continue
known_adcodes.add(adcode)
if se['childrenNum'] > 0:
main_df = pd.concat([main_df, parse_features(fetch_adcode(adcode))], axis=0)
main_df.index = range(len(main_df))
main_df.to_json(os.path.join(local_dir, 'main.json'))
main_df
# main_df['adcode'] = main_df.adcode.map(str)
# main_df = main_df.sort_values(by='adcode')
# main_df
df = pd.read_json(os.path.join(local_dir, 'main.json'))
df
###Output
_____no_output_____ |
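###Markdown
As a quick usage sketch (an added example; it assumes the `main.json` built above), the regions belonging to one province can be selected by adcode prefix, since Chinese adcodes are hierarchical:
###Code
# e.g. Zhejiang province is adcode 330000, so all of its cities and
# districts share the prefix '33'
zhejiang = df[df['adcode'].astype(str).str.startswith('33')]
zhejiang.head()
###Output
_____no_output_____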
means/statistics_all_datasets.ipynb | ###Markdown
Imports
###Code
from statistics import mean
import numpy as np
import pandas as pd
import math
import os
from collections import Counter
from functools import reduce
import glob
import copy
###Output
_____no_output_____
###Markdown
Opening the CSV files
###Code
dataframes = [pd.read_csv(file, sep=',', index_col=0) for file in sorted(glob.glob('../preprocessed_datasets/*.csv'))]
# note: file.strip(".csv") would also strip leading/trailing 'c', 's', 'v' and '.' characters, so split off the extension instead
cohorts = [os.path.splitext(file)[0] for file in sorted(os.listdir('../preprocessed_datasets'))]
# reduce to BL visit only
all_cohorts = dict()
for name, df in zip(cohorts, dataframes):
all_cohorts[name] = df.loc[df["Visit"] == 1]
###Output
_____no_output_____
###Markdown
Functions to perform essential calculations
###Code
def cat_stat_df(dfs, result):
"""Counting different categories, calculate the % of categorical features, store results in a df"""
categorical = {'APOE4': [2.0, 1.0], 'Sex': ['Female'], 'Diagnosis': ['CU', 'MCI', 'AD']}
column_cat = ['Sex', 'Diagnosis', 'APOE4']
for cohort in dfs:
if dfs[cohort].empty==True:
continue
else:
calc_dict = dict()
df = dfs[cohort]
for col in column_cat:
ca = Counter(df[col].dropna())
calc_dict[col] = ca
cohort_df = pd.DataFrame(calc_dict).transpose()
cohort_df = cohort_df.dropna(how='all')
cohort_df.loc[cohort] = cohort_df.sum()
for i in categorical:
if i == 'Diagnosis':
if i in cohort_df.index:
result.loc[cohort, categorical[i]] = cohort_df.loc[cohort, cohort_df.loc[i].notna()].astype(int)
result.loc[cohort, categorical[i]] = result.loc[cohort, categorical[i]].replace({np.nan: 0})
result.loc[cohort, 'n'] = int(sum(cohort_df.loc[cohort, cohort_df.loc[i].notna()]))
result.loc[cohort, 'Total'] = int(len(dfs[cohort].index))
else:
result.loc[cohort, i] = np.nan
result.loc[cohort, 'n'] = int(len(dfs[cohort].index))
elif i == 'APOE4':
if 'APOE4' in list(cohort_df.index.astype(str)):
if '2.0' not in list(cohort_df.columns.astype(str)) and '2' not in list(cohort_df.columns.astype(str)):
cohort_df[2.0] = np.nan
result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]]) /
sum([val for val in cohort_df.loc[i].dropna()]), 1)
else:
result.loc[cohort, i] = np.nan
elif i == 'Sex':
if (i in cohort_df.index) & ("Female" in cohort_df.columns):
result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]])
/ sum([val for val in cohort_df.loc[i].dropna()]), 1)
else:
result.loc[cohort, i] = 0
result.rename(columns={"Sex": "Female %", "APOE4": "APOE4 %"}, inplace=True)
return result
def num_stat_df(dfs, result_df):
"""Calculating std and mean and storing it in the result dataframe"""
column_names = ['Age', 'CDR', 'Education', 'MMSE', 'CDRSB', 'Hippocampus', 'A-beta', 'Ttau', 'Ptau']
for df in dfs:
dataset = dfs[df]
calc_dict = dict()
for col in column_names:
if (col in dataset.columns) and (dataset[col].notna().any()):
df_std = round(np.nanstd(dataset[col]), 1)
df_mean = round(np.nanmean(dataset[col]), 1)
dict_value = str(df_mean) + ' (' + str(df_std) + ')'
calc_dict[col] = dict_value
else:
calc_dict[col] = np.nan
for key in calc_dict:
result_df.loc[df, key] = calc_dict[key]
return result_df
###Output
_____no_output_____
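###Markdown
To make the categorical logic above concrete, here is a tiny toy illustration (made-up values, not cohort data) of how a `Counter` over the `Sex` column turns into the "Female %" figure:
###Code
from collections import Counter

toy_sex = ['Female', 'Male', 'Female', 'Female', 'Male']
ca = Counter(toy_sex)  # Counter({'Female': 3, 'Male': 2})
round(100 * ca['Female'] / sum(ca.values()), 1)  # -> 60.0
###Output
_____no_output_____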
###Markdown
Make an empty dataframe to fill in with the results
###Code
results = pd.DataFrame(index = all_cohorts.keys(), columns = [col for col in all_cohorts['AIBL'].columns])
results.index.name = 'Name of Dataset'
for i in ['CU', 'MCI', 'AD', 'Total']:
results[i] = np.nan
cat_stat_df(all_cohorts, results)
num_stat_df(all_cohorts, results)
results.drop(columns=['Diagnosis', 'Visit', 'Race', 'Months'], inplace=True)
results
###Output
_____no_output_____
###Markdown
Final table
###Code
results[['n', 'Total', 'CU', 'MCI', 'AD', 'Female %', 'Age', 'Education', 'MMSE', 'CDR', 'CDRSB', 'APOE4 %', 'Hippocampus']]
###Output
_____no_output_____ |
Fig1.ipynb | ###Markdown
Figure 1: Algorithms for 1/f-estimation
###Code
import fractions
import warnings
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sig
import yaml
from fooof.plts.spectra import plot_spectrum
from fooof.sim.gen import gen_aperiodic
from fooof_modified import FOOOF, plot_annotated_peak_search_MG
from utils import elec_phys_signal, irasa
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Load params and make directory
###Code
yaml_file = open('params.yml')
parsed_yaml_file = yaml.load(yaml_file, Loader=yaml.FullLoader)
globals().update(parsed_yaml_file)
Path(fig_path).mkdir(parents=True, exist_ok=True)
###Output
_____no_output_____
###Markdown
Simulate signal
###Code
# Signal params
sample_rate = 2400 # Hz
window_seconds = 2 # seconds
welch_params_fooof = dict(fs=sample_rate, nperseg=sample_rate)
welch_params_irasa = dict(fs=sample_rate, nperseg=window_seconds*sample_rate)
# Aperiodic parameters
aperiodic_exponent_simulation = 1
# Periodic parameters
peak_center_freq = 10 # Hz
peak_amplitude = .02
peak_width = .001
periodic_params = [(peak_center_freq, peak_amplitude, peak_width)]
# Simulate Signal
time_series_params = dict(exponent=aperiodic_exponent_simulation,
                          periodic_params=periodic_params)
aperiodic_signal, full_signal = elec_phys_signal(**time_series_params)
# Increase arbitrary units
aperiodic_signal, full_signal = aperiodic_signal*1e6, full_signal*1e6
# Calc PSD
freqs_fooof, psd_full_fooof = sig.welch(full_signal, **welch_params_fooof)
freqs_irasa, psd_full_irasa = sig.welch(full_signal, **welch_params_irasa)
# Keep PSD within 3Hz-30Hz for visualization
highpass = 3
lowpass = 30
freq_mask_fooof = (freqs_fooof >= highpass) & (freqs_fooof <= lowpass)
freqs_fooof = freqs_fooof[freq_mask_fooof]
psd_full_fooof = psd_full_fooof[freq_mask_fooof]
freq_mask_irasa = (freqs_irasa >= highpass) & (freqs_irasa <= lowpass)
freqs_irasa = freqs_irasa[freq_mask_irasa]
psd_full_irasa = psd_full_irasa[freq_mask_irasa]
###Output
_____no_output_____
###Markdown
Apply fooof to simulated signal
###Code
fm = FOOOF(max_n_peaks=1, verbose=False) # initialize
fm.add_data(freqs_fooof, psd_full_fooof) # add data
# Fit the power spectrum model
freq_range = (highpass, lowpass)
fm.fit(freqs_fooof, psd_full_fooof, freq_range)
###Output
_____no_output_____
###Markdown
Extract initial fooof fit and initial flattened PSD
###Code
# Do an initial aperiodic fit -> a robust fit, that excludes outliers
init_ap_fit = gen_aperiodic(fm.freqs,
fm._robust_ap_fit(fm.freqs, fm.power_spectrum))
# Recompute the flattened spectrum using the initial aperiodic fit
init_flat_spec = fm.power_spectrum - init_ap_fit
init_flat_spec_lin = 10**fm.power_spectrum - 10**init_ap_fit
###Output
_____no_output_____
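###Markdown
Note that the flattening above happens in log10-power space (FOOOF stores `power_spectrum` as log10 power), i.e. $\mathrm{flat}(f) = \log_{10}\mathrm{PSD}(f) - \log_{10}\mathrm{AP}(f)$; `init_flat_spec_lin` holds the corresponding difference in linear power.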
###Markdown
Apply IRASA to simulated signalIRASA code modified from https://raphaelvallat.com/yasa/build/html/index.html
###Code
# Set resampling factors
hset = np.array([1.3, 1.6, 2])
hset_inv = 1 / hset
# Set parameters and apply
irasa_params = dict(sf=sample_rate, band=freq_range,
win_sec=window_seconds, hset=hset)
IRASA = irasa(data=full_signal, **irasa_params)
freqs_irasa_full, psd_ap, psd_osc, params = IRASA
psd_ap, psd_osc = psd_ap[0], psd_osc[0] # only one spectrum fitted
###Output
_____no_output_____
###Markdown
Save IRASA's intermediate resampling steps for visualization
###Code
# Initialize arrays
window_samples = welch_params_irasa["nperseg"]
psds_resampled = np.zeros((len(hset), *psd_full_irasa.shape))
psds_up = np.zeros((len(hset), *psd_full_irasa.shape))
psds_dw = np.zeros((len(hset), *psd_full_irasa.shape))
for i, h in enumerate(hset):
# Get the upsampling/downsampling (h, 1/h) factors as integer
rat = fractions.Fraction(str(h))
up, down = rat.numerator, rat.denominator
# resample
data_up = sig.resample_poly(full_signal, up, down)
data_down = sig.resample_poly(full_signal, down, up, axis=-1)
# calc PSD
freqs_up, psd_up = sig.welch(data_up, h * sample_rate,
nperseg=window_samples)
freqs_dw, psd_dw = sig.welch(data_down, sample_rate / h,
nperseg=window_samples)
# save
psds_up[i, :] = psd_up[freq_mask_irasa]
psds_dw[i, :] = psd_dw[freq_mask_irasa]
# Geometric mean:
psds_resampled[i, :] = np.sqrt(psd_up * psd_dw)[freq_mask_irasa]
# Now we take the median PSD of all the resampling factors, which gives
# a good estimate of the aperiodic component of the PSD.
psd_median = np.median(psds_resampled, axis=0)
###Output
_____no_output_____
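###Markdown
In summary, the aperiodic estimate computed above is $\mathrm{PSD}_{ap}(f) = \operatorname{median}_{h \in H} \sqrt{\mathrm{PSD}_{h}(f)\,\mathrm{PSD}_{1/h}(f)}$: resampling by $h$ and $1/h$ shifts oscillatory peaks in opposite directions along the frequency axis while leaving the scale-free $1/f$ component approximately invariant, so the median across resampling factors suppresses the peaks.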
###Markdown
Plot functions for each subplot
###Code
def input_series(ax, duration=1, step=sample_rate//100):
"""Create dummy time series signal."""
time = np.arange(0, len(full_signal)/sample_rate, 1/sample_rate)
time_series = np.sin(2 * np.pi * peak_center_freq * time)
time_series += .01 * aperiodic_signal # add some 1/f
mask = [(time >= duration) & (time <= 2*duration)]
ax.plot(time[mask][::step],
time_series[mask][::step],
c_sim, lw=1, label="Signal")
ax.set(xticks=[], yticks=[], xticklabels=[], yticklabels=[])
ax.axis("off")
def input_psd(ax):
"""Plot input PSD."""
ax.loglog(freqs_fooof, psd_full_fooof, c_sim, lw=1, label="PSD")
ax.set(xticks=[], yticks=[], xticklabels=[], yticklabels=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.set_yticklabels([], minor=True)
ax.set_xticklabels([], minor=True)
ax.axis("off")
def fooof_1(ax, ybottom=1):
"""Plot initial fooof fit."""
plot_spectrum(fm.freqs, 10**fm.power_spectrum, log_freqs=False,
lw=1.5, color=c_sim, ax=ax)
plot_spectrum(fm.freqs, 10**init_ap_fit, log_freqs=False,
label='Initial Fit', color=c_fit, lw=2, alpha=1,
ls=(0, (3, 3)), ax=ax)
ax.grid(False)
ax.set_yscale("log")
ax.set_xscale("log")
ymin, ymax = ax.get_ylim()
ax.set_ylim((ymin / ybottom, ymax))
ax.axis("off")
leg = ax.legend(handlelength=2, handletextpad=.5, loc="lower center",
frameon=False)
leg.get_frame().set_alpha(None)
leg.get_frame().set_facecolor((0, 0, 1, 0))
for legobj in leg.legendHandles:
legobj.set_linewidth(1.5)
legobj.set_linestyle((.6, (3, 2)))
def fooof_2(ax, yscale=1.5, ybottom=1):
"""Plot flattended spectrum."""
plot_spectrum(fm.freqs, init_flat_spec, log_freqs=False,
label='Flattened PSD', lw=1.5, color=c_flat, ax=ax)
ax.set_xscale("log")
ax.grid(False)
ax.axis("off")
ymin, ymax = ax.get_ylim()
ax.set_ylim((ymin/ybottom, ymax))
ymin, ymax = ax.get_ylim()
ylim = ax.set_ylim([ymin, yscale*ymax])
ax.get_legend().remove()
leg = ax.legend(handlelength=1, handletextpad=.5, frameon=False,
loc="lower center")
for legobj in leg.legendHandles:
legobj.set_linewidth(1.5)
leg.get_frame().set_alpha(None)
leg.get_frame().set_facecolor((0, 0, 1, 0))
return ylim
def fooof_3(ax, ylim=None):
"""Plot fooof Gauss fits."""
plot_annotated_peak_search_MG(fm, 0, ax, lw=1.5,
markersize=10,
c_flat=c_flat, c_gauss=c_osc,
c_thresh=c_thresh, label_flat=None,
label_rthresh=None,
anno_rthresh_font=legend_fontsize_small)
ax.set_xscale("log")
ax.grid(False)
ax.axis("off")
if ylim:
ax.set_ylim(ylim)
leg = ax.legend(handlelength=1.5, frameon=False, loc=(.17, 0),
handletextpad=.2)
leg.get_frame().set_alpha(None)
leg.get_frame().set_facecolor((0, 0, 1, 0))
for legobj in leg.legendHandles:
legobj.set_linewidth(2)
legobj.set_linestyle((0, (1, 1)))
ax.set_title(None)
def fooof_4(ax, ylim=None, ybottom=1):
"""Plot fooof Gauss fit removal."""
plot_annotated_peak_search_MG(fm, 1, ax,
lw=1.5, markersize=10,
c_flat=c_flat, c_gauss=c_osc,
c_thresh=c_thresh, anno_rthresh_font=None)
ax.set_xscale("log")
ax.grid(False)
ax.axis("off")
if ylim:
ax.set_ylim(ylim)
ax.set_title(None)
def aperiodic_fit(ax, ybottom=1):
"""Plot final aperiodic fit."""
plot_spectrum(fm.freqs, 10**fm._spectrum_peak_rm, log_freqs=False,
label='Aperiodic PSD', color=c_aperiodic, lw=3, ax=ax)
plot_spectrum(fm.freqs, 10**fm._ap_fit, log_freqs=False,
label='Aperiodic Fit', lw=2,
color=c_fit, alpha=1, ls=(0, (3, 3)), ax=ax)
ax.set_yscale("log")
ax.set_xscale("log")
ax.grid(False)
ymin, ymax = ax.get_ylim()
ax.set_ylim((ymin/ybottom, ymax))
leg = ax.legend(handlelength=2, handletextpad=.5, frameon=False,
loc="lower center")
leg.legendHandles[0].set_linewidth(1.5)
leg.legendHandles[1].set_linewidth(1.5)
leg.legendHandles[1].set_linestyle((0, (2.5, 2)))
leg.get_frame().set_alpha(None)
leg.get_frame().set_facecolor((0, 0, 1, 0))
ax.axis("off")
def IRASA_res1(ax, ybottom=None, ytop=None):
"""Plot IRASA resampling h1."""
ax.loglog(freqs_irasa, psds_up[0], c_h1, lw=1)
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--",
label=rf"$h={hset[0]:.1f}$")
ax.loglog(freqs_irasa, psds_dw[0], c_h1, lw=1,
label=rf"$h=\frac{{{1}}}{{{hset[0]:.1f}}}$")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
ax.set_ylabel("Resampled\nPSD pairs", labelpad=-12, y=.4,
fontsize=legend_fontsize_small)
ax.set_title(f"h={hset[0]:.1f}",
y=.65, fontsize=legend_fontsize_small)
ymin, ymax = ax.get_ylim()
if ybottom and not ytop:
ax.set_ylim((ymin/ybottom, ymax))
if ytop and not ybottom:
ax.set_ylim((ymin, ymax/ytop))
if ytop and ybottom:
ax.set_ylim((ymin/ybottom, ymax/ytop))
def IRASA_res2(ax, ybottom=None, ytop=None):
"""Plot IRASA resampling h2."""
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--")
ax.loglog(freqs_irasa, psds_up[1], c_h2, lw=1,
label=rf"$h={hset[1]:.1f}$")
ax.loglog(freqs_irasa, psds_dw[1], c_h2, lw=1,
label=rf"$h=\frac{{{1}}}{{{hset[1]:.1f}}}$")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
ax.set_title(f"h={hset[1]:.1f}",
y=.65, fontsize=legend_fontsize_small)
ymin, ymax = ax.get_ylim()
if ybottom and not ytop:
ax.set_ylim((ymin/ybottom, ymax))
if ytop and not ybottom:
ax.set_ylim((ymin, ymax/ytop))
if ytop and ybottom:
ax.set_ylim((ymin/ybottom, ymax/ytop))
def IRASA_res3(ax, ybottom=None, ytop=None):
"""Plot IRASA resampling h3."""
ax.loglog(freqs_irasa, psds_up[2], c_h3, lw=1,
label=rf"$h={hset[2]:.0f}$")
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--")
ax.loglog(freqs_irasa, psds_dw[2], c_h3, lw=1,
label=rf"$h=\frac{{{1}}}{{{hset[2]:.0f}}}$")
ax.set_title(f"h={hset[2]:.0f}",
y=.65, fontsize=legend_fontsize_small)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
ymin, ymax = ax.get_ylim()
if ybottom and not ytop:
ax.set_ylim((ymin/ybottom, ymax))
if ytop and not ybottom:
ax.set_ylim((ymin, ymax/ytop))
if ytop and ybottom:
ax.set_ylim((ymin/ybottom, ymax/ytop))
def IRASA_mean1(ax, ybottom=None, lw_median=.1):
"""Plot IRASA geometric mean h1."""
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--")
ax.loglog(freqs_irasa, psds_resampled[0], c_h1, lw=1,
label=f"h={hset[i]}")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_linewidth(lw_median)
ax.spines["top"].set_linewidth(lw_median)
ax.spines["bottom"].set_linewidth(lw_median)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
ax.set_ylabel("Geometric\nmean", labelpad=-12, y=.5,
fontsize=legend_fontsize_small)
if ybottom:
ymin, ymax = ax.get_ylim()
ax.set_ylim((ymin/ybottom, ymax))
def IRASA_mean2(ax, ybottom=None, lw_median=.1):
"""Plot IRASA geometric mean h2."""
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--")
ax.loglog(freqs_irasa, psds_resampled[1], c_h2, lw=1,
label=f"h={hset[i]}")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_linewidth(lw_median)
ax.spines["bottom"].set_linewidth(lw_median)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
if ybottom:
ymin, ymax = ax.get_ylim()
ax.set_ylim((ymin/ybottom, ymax))
def IRASA_mean3(ax, ybottom=None, lw_median=.1):
"""Plot IRASA geometric mean h3."""
ax.loglog(freqs_irasa, psd_full_irasa, c_sim, lw=1, ls="--")
ax.loglog(freqs_irasa, psds_resampled[2], c_h3, lw=1,
label=f"h={hset[i]}")
ax.spines["right"].set_linewidth(lw_median)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_linewidth(lw_median)
ax.spines["bottom"].set_linewidth(lw_median)
ax.set(xticks=[], yticks=[])
ax.set_yticks([], minor=True)
ax.set_xticks([], minor=True)
ax.patch.set_visible(False)
ymin, ymax = ax.get_ylim()
if ybottom:
ax.set_ylim((ymin/ybottom, ymax))
freq = 5
ax.annotate(f"{freq}Hz ",
xy=(freq, psd_full_irasa[freqs_irasa == freq][0]),
xytext=(freq, ymin*.9), fontsize=legend_fontsize_small, ha="center",
arrowprops=dict(arrowstyle="-", lw=1, ls=":", shrinkA=0))
freq = 10
ax.annotate(f"{freq}Hz",
xy=(freq, psd_full_irasa[freqs_irasa == freq][0]),
xytext=(freq, ymin*.9), fontsize=legend_fontsize_small, ha="center",
arrowprops=dict(arrowstyle="-", lw=1, ls=":", shrinkA=0))
freq = 20
ax.annotate(f" {freq}Hz",
xy=(freq, psd_full_irasa[freqs_irasa == freq][0]),
xytext=(freq, ymin*.9), fontsize=legend_fontsize_small, ha="center",
arrowprops=dict(arrowstyle="-", lw=1, ls=":", shrinkA=0))
def make_frame(ax, c, title=None, lw_box=0, ls_box="-", **kwargs):
"""Make plotting frames."""
ax = fig.add_subplot(ax, **kwargs)
ax.tick_params(axis='both', which='both', bottom=0, left=0,
labelbottom=0, labelleft=0)
ax.set_facecolor(c)
ax.patch.set_alpha(.6)
ax.spines["right"].set_linestyle(ls_box)
ax.spines["left"].set_linestyle(ls_box)
ax.spines["top"].set_linestyle(ls_box)
ax.spines["bottom"].set_linestyle(ls_box)
ax.spines["right"].set_linewidth(lw_box)
ax.spines["left"].set_linewidth(lw_box)
ax.spines["top"].set_linewidth(lw_box)
ax.spines["bottom"].set_linewidth(lw_box)
if title:
ax.set_title(title)
return ax
###Output
_____no_output_____
###Markdown
Plot settings
###Code
# Mpl settings
panel_fontsize_small = 7
legend_fontsize_small = 5
mpl.rcParams['font.size'] = panel_fontsize_small
mpl.rcParams['legend.fontsize'] = legend_fontsize_small
mpl.rcParams["axes.spines.right"] = True
mpl.rcParams["axes.spines.top"] = True
mpl.rcParams["axes.spines.left"] = True
mpl.rcParams["axes.spines.bottom"] = True
# Arrows
arr_props = dict(facecolor='k', width=.3, headwidth=2, headlength=2, shrink=0)
arr_props_round1 = dict(facecolor='k', width=.00001, headwidth=2, headlength=2,
shrink=0, connectionstyle="arc3,rad=-.3")
arr_props_round2 = dict(facecolor='k', width=.00001, headwidth=1.7,
headlength=1.7, shrink=0,
connectionstyle="arc3,rad=-.3",
lw=.1, ls=(0, (10, 10)))
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Figure 1
###Code
fig = plt.figure(figsize=(fig_width, 3.5))
gs = fig.add_gridspec(nrows=2, ncols=3,
width_ratios=[1, 3, 1], height_ratios=[5, 4],
wspace=.3, hspace=.3)
# Input gridspec
gs_input = gs[:, 0].subgridspec(2, 1)
input_frame = make_frame(gs_input[:], c_box, title="Input")
inp_margins = dict(xmargin=.4, ymargin=.4)
ax_inp_ser = fig.add_subplot(gs_input[0], **inp_margins)
ax_inp_PSD = fig.add_subplot(gs_input[1], **inp_margins)
# IRASA algorithm gridspec
irasa_frame = make_frame(gs[0, 1], c_box, title="IRASA")
gs_IRASA = gs[0, 1].subgridspec(2, 3, hspace=0, wspace=0)
IR_margins = dict(xmargin=.5, ymargin=.4)
gs_IR11 = fig.add_subplot(gs_IRASA[0, 0], **IR_margins)
gs_IR12 = fig.add_subplot(gs_IRASA[1, 0], **IR_margins)
gs_IR21 = fig.add_subplot(gs_IRASA[0, 1], **IR_margins)
gs_IR22 = fig.add_subplot(gs_IRASA[1, 1], **IR_margins)
gs_IR31 = fig.add_subplot(gs_IRASA[0, 2], **IR_margins)
gs_IR32 = fig.add_subplot(gs_IRASA[1, 2], **IR_margins)
# fooof gridspecs
gs_fooof = gs[1, 1].subgridspec(1, 2, width_ratios=[2, 1])
fooof_frame = make_frame(gs_fooof[:, :], c_box, title="FOOOF")
gs_fooof1 = gs_fooof[0].subgridspec(1, 2, hspace=0)
fooof_margins = dict(xmargin=.4, ymargin=.6)
ax_fooof1 = fig.add_subplot(gs_fooof1[0], **fooof_margins)
ax_fooof2 = fig.add_subplot(gs_fooof1[1], **fooof_margins)
gs_fooof2 = gs_fooof[1].subgridspec(2, 1, hspace=0)
fooof_margins = dict(xmargin=.4)
ax_fooof3 = fig.add_subplot(gs_fooof2[0], **fooof_margins)
ax_fooof4 = fig.add_subplot(gs_fooof2[1], **fooof_margins)
# Output gridspec
gs_output = gs[:, 2].subgridspec(3, 1, height_ratios=[1, 3, 1])
output_frame = make_frame(gs_output[1], c_box, title="Output")
our_margins = dict(xmargin=.4, ymargin=.3)
ap = fig.add_subplot(gs_output[1], **our_margins)
# Add Plots
input_series(ax_inp_ser, duration=.5, step=24)
input_psd(ax_inp_PSD)
IRASA_res1(gs_IR11, ytop=.5)
IRASA_res2(gs_IR21, ytop=.5)
IRASA_res3(gs_IR31, ytop=.5)
IRASA_mean1(gs_IR12, ybottom=1.6)
IRASA_mean2(gs_IR22, ybottom=1.6)
IRASA_mean3(gs_IR32, ybottom=1.6)
fooof_1(ax_fooof1, ybottom=1.5)
ylim = fooof_2(ax_fooof2, yscale=1.0, ybottom=.9)
fooof_3(ax_fooof3, ylim)
fooof_4(ax_fooof4, ylim)
aperiodic_fit(ap, ybottom=2)
# Panel labels
panel_dic = dict(fontweight="bold", fontsize=panel_fontsize_small,
x=.03, y=.97, va="top")
ax_inp_ser.text(s="a", transform=ax_inp_ser.transAxes, **panel_dic)
gs_IR11.text(s="b", transform=gs_IR11.transAxes, **panel_dic)
gs_IR12.text(s="c", transform=gs_IR12.transAxes, **panel_dic)
ap.text(s="d", transform=ap.transAxes, **panel_dic)
ax_inp_PSD.text(s="e", transform=ax_inp_PSD.transAxes, **panel_dic)
ax_fooof1.text(s="f", transform=ax_fooof1.transAxes, **panel_dic)
ax_fooof2.text(s="g", transform=ax_fooof2.transAxes, **panel_dic)
ax_fooof3.text(s="h", transform=ax_fooof3.transAxes, **panel_dic)
# Add annotations
ax_inp_ser.annotate(s="", xy=(.5, -.2),
xytext=(.5, 0),
xycoords='axes fraction',
annotation_clip=False, arrowprops=arr_props)
ax_inp_ser.text(s="PSD", x=.6, y=-.11, transform=ax_inp_ser.transAxes,
fontsize=legend_fontsize_small)
ax_fooof1.annotate(s="", xy=(-.1, .5),
xytext=(-.5, .5),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
ax_fooof1.annotate(s="", xy=(1.2, .5),
xytext=(.95, .5),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
ax_fooof2.annotate(s="", xy=(1.2, .5),
xytext=(.95, .5),
xycoords='axes fraction',
annotation_clip=False, arrowprops=arr_props)
ax_fooof3.annotate(s="", xy=(.85, -.4),
xytext=(.85, 0),
xycoords='axes fraction',
annotation_clip=False, arrowprops=arr_props_round1)
ax_fooof3.text(s="Subtract\npeak\nfrom PSD",
x=1.05, y=.5,
transform=ax_fooof3.transAxes,
fontsize=legend_fontsize_small, va="top")
ax_fooof4.annotate(s="", xy=(.12, 1),
xytext=(.12, .6),
xycoords='axes fraction',
annotation_clip=False, arrowprops=arr_props_round2)
ax_fooof4.text(s="repeat", x=.15, y=.85, transform=ax_fooof4.transAxes,
fontsize=legend_fontsize_small, va="top")
gs_IR11.annotate(s="", xy=(-.1, .5),
xytext=(-.4, .5),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR11.annotate(s="", xy=(.51, .1),
xytext=(.51, .25),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR21.annotate(s="", xy=(.51, .1),
xytext=(.51, .25),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR31.annotate(s="", xy=(.51, .1),
xytext=(.51, .25),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR12.annotate(s="", xy=(1.05, .5),
xytext=(.9, .5),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR22.annotate(s="", xy=(1.05, .5),
xytext=(.9, .5),
xycoords='axes fraction',
annotation_clip=False, arrowprops=arr_props)
gs_IR32.annotate(s="", xy=(1.45, .5),
xytext=(1.05, .5),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
gs_IR32.text(s="median",
x=1.05, y=.7,
va="top",
fontsize=legend_fontsize_small,
transform=gs_IR32.transAxes)
ax_fooof3.annotate(s="", xy=(1.5, .6),
xytext=(1.05, .6),
xycoords='axes fraction',
annotation_clip=False,
arrowprops=arr_props)
plt.savefig(fig_path + "Fig1.pdf", bbox_inches="tight")
plt.savefig(fig_path + "Fig1.png", dpi=1000, bbox_inches="tight")
plt.show()
###Output
_____no_output_____
###Markdown
RNA-SeQC 2: main figuresThis notebook contains the code for generating Fig. 1 from the manuscript. A notebook for the supplemental figures is provided [separately](Supplementary_Figures.ipynb).Metrics tables from various datasets used in the paper are provided in the [data](data) directory of this repository.Sample annotations from GTEx and read counts from RNA-SeQC 2 can be downloaded by uncommenting and running the cell below.
###Code
# !wget -P data/ https://personal.broadinstitute.org/francois/rnaseqc2_paper/GTEx_v8_18655_samples_RNASeQCv2.3.6.gene_reads.gct.gz
# !wget -P data/ https://storage.googleapis.com/gtex_analysis_v8/annotations/GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt
import numpy as np
import pandas as pd
import scipy.stats as stats
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
matplotlib.font_manager._rebuild()
plt.rcParams.update({'font.family': 'Helvetica', 'svg.fonttype':'none', 'pdf.fonttype':42})
import seaborn as sns
import qtl.plot
import qtl.pca
import rnaseqc.plot
if not os.path.exists('pdf'):
    os.mkdir('pdf')
###Output
_____no_output_____
###Markdown
Load inputs
###Code
gtex_counts_df = pd.read_parquet('data/GTEx_v8_18655_samples_RNASeQCv2.3.6.gene_reads.parquet')
gtex_metrics_df = pd.read_csv('data/GTEx_v8_18655_samples_RNASeQCv2.3.6.metrics.tsv.gz',
sep='\t', index_col=0)
ffpe_metrics_df = pd.read_csv('data/VanAllen2015_32_samples_RNASeQCv2.3.6.metrics.tsv.gz',
sep='\t', index_col=0)
entex_metrics_df = pd.read_csv('data/ENTEx_97_samples_RNASeQCv2.3.6.metrics.tsv.gz',
sep='\t', index_col=0)
cptac2_metrics_df = pd.read_csv('data/CPTAC2_CRC_105_samples_RNASeQCv2.3.6.metrics.tsv.gz',
sep='\t', index_col=0)
cptac3_metrics_df = pd.read_csv('data/CPTAC3_LUAD_214_samples_RNASeQCv2.3.6.metrics.tsv.gz',
sep='\t', index_col=0)
sample_df = pd.read_csv('data/GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt',
sep='\t', index_col=0, low_memory=False)
sample_df = sample_df[(sample_df['SMGEBTCHT']=='TruSeq.v1') &
sample_df.index.str.startswith('GTEX')]
sample_df['tissue_id'] = sample_df['SMTSD'].apply(lambda x: x.replace('(','').replace(')','').replace(' - ', ' ').replace(' ', '_'))
###Output
_____no_output_____
###Markdown
Panel A: metrics across different protocols
###Code
sample_ids = (sample_df[sample_df['tissue_id'] == 'Pancreas'].index.tolist()[:100] +
sample_df[sample_df['tissue_id'] == 'Cells_Cultured_fibroblasts'].index.tolist()[:100])
metrics_all_df = pd.concat([
ffpe_metrics_df,
cptac2_metrics_df,
cptac3_metrics_df,
gtex_metrics_df.loc[sample_ids],
entex_metrics_df,
])
cohort_s = pd.Series('GTEx, polyA+', index=metrics_all_df.index)
cohort_s[entex_metrics_df.index] = 'GTEx, total RNA'
cohort_s[cptac2_metrics_df.index] = 'CPTAC, polyA+'
cohort_s[cptac3_metrics_df.index] = 'CPTAC, total RNA'
cohort_s[ffpe_metrics_df.index] = 'FFPE, capture'
cohort_colors = {
'GTEx, polyA+': hsv_to_rgb([0.55, 0.8, 0.6]),
'GTEx, total RNA': hsv_to_rgb([0.55, 0.8, 0.8]),
'CPTAC, polyA+': hsv_to_rgb([0.05, 0.8, 0.6]),
'CPTAC, total RNA': hsv_to_rgb([0.05, 0.8, 0.8]),
'FFPE, capture': hsv_to_rgb([0.25, 0.8, 0.6]),
}
ylim_dict = {
'Duplicate Rate of Mapped': [0, 1],
'Exonic Rate': [0, 1],
'Intronic Rate': [0, 1],
}
cohorts = list(cohort_colors)
for k,metric in enumerate(['Exonic Rate', 'Intronic Rate', 'Duplicate Rate of Mapped']):
v = metrics_all_df[metric].copy()
v[v == 0] = np.NaN
ax = rnaseqc.plot.metrics(v, cohort_s=cohort_s, cohort_colors=cohort_colors,
cohort_order=cohorts, ylim=ylim_dict[metric],
ms=12, alpha=0.5, plot_density=False,
ah=1, dt=0.25, db=0.5, dl=0.75, rasterized=False,
aw=1.25, ds=0.25, daw=0.5, dr=0.1)
ax.set_xticks(np.arange(0, 800, 200))
ax.spines['left'].set_position(('outward', 6))
ax.spines['bottom'].set_position(('outward', 3))
ax.set_xlabel('Samples', fontsize=12)
title = metric.replace('Ambiguous Alignment', 'Amb. Align.').replace(
'Duplicate Rate of Mapped', 'Duplicate Rate')
ax.set_title(title.replace(' Rate', '').replace('Duplicate', 'Duplicates'), fontsize=12)
if k == 0:
ax.set_ylabel('Alignment rate', fontsize=12)
if k != 1:
ax.set_xlabel(None)
if k > 0:
ax.spines['left'].set_visible(False)
ax.set_yticks([])
ax.set_ylabel(None)
plt.savefig(f"pdf/Fig1A.protocols.{metric.replace(' ','_')}.pdf", dpi=300)
ax = qtl.plot.setup_figure(0, 1, xspace=[0.05, 1.5], yspace=[0.5, 0.125])
qtl.plot.format_plot(ax, hide=['top', 'bottom', 'left', 'right'])
ax.set_xticks([])
ax.set_yticks([])
lg = [ax.scatter([],[], s=30, c=[cohort_colors[k]], lw=0.5, edgecolor='none', label=k) for k in cohorts]
leg = ax.legend(lg, cohorts, fontsize=10, labelspacing=None, handletextpad=0.4,
handlelength=1, loc='upper left', borderpad=None, borderaxespad=0, bbox_to_anchor=(1.1,1))
plt.savefig('pdf/protocols.legend.pdf')
###Output
_____no_output_____
###Markdown
Panel B: RIN vs. 3' bias & ischemic time
###Code
tissue_id = 'Adrenal_Gland'
sample_ids = sample_df[sample_df['tissue_id'] == tissue_id].index
rin = sample_df.loc[sample_ids, 'SMRIN']
bias = gtex_metrics_df.loc[sample_ids, "Median 3' bias"]
isch = sample_df.loc[sample_ids, 'SMTSISCH'] / 60
ax, cax = qtl.plot.setup_figure(2, 2, xspace=[0.75, 0.5],
colorbar=True, ds=0.05, cw=0.1, ct=0.1)
h = ax.scatter(rin, bias, c=isch, cmap=plt.cm.GnBu,
clip_on=False, s=36, edgecolor='k',lw=0.5,
vmin=0, vmax=24)
ax.set_xlabel('RIN', fontsize=12)
ax.set_ylabel("Median 3' bias", fontsize=12)
ax.set_xticks(np.arange(0,11))
ax.set_xlim([5.5, 10])
qtl.plot.format_plot(ax, fontsize=10)
ax.spines['left'].set_position(('outward', 6))
ax.spines['bottom'].set_position(('outward', 6))
hc = plt.colorbar(h, cax=cax)
hc.set_ticks([0,12,24])
plt.savefig(f'pdf/{tissue_id}.rin_vs_bias.scatter.pdf')
###Output
_____no_output_____
###Markdown
Panel C: 3' bias across different tissues
###Code
sample_ids = sample_df[sample_df['tissue_id'].isin(
['Liver', 'Pancreas', 'Lung', 'Cells_Cultured_fibroblasts', 'Brain_Cortex']
)].index
# sort tissues by median RIN
median_s = sample_df.loc[sample_ids].groupby('tissue_id').apply(
lambda x: x['SMRIN'].median()).sort_values(ascending=False)
order = median_s.index
abbrv_dict = gtex_metrics_df[['tissue_id', 'tissue_abbrv']].drop_duplicates().set_index('tissue_id')['tissue_abbrv'].to_dict()
order = order.map(abbrv_dict)
num_s = sample_df.loc[sample_ids].groupby('tissue_id').apply(len).rename(index=abbrv_dict)
colors = {
'LIVER': '#AABB66',
'PNCREAS': '#995522',
'LUNG': '#99FF00',
'FIBRBLS': '#AAEEFF',
'BRNCTXA': '#EEEE00',
}
ax = qtl.plot.setup_figure(2,2, xspace=[0.75, 0.25])
ax.margins(0.02)
sns.violinplot(x='tissue_abbrv', y="Median 3' bias",
data=gtex_metrics_df.loc[sample_ids], ax=ax,
palette=colors, order=order,
scale='width', width=0.66, linewidth=1.25, saturation=1)
ax.set_xlabel(None)
ax.plot([-0.5,4.5], [0.5,0.5], '--', c=[0.6]*3, zorder=-10)
# median RIN
for k,i in enumerate(median_s.values):
ax.text(k, 1.04, i, ha='center', fontsize=10)
qtl.plot.format_plot(ax, fontsize=10, x_offset=6, y_offset=6)
ax.set_ylim([ax.get_ylim()[0], 1])
ax.set_ylabel("Median 3' bias", fontsize=12)
ax.set_xlim([-0.5, 4.5])
ax.set_xticklabels(order, rotation=30, ha='right');
ax.set_xticklabels(order +' ('+num_s[order].astype(str)+')',
rotation=25, ha='right')
plt.savefig('pdf/bias_distributions.pdf')
###Output
_____no_output_____
###Markdown
Panel D: correlation of 3' bias with PCs
###Code
tissue_id = 'Colon_Sigmoid'
sample_ids = sample_df[sample_df['tissue_id'] == tissue_id].index
# compute PCs
pc_df, pve_s = qtl.pca.get_pcs(gtex_counts_df[sample_ids])
ax, cax = qtl.plot.setup_figure(2, 2, xspace=[0.75, 0.5],
colorbar=True, ds=0.05, cw=0.1, ct=0.1)
h = ax.scatter(pc_df['PC1'], pc_df['PC2'],
c=gtex_metrics_df.loc[sample_ids, "Median 3' bias"],
cmap=plt.cm.GnBu,
clip_on=False, s=36, edgecolor='k',lw=0.5,
)
ax.set_xlabel(f"PC1 ({pve_s['PC1']:.2f}%)", fontsize=12)
ax.set_ylabel(f"PC2 ({pve_s['PC2']:.2f}%)", fontsize=12)
ax.set_yticks([-0.1, 0.0, 0.1])
qtl.plot.format_plot(ax, fontsize=10)
ax.spines['left'].set_position(('outward', 6))
ax.spines['bottom'].set_position(('outward', 6))
hc = plt.colorbar(h, cax=cax)
hc.set_ticks([0.6,0.7,0.8])
plt.savefig(f'pdf/{tissue_id}.PCA_bias.pdf')
r = stats.pearsonr(pc_df['PC2'], gtex_metrics_df.loc[sample_ids, "Median 3' bias"])[0]
print(f'PC2: R2 = {r**2:.2f}')
###Output
PC2: R2 = 0.64
###Markdown
Panel E: FFPE data
###Code
ax, cax = rnaseqc.plot.detection_bias(ffpe_metrics_df, bias_metric="Median Exon CV", ct=0.1)
cax.set_ylabel('')
plt.savefig('pdf/FFPE.genes_vs_CV.scatter.pdf')
###Output
_____no_output_____
###Markdown
Figure 1 Anisotropic KDE as density estimate for filaments Load dependencies:
###Code
import pandas as pd
import numpy as np
from skimage.filters import gaussian
from skimage import exposure
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Set pixel size and load file, filter outer region and uncertain localizations:
###Code
pixel_size = 10.0; # nm
filename = 'data/spot1.csv'
data = pd.read_csv(filename,delimiter=',')
data = data.query('1000<`x [nm]`<17000 and 1000<`y [nm]`<17000 and `uncertainty_xy [nm]`<50')
###Output
_____no_output_____
###Markdown
Generate 2D image of this localization dataset:
###Code
x = data["x [nm]"].to_numpy()
y = data["y [nm]"].to_numpy()
xbins = np.arange(x.min(), x.max()+1, pixel_size)
ybins = np.arange(y.min(), y.max()+1, pixel_size)
img,xe,ye = np.histogram2d(y,x,bins=(ybins,xbins));
img[img<2]=0
s = 0.5;
img_filt = gaussian(img,sigma=(s,s));
###Output
_____no_output_____
###Markdown
Show overview with scale bar:
###Code
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(img_filt),cmap=plt.get_cmap("magma"))
plt.plot((1300,1500), (1500,1500), color='white', linewidth=10)
plt.axis("off")
plt.title("Figure 1b")
plt.show()
###Output
_____no_output_____
###Markdown
Histogram rendering of a complete field of view of a reconstructed dSTORM image of microtubules in a COS7 cell, scale bar = 2 μm.---- Cut out small subset for zoomed in panels:
###Code
subset = data.query('11000<`x [nm]`<12560 and 11000<`y [nm]`<12560 and `uncertainty_xy [nm]`<50')
###Output
_____no_output_____
###Markdown
Generate 2D histogram of small subset:
###Code
x = subset["x [nm]"].to_numpy()
y = subset["y [nm]"].to_numpy()
xbins = np.arange(x.min(), x.max()+1, pixel_size)
ybins = np.arange(y.min(), y.max()+1, pixel_size)
subimg,xe,ye = np.histogram2d(y,x,bins=(ybins,xbins));
subimg[subimg<2]=0
###Output
_____no_output_____
###Markdown
Show small cut-out for illustration of method:
###Code
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(subimg[60:110,100:150]),cmap=plt.get_cmap("magma"))
plt.axis("off")
plt.title("Figure 1a, top")
plt.show()
###Output
_____no_output_____
###Markdown
Same in inverted colormap for illustrating the anisotropic Gaussians:
###Code
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(subimg[60:110,100:150]),cmap=plt.get_cmap("Blues"))
plt.axis("off")
plt.title("Figure 1a, bottom")
plt.show()
###Output
_____no_output_____
###Markdown
Plot small image of original unfiltered histogram with scale bar:
###Code
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(subimg),cmap=plt.get_cmap("magma"))
plt.plot((125,145), (145,145), color='white', linewidth=15)
plt.axis("off")
plt.title("Figure 1c original")
plt.show()
###Output
_____no_output_____
###Markdown
Filter with different Gaussian sigma:
###Code
from skimage.filters import gaussian
s = 3;
subimg_filt = gaussian(subimg,sigma=(s,s));
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(subimg_filt),cmap=plt.get_cmap("magma"))
plt.axis("off")
plt.title("Figure 1c top, $\sigma$ = "+str(s))
plt.show()
###Output
_____no_output_____
###Markdown
---- Calculate anisotropic KDE on enlarged image with 100 pix padding:
###Code
from aniso_kde import kde_image
subimg_kde = img_filt[900:1256,900:1256]
s_kde = 1
subimg_kde = kde_image(subimg_kde,11,s_kde);
###Output
_____no_output_____
###Markdown
Remove padded regions:
###Code
subimg_kde = subimg_kde[101:-100,101:-100]
###Output
_____no_output_____
###Markdown
Show resulting image:
###Code
plt.figure(figsize=(7,7))
plt.imshow(exposure.equalize_hist(subimg_kde),cmap=plt.get_cmap("magma"))
#plt.plot((125,145), (145,145), color='white', linewidth=15)
plt.axis("off")
plt.title("Figure 1c bottom, s = "+str(s_kde))
plt.show()
###Output
_____no_output_____
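###Markdown
For intuition (a standalone sketch added here, independent of the `aniso_kde` implementation used above), an anisotropic Gaussian kernel elongated along an orientation $\theta$ can be built from a rotated covariance matrix; the sigmas, angle and kernel size below are arbitrary illustration values:
###Code
def aniso_gaussian_kernel(sigma_major, sigma_minor, theta, half_size=15):
    """2D anisotropic Gaussian kernel, elongated along angle theta (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s], [s, c]])  # rotation matrix
    cov = R @ np.diag([sigma_major**2, sigma_minor**2]) @ R.T
    inv_cov = np.linalg.inv(cov)
    yy, xx = np.mgrid[-half_size:half_size + 1, -half_size:half_size + 1]
    pts = np.stack([xx, yy], axis=-1)
    q = np.einsum('...i,ij,...j->...', pts, inv_cov, pts)  # quadratic form x^T * inv(cov) * x
    k = np.exp(-0.5 * q)
    return k / k.sum()  # normalize to unit mass

plt.imshow(aniso_gaussian_kernel(6, 1.5, np.pi / 4), cmap=plt.get_cmap("Blues"))
plt.axis("off")
plt.show()
###Output
_____no_output_____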
###Markdown
Plotting boundaries of HxC planes with different number of bins.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
lag = 512 # {32, 64, 128, 256, 512} Choose one of them
base = pd.read_pickle('./pkl_datasets/mamiraua_dataset_ACF_' + str(lag) + '.gzip')
plt.figure(figsize=(24,10))
plt.rc('font', size=22)
plt.rc('axes', titlesize=22)
plt.subplot(1,2,1)
lags = [32, 64, 128, 256, 512]
for lag in lags:
cotas = pd.read_csv('./boundary_files/Cotas_HxC_bins_' + str(int(lag)) + '.csv')
noise = pd.read_csv('./coloredNoises/coloredNoises_' + str(int(lag)) + '.csv')
if lag == 32:
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
else:
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = '')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = '')
plt.text(0.7, 0.475, '512', fontsize= 18)
plt.text(0.7, 0.445, '256', fontsize= 18)
plt.text(0.7, 0.415, '128', fontsize= 18)
plt.text(0.7, 0.376, '64', fontsize= 18)
plt.text(0.7, 0.34, '32', fontsize= 18)
plt.text(0.58, 0.27, '512', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.6, 0.254, '256', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.62, 0.238, '128', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.64, 0.223, '64', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.66, 0.207, '32', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.title('a)')
plt.text(0.66, 0.015, 'disorder', fontsize= 22)
plt.arrow(0.15, 0.01, 0.705, 0, head_width=0.015, head_length=0.015, linewidth=1, length_includes_head=True)
plt.text(0.2, 0.055, 'order', fontsize= 22)
plt.arrow(0.85, 0.05, -0.70, 0, head_width=0.015, head_length=0.015, linewidth=1, length_includes_head=True)
plt.subplot(1,2,2)
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.scatter(base['H'], base['C'], marker='.', s=15, c=base['JSD'], norm=plt.Normalize(vmax=1, vmin=0),
cmap = 'tab20c')
plt.axvline(x=0.7, ymin=0, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.40, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.37, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.34, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.plot(.7, .40, 'o', color='r', linewidth=1)
plt.annotate('$p_1$', xy=(.71, .40))
plt.plot(.7, .37, 'o', color='r', linewidth=1)
plt.annotate('$p_2$', xy=(.71, .37))
plt.plot(.7, .34, 'o', color='r', linewidth=1)
plt.annotate('$p_3$', xy=(.71, .34))
plt.title('b)')
# plt.savefig('./figures/Fig1.eps', format="eps", bbox_inches='tight')
plt.show()
###Output
_____no_output_____
###Markdown
###Code
library(magrittr)
library(ggplot2)
library(dplyr)
###Output
Attaching package: ‘dplyr’
The following objects are masked from ‘package:stats’:
filter, lag
The following objects are masked from ‘package:base’:
intersect, setdiff, setequal, union
###Markdown
Fig 1A
###Code
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/fig1.csv")
dat = read.table(file = "skiex003/datasets/EnsembleMerge/fig1.csv", header = T, sep = ",")
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/scVI.csv")
scvi = read.table(file = "skiex003/datasets/EnsembleMerge/scVI.csv", header = T, sep = ",")
scvi$Dataset = recode_factor(scvi$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_3 = "Chen 2020", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
scvi = scvi[,c("score", "score_method", "method", "Dataset", "Datasets")]
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/fastMNN.csv")
mnn = read.table(file = "skiex003/datasets/EnsembleMerge/fastMNN.csv", header = T, sep = ",")
mnn$Dataset = recode_factor(mnn$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_3 = "Chen 2020", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
mnn = mnn[,c("score", "score_method", "method", "Dataset", "Datasets")]
dat = rbind(dat, mnn)
dat = rbind(dat, scvi)
dat = dat[which(dat$Dataset != "Chen 2020"), ]
nData = dat$Datasets %>% unique() %>% length()
nMethod = dat$method %>% unique() %>% length()
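# scale rank sums into [0, 1]: the largest possible rank sum is nData * nMethod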
range0_1 <- function(x){x/(nData*nMethod)}
Metric_Colors = c("ARI_Cell" = "#C93327", "NMI" = "#F4CDB3", "ASW_Cell" = "#F8766D", "GC" = "#857622", "ARI_Batch" = "#8A9DA4", "ASW_Batch" = "#75A08B")
dat$method = factor(dat$method, levels = c("EnsembleMerge", "Seurat", "Harmony", "Liger","Scanorama", "BBKNN", "fastMNN", "scVI", "Uncorrected"))
dat$score_method = factor(dat$score_method, levels = c("NMI", "ARI_Cell", "ASW_Cell", "GC", "ARI_Batch", "ASW_Batch"))
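# rank methods within each metric and dataset, sum and average those ranks per method, then rescale the sums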
dat %<>% group_by(score_method, Datasets) %>% mutate(Rank = rank(score)) %>% ungroup() %>% group_by(score_method, method) %>% mutate(Rank_Sum = sum(Rank)) %>% ungroup() %>% group_by(score_method, method) %>% mutate(avg_rank = mean(Rank)) %>% ungroup() %>% mutate(scaled_rank_sum = range0_1(Rank_Sum))
dat %>% head(n = 20)
options(repr.plot.width = 20, repr.plot.height = 12)
p <- ggplot(dat, aes(x=method, y=scaled_rank_sum, fill = score_method)) +
geom_bar(stat = "identity", position=position_dodge()) + theme_minimal(base_size=20) + ggtitle((label = "Sum Ranked Performance per Method")) + scale_fill_manual(values = Metric_Colors[1:6]) + ylab("Scaled Rank Sum") + xlab("Methods") + ylim(0,1)
p
###Output
_____no_output_____
###Markdown
Fig 1B
###Code
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/fig1.csv")
dat = read.table(file = "skiex003/datasets/EnsembleMerge/fig1.csv", header = T, sep = ",")
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/scVI.csv")
scvi = read.table(file = "skiex003/datasets/EnsembleMerge/scVI.csv", header = T, sep = ",")
scvi$Dataset = recode_factor(scvi$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_3 = "Chen 2020", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
scvi = scvi[,c("score", "score_method", "method", "Dataset", "Datasets")]
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/fastMNN.csv")
mnn = read.table(file = "skiex003/datasets/EnsembleMerge/fastMNN.csv", header = T, sep = ",")
mnn$Dataset = recode_factor(mnn$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_3 = "Chen 2020", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
mnn = mnn[,c("score", "score_method", "method", "Dataset", "Datasets")]
dat = rbind(dat, mnn)
dat = rbind(dat, scvi)
dat = dat[which(dat$Dataset != "Chen 2020"), ]
Metric_Colors = c("ARI_Cell" = "#C93327", "NMI" = "#F4CDB3", "ASW_Cell" = "#F8766D", "GC" = "#857622", "ARI_Batch" = "#8A9DA4", "ASW_Batch" = "#75A08B")
dat$method = factor(dat$method, levels = c("EnsembleMerge", "Seurat", "Harmony", "Liger","Scanorama", "BBKNN", "fastMNN", "scVI", "Uncorrected"))
dat$score_method = factor(dat$score_method, levels = c("NMI", "ARI_Cell", "ASW_Cell", "GC", "ARI_Batch", "ASW_Batch"))
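# min-max normalization to [0, 1] (note: this differs from the Fig 1A scaling, which divides by nData * nMethod)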
range0_1 <- function(x){(x-min(x))/(max(x)-min(x))}
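# same ranking pipeline as Fig 1A, but rescaled with the min-max function above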
dat %<>% group_by(score_method, Datasets) %>% mutate(Rank = rank(score)) %>% ungroup() %>% group_by(score_method, method) %>% mutate(Rank_Sum = sum(Rank)) %>% ungroup() %>% group_by(score_method, method) %>% mutate(avg_rank = mean(Rank)) %>% ungroup() %>% mutate(scaled_rank_sum = range0_1(Rank_Sum))
dat$datasets = factor(dat$Datasets, levels = c("Dataset_1", "Dataset_2", "Dataset_3", "Dataset_4",
"Dataset_5", "Dataset_6", "Dataset_7", "Dataset_8", "Dataset_9",
"Dataset_10", "Dataset_11", "Dataset_12", "Dataset_13", "Dataset_14",
"Dataset_15", "Dataset_16","Dataset_17","Dataset_18"))
dat$Dataset = recode_factor(dat$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
Method_Colors = c("EnsembleMerge" = "#66D1C2", "Seurat" = "#528460", "Harmony" = "#6B8D5E", "Liger" = "#8DAB81", "Scanorama" = "#C74955", "BBKNN" = "#D5757E", "fastMNN" = "#E099A0", "scVI" = "#EAB8BD", "Uncorrected" = "#F4DDDC")
dat %>% head(n = 20)
options(repr.plot.width = 20, repr.plot.height = 12)
dat[which(dat$score_method == "ARI_Cell"),] %>%
ggplot(aes(x = Dataset, y = score, fill = method)) + geom_bar(stat = "identity", position=position_dodge()) + theme_minimal(base_size=20) + ggtitle((label = "ARI Cell Performance per dataset")) + facet_wrap(~Dataset, scales = "free") + scale_fill_manual(values = Method_Colors)#+ geom_errorbar(aes(ymin=Sum_Rank-sd, ymax=Sum_Rank+sd), width=.2, position=position_dodge(.9))
###Output
_____no_output_____
###Markdown
Fig 1C
###Code
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/Figure11_scores.csv")
scores = read.table(file = "skiex003/datasets/EnsembleMerge/Figure11_scores.csv", header = T, sep = ",")
system("wget -x -c -nH https://s3.msi.umn.edu/skiex003/datasets/EnsembleMerge/Figures11_weights.csv")
weights = read.table(file = "skiex003/datasets/EnsembleMerge/Figures11_weights.csv", header = T, sep = ",")
weights$method[which(weights$method == "bbknn")] = "BBKNN"
data = merge(scores, weights, by = c("method", "Datasets"))
data$datasets = factor(data$Datasets, levels = c("Dataset_1", "Dataset_2", "Dataset_4",
"Dataset_5", "Dataset_6", "Dataset_7", "Dataset_8", "Dataset_9",
"Dataset_10", "Dataset_11", "Dataset_12", "Dataset_13", "Dataset_14",
"Dataset_15", "Dataset_16","Dataset_17","Dataset_18"))
data$Dataset = recode_factor(data$Datasets, Dataset_1 = "Villani 2017", Dataset_2 = "Han 2018", Dataset_4 = "Hemberg Panc",
Dataset_5 = "PBMC", Dataset_6 = "293t_Jurkat", Dataset_7 = "Hemberg Retina", Dataset_8 = "Saunders 2018", Dataset_9 = "HCA Blood",
Dataset_10 = "Paul 2015", Dataset_11 = "Human/mouse Retina", Dataset_12 = "Zilionis 2019", Dataset_13 = "Shekhar 2016", Dataset_14 = "Nestorowa 2016",
Dataset_15 = "Zheng 2017", Dataset_16 = "Polanski 2019", Dataset_17 = "HBDC", Dataset_18 = "Panc8")
data = data[which(data$Datasets != "Dataset_3"), ]
data %>% head()
data = data %>%
group_by(Datasets, score_method) %>%
mutate(score_method_type = case_when(
endsWith(score_method, "Cell") ~ "Biological",
endsWith(score_method, "NMI") ~ "Biological",
endsWith(score_method, "GC") ~ "Technical",
endsWith(score_method, "Batch") ~ "Technical"
), per_metric_correlation = cor(score, Weight),
per_metric_average_score = mean(score)) %>%
group_by(Datasets) %>%
mutate(total_correlation = cor(score, Weight), total_average_score = mean(score)) %>%
group_by(Datasets, score_method_type) %>%
mutate(score_method_type_correlation = cor(score, Weight), score_method_type_average_score = mean(score))
data %>% head()
options(repr.plot.width = 10, repr.plot.height = 12)
data %<>% filter(score_method_type == "Biological") %>% ggplot( aes(x=reorder(Dataset, score_method_type_correlation), y=score_method_type_correlation)) +
geom_bar(stat = "identity", position=position_dodge(), fill="#AD5F76") + theme_minimal(base_size=20) + ggtitle((label = "EnsembleMerge Score vs ARI Correlation")) + coord_flip() + ylab("KTA vs. ARI") + xlab("Datasets")
sessionInfo()
###Output
_____no_output_____
###Markdown
Plotting boundaries of HxC planes with different numbers of bins.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
lag = 512 # {32, 64, 128, 256, 512} Choose one of them
base = pd.read_pickle('../pkl_datasets/mamiraua_dataset_ACF_' + str(lag) + '.gzip')
cotas = pd.read_csv('./boundary_files/Cotas_HxC_bins_' + str(int(lag)) + '.csv')
noise = pd.read_csv('./coloredNoises/coloredNoises_' + str(int(lag)) + '.csv')
plt.figure(figsize=(24,10))
plt.rc('font', size=22)
plt.rc('axes', titlesize=22)
plt.subplot(1,2,1)
lags = [32, 64, 128, 256, 512]
for lag in lags:
cotas = pd.read_csv('./boundary_files/Cotas_HxC_bins_' + str(int(lag)) + '.csv')
noise = pd.read_csv('./coloredNoises/coloredNoises_' + str(int(lag)) + '.csv')
if lag == 32:
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
else:
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = '')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = '')
plt.text(0.7, 0.475, '512', fontsize= 18)
plt.text(0.7, 0.445, '256', fontsize= 18)
plt.text(0.7, 0.415, '128', fontsize= 18)
plt.text(0.7, 0.376, '64', fontsize= 18)
plt.text(0.7, 0.34, '32', fontsize= 18)
plt.text(0.58, 0.27, '512', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.6, 0.254, '256', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.62, 0.238, '128', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.64, 0.223, '64', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.66, 0.207, '32', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.title('a)')
plt.subplot(1,2,2)
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.scatter(base['H'], base['C'], marker='.', s=15, c=base['JSD'], norm=plt.Normalize(vmax=1, vmin=0),
cmap = 'tab20c')
plt.axvline(x=0.7, ymin=0, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.40, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.37, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.34, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.plot(.7, .40, 'o', color='r', linewidth=1)
plt.annotate('$p_1$', xy=(.71, .40))
plt.plot(.7, .37, 'o', color='r', linewidth=1)
plt.annotate('$p_2$', xy=(.71, .37))
plt.plot(.7, .34, 'o', color='r', linewidth=1)
plt.annotate('$p_3$', xy=(.71, .34))
plt.title('b)')
plt.show()
###Output
_____no_output_____ |
Make_TAD_heatmaps.ipynb | ###Markdown
Make Heatmaps of MAG abundance over time in the mesocosms.
Input are normalized MAG abundance tables (TAD80 >95%ID) divided by GE, represented as % total community (the % signs have been removed). The MAG abundances were consolidated and dereplicated across the biological replicates in Excel, then opened in Python to make the heatmaps.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from scipy import stats
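# Note on inputs (sketch only): the normalization described above was done
# upstream of this notebook. With hypothetical names it would look like:
# tad_percent = tad80_depth / genome_equivalents * 100  # % of total community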
H1 = pd.read_table("H1_mesocosms_TAD.txt", header = 0, index_col=0)
H2 = pd.read_table("H2_mesocosms_TAD.txt", header = 0, index_col=0)
H3 = pd.read_table("H3_mesocosms_TAD.txt", header = 0, index_col=0)
H = pd.read_table("Hum_mesocosms_TAD.txt", header = 0, index_col=0)
H
sns.heatmap(H, cmap="BuPu", linewidths=.5)
plt.xlabel("time (d)")
plt.savefig("Hum_TADheatmap.pdf", format='pdf', bbox_inches='tight')
C = pd.read_table("cow_mesocosms_TAD.txt", header = 0, index_col=0)
C
sns.heatmap(C, cmap="BuPu", linewidths=.5)
plt.savefig("Cow_TADheatmap.pdf", format='pdf', bbox_inches='tight')
P = pd.read_table("pig_mesocosms_TAD.txt", header = 0, index_col=0)
P.head()
m = sns.heatmap(P, cmap="BuPu", linewidths=.5, yticklabels="auto", cbar_kws={'label': '%total community'})
plt.xlabel("time (d)")
plt.savefig("Pig_TADheatmap.pdf", format='pdf', bbox_inches='tight')
#Import LL MAGs TAD table
df = pd.read_table("LL_mesocosms_TAD.txt", header = 0, index_col=0)
df
fig, ax = plt.subplots(figsize=(6.8,9.0)) # Sample figsize in inches
sns.heatmap(df, cmap="BuPu", linewidths=.5, ax=ax, xticklabels=1, yticklabels=False, cbar_kws={'label': '%total community'})
plt.tick_params(bottom=False)
plt.xlabel("Mesocosm_time (days)")
plt.savefig("LL_TADheatmap.pdf", format='pdf', bbox_inches='tight')
df = pd.read_table("D7_mesocosms_TAD.txt", header = 0, index_col=0)
df
#fig, ax = plt.subplots(figsize=(6.8,9.0)) # Sample figsize in inches
sns.heatmap(df, cmap="BuPu", linewidths=.5, xticklabels=1, yticklabels=1, cbar_kws={'label': '%total community'})
plt.tick_params(bottom=False)
plt.xlabel("Mesocosm_SamplingDay")
plt.ylabel("D7 MAG")
plt.savefig("D7_TADheatmap.pdf", format='pdf', bbox_inches='tight')
###Output
_____no_output_____ |
AI For Medical Prognosis/Week3/C2M3_Assignment.ipynb | ###Markdown
Survival Estimates that Vary with Time
Welcome to the third assignment of Course 2. In this assignment, we'll use Python to build some of the statistical models we learned this past week to analyze survival estimates for a dataset of lymphoma patients. We'll also evaluate these models and interpret their outputs. Along the way, you will be learning about the following:
- Censored Data
- Kaplan-Meier Estimates
- Subgroup Analysis
Outline
- [1. Import Packages](#1)
- [2. Load the Dataset](#2)
- [3. Censored Data](#3)
  - [Exercise 1](#Ex-1)
- [4. Survival Estimates](#4)
  - [Exercise 2](#Ex-2)
  - [Exercise 3](#Ex-3)
- [5. Subgroup Analysis](#5)
  - [5.1 Bonus: Log Rank Test](#5-1)
1. Import Packages
We'll first import all the packages that we need for this assignment.
- `lifelines` is an open-source library for data analysis.
- `numpy` is the fundamental package for scientific computing in Python.
- `pandas` is what we'll use to manipulate our data.
- `matplotlib` is a plotting library.
###Code
import lifelines
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from util import load_data
from lifelines import KaplanMeierFitter as KM
from lifelines.statistics import logrank_test
###Output
_____no_output_____
###Markdown
2. Load the Dataset Run the next cell to load the lymphoma data set.
###Code
data = load_data()
###Output
_____no_output_____
###Markdown
As always, you first look over your data.
###Code
print("data shape: {}".format(data.shape))
data.head()
###Output
data shape: (80, 3)
###Markdown
The column `Time` states how long the patient lived before they died or were censored. The column `Event` says whether a death was observed or not. `Event` is 1 if the event is observed (i.e. the patient died) and 0 if data was censored. Censorship here means that the observation has ended without any observed event. For example, let a patient be in a hospital for 100 days at most. If a patient dies after only 44 days, their event will be recorded as `Time = 44` and `Event = 1`. If a patient walks out after 100 days and dies 3 days later (103 days total), this event is not observed in our process and the corresponding row has `Time = 100` and `Event = 0`. If a patient survives for 25 years after being admitted, their data are still `Time = 100` and `Event = 0`.
3. Censored Data
We can plot a histogram of the survival times to see in general how long cases survived before censorship or events.
###Code
data.Time.hist();
plt.xlabel("Observation time before death or censorship (days)");
plt.ylabel("Frequency (number of patients)");
# Note that the semicolon at the end of the plotting line
# silences unnecessary textual output - try removing it
# to observe its effect
###Output
_____no_output_____
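###Markdown
To make the `Time`/`Event` encoding concrete, here is a tiny toy frame (illustrative values only, not drawn from the dataset) for the three scenarios described above:
###Code
# Toy rows for the three scenarios above: death at day 44; discharge at day 100
# with an unobserved later death; and survival well past day 100.
toy = pd.DataFrame({"Time": [44, 100, 100], "Event": [1, 0, 0]})
toy
###Output
_____no_output_____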
###Markdown
Exercise 1
In the next cell, write a function to compute the fraction ($\in [0, 1]$) of observations which were censored.
Hint: summing up the `Event` column will give you the number of observations where censorship has NOT occurred.
###Code
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def frac_censored(df):
"""
    Return the fraction of observations which were censored.
Args:
df (dataframe): dataframe which contains column 'Event' which is
1 if an event occurred (death)
0 if the event did not occur (censored)
Returns:
frac_censored (float): fraction of cases which were censored.
"""
result = 0.0
### START CODE HERE ###
censored_count = sum(df['Event'] == 0)
result = censored_count/len(df)
### END CODE HERE ###
return result
print(frac_censored(data))
###Output
0.325
###Markdown
Expected Output: `0.325`
Run the next cell to see the distributions of survival times for censored and uncensored examples.
###Code
df_censored = data[data.Event == 0]
df_uncensored = data[data.Event == 1]
df_censored.Time.hist()
plt.title("Censored")
plt.xlabel("Time (days)")
plt.ylabel("Frequency")
plt.show()
df_uncensored.Time.hist()
plt.title("Uncensored")
plt.xlabel("Time (days)")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
4. Survival Estimates
We'll now try to estimate the survival function: $$S(t) = P(T > t)$$ To illustrate the strengths of Kaplan-Meier, we'll start with a naive estimator of the above survival function. To estimate this quantity, we'll divide the number of people who we know lived past time $t$ by the number of people who were not censored before $t$.
Formally, let $i = 1, ..., n$ be the cases, and let $t_i$ be the time when $i$ was censored or an event happened. Let $e_i = 1$ if an event was observed for $i$ and 0 otherwise. Then let $X_t = \{i : t_i > t\}$, and let $M_t = \{i : e_i = 1 \text{ or } t_i > t\}$. The estimator you will compute will be:
$$\hat{S}(t) = \frac{|X_t|}{|M_t|}$$
Exercise 2
Write a function to compute this estimate for arbitrary $t$ in the cell below.
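For example, with rows $(t_i, e_i) = (5, 0), (10, 1), (15, 0)$: at $t = 12$, $|X_{12}| = 1$ (only the case censored at 15) and $|M_{12}| = 2$ (that case plus the death at 10), so $\hat{S}(12) = 1/2$, matching Test Case 2 below.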
###Code
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def naive_estimator(t, df):
"""
Return naive estimate for S(t), the probability
of surviving past time t. Given by number
of cases who survived past time t divided by the
number of cases who weren't censored before time t.
Args:
t (int): query time
df (dataframe): survival data. Has a Time column,
which says how long until that case
experienced an event or was censored,
and an Event column, which is 1 if an event
was observed and 0 otherwise.
Returns:
S_t (float): estimator for survival function evaluated at t.
"""
S_t = 0.0
### START CODE HERE ###
X = sum(df['Time'] > t)
M = sum( (df['Time'] > t) | (df['Event'] == 1) )
S_t = X / M
### END CODE HERE ###
return S_t
print("Test Cases")
sample_df = pd.DataFrame(columns = ["Time", "Event"])
sample_df.Time = [5, 10, 15]
sample_df.Event = [0, 1, 0]
print("Sample dataframe for testing code:")
print(sample_df)
print("\n")
print("Test Case 1: S(3)")
print("Output: {}, Expected: {}\n".format(naive_estimator(3, sample_df), 1.0))
print("Test Case 2: S(12)")
print("Output: {}, Expected: {}\n".format(naive_estimator(12, sample_df), 0.5))
print("Test Case 3: S(20)")
print("Output: {}, Expected: {}\n".format(naive_estimator(20, sample_df), 0.0))
# Test case 4
sample_df = pd.DataFrame({'Time': [5,5,10],
'Event': [0,1,0]
})
print("Test case 4: S(5)")
print(f"Output: {naive_estimator(5, sample_df)}, Expected: 0.5")
###Output
Test Cases
Sample dataframe for testing code:
Time Event
0 5 0
1 10 1
2 15 0
Test Case 1: S(3)
Output: 1.0, Expected: 1.0
Test Case 2: S(12)
Output: 0.5, Expected: 0.5
Test Case 3: S(20)
Output: 0.0, Expected: 0.0
Test case 4: S(5)
Output: 0.5, Expected: 0.5
###Markdown
In the next cell, we will plot the naive estimator using the real data up to the maximum time in the dataset.
###Code
max_time = data.Time.max()
x = range(0, max_time+1)
y = np.zeros(len(x))
for i, t in enumerate(x):
y[i] = naive_estimator(t, data)
plt.plot(x, y)
plt.title("Naive Survival Estimate")
plt.xlabel("Time")
plt.ylabel("Estimated cumulative survival rate")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 3
Next, let's compare this with the Kaplan-Meier estimate. In the cell below, write a function that computes the Kaplan-Meier estimate of $S(t)$ at every distinct time in the dataset. Recall the Kaplan-Meier estimate: $$S(t) = \prod_{t_i \leq t} (1 - \frac{d_i}{n_i})$$ where $t_i$ are the event times observed in the dataset, $d_i$ is the number of deaths at time $t_i$, and $n_i$ is the number of people who we know have survived up to time $t_i$.
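For example, with rows $(t_i, e_i) = (5, 0), (10, 1), (15, 0)$: the only death is at $t = 10$, where $n_{10} = 2$ cases are still at risk, so $S(10) = 1 \cdot (1 - \frac{1}{2}) = 0.5$ and $S(15) = 0.5$, matching Test Case 1 below.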
###Code
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
def HomemadeKM(df):
"""
Return KM estimate evaluated at every distinct
time (event or censored) recorded in the dataset.
Event times and probabilities should begin with
time 0 and probability 1.
Example:
input:
Time Censor
0 5 0
1 10 1
2 15 0
correct output:
event_times: [0, 5, 10, 15]
S: [1.0, 1.0, 0.5, 0.5]
Args:
df (dataframe): dataframe which has columns for Time
and Event, defined as usual.
Returns:
event_times (list of ints): array of unique event times
(begins with 0).
S (list of floats): array of survival probabilites, so that
S[i] = P(T > event_times[i]). This
begins with 1.0 (since no one dies at time
0).
"""
# individuals are considered to have survival probability 1
# at time 0
event_times = [0]
p = 1.0
S = [p]
### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###
# get collection of unique observed event times
observed_event_times = df.Time.unique()
# sort event times
observed_event_times = sorted(observed_event_times)
# iterate through event times
for t in observed_event_times:
# compute n_t, number of people who survive at least to time t
n_t = len(df[df.Time >= t])
# compute d_t, number of people who die at time t
d_t = len(df[(df.Time == t) & (df.Event == 1)])
# update p
p = p*(1 - float(d_t)/n_t)
# update S and event_times (ADD code below)
# hint: use append
event_times.append(t)
S.append(p)
### END CODE HERE ###
return event_times, S
print("TEST CASES:\n")
print("Test Case 1\n")
print("Test DataFrame:")
sample_df = pd.DataFrame(columns = ["Time", "Event"])
sample_df.Time = [5, 10, 15]
sample_df.Event = [0, 1, 0]
print(sample_df.head())
print("\nOutput:")
x, y = HomemadeKM(sample_df)
print("Event times: {}, Survival Probabilities: {}".format(x, y))
print("\nExpected:")
print("Event times: [0, 5, 10, 15], Survival Probabilities: [1.0, 1.0, 0.5, 0.5]")
print("\nTest Case 2\n")
print("Test DataFrame:")
sample_df = pd.DataFrame(columns = ["Time", "Event"])
sample_df.loc[:, "Time"] = [2, 15, 12, 10, 20]
sample_df.loc[:, "Event"] = [0, 0, 1, 1, 1]
print(sample_df.head())
print("\nOutput:")
x, y = HomemadeKM(sample_df)
print("Event times: {}, Survival Probabilities: {}".format(x, y))
print("\nExpected:")
print("Event times: [0, 2, 10, 12, 15, 20], Survival Probabilities: [1.0, 1.0, 0.75, 0.5, 0.5, 0.0]")
###Output
TEST CASES:
Test Case 1
Test DataFrame:
Time Event
0 5 0
1 10 1
2 15 0
Output:
Event times: [0, 5, 10, 15], Survival Probabilities: [1.0, 1.0, 0.5, 0.5]
Expected:
Event times: [0, 5, 10, 15], Survival Probabilities: [1.0, 1.0, 0.5, 0.5]
Test Case 2
Test DataFrame:
Time Event
0 2 0
1 15 0
2 12 1
3 10 1
4 20 1
Output:
Event times: [0, 2, 10, 12, 15, 20], Survival Probabilities: [1.0, 1.0, 0.75, 0.5, 0.5, 0.0]
Expected:
Event times: [0, 2, 10, 12, 15, 20], Survival Probabilities: [1.0, 1.0, 0.75, 0.5, 0.5, 0.0]
###Markdown
Now let's plot the two against each other on the data to see the difference.
###Code
max_time = data.Time.max()
x = range(0, max_time+1)
y = np.zeros(len(x))
for i, t in enumerate(x):
y[i] = naive_estimator(t, data)
plt.plot(x, y, label="Naive")
x, y = HomemadeKM(data)
plt.step(x, y, label="Kaplan-Meier")
plt.xlabel("Time")
plt.ylabel("Survival probability estimate")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Question
What differences do you observe between the naive estimator and the Kaplan-Meier estimator? Do any of our earlier explorations of the dataset help to explain these differences?
5. Subgroup Analysis
We see that along with `Time` and `Event`, we have a column called `Stage_group`.
- A value of 1 in this column denotes a patient with stage III cancer
- A value of 2 denotes stage IV.
We want to compare the survival functions of these two groups. This time we'll use the `KaplanMeierFitter` class from `lifelines`. Run the next cell to fit and plot the Kaplan-Meier curves for each group.
###Code
S1 = data[data.Stage_group == 1]
km1 = KM()
km1.fit(S1.loc[:, 'Time'], event_observed = S1.loc[:, 'Event'], label = 'Stage III')
S2 = data[data.Stage_group == 2]
km2 = KM()
km2.fit(S2.loc[:, "Time"], event_observed = S2.loc[:, 'Event'], label = 'Stage IV')
ax = km1.plot(ci_show=False)
km2.plot(ax = ax, ci_show=False)
plt.xlabel('time')
plt.ylabel('Survival probability estimate')
plt.savefig('two_km_curves', dpi=300)
###Output
_____no_output_____
###Markdown
Let's compare the survival functions at 90, 180, 270, and 360 days
###Code
survivals = pd.DataFrame([90, 180, 270, 360], columns = ['time'])
survivals.loc[:, 'Group 1'] = km1.survival_function_at_times(survivals['time']).values
survivals.loc[:, 'Group 2'] = km2.survival_function_at_times(survivals['time']).values
survivals
###Output
_____no_output_____
###Markdown
This makes clear the difference in survival between the Stage III and IV cancer groups in the dataset.
5.1 Bonus: Log-Rank Test
To say whether there is a statistically significant difference between the survival curves, we can run the log-rank test. This test gives the p-value: the probability of observing a difference at least this large if the two curves were truly the same. The derivation of the log-rank test is somewhat complicated, but luckily `lifelines` has a simple function to compute it. Run the next cell to compute a p-value using `lifelines.statistics.logrank_test`.
###Code
def logrank_p_value(group_1_data, group_2_data):
result = logrank_test(group_1_data.Time, group_2_data.Time,
group_1_data.Event, group_2_data.Event)
return result.p_value
logrank_p_value(S1, S2)
###Output
_____no_output_____ |
examples/Advanced Plotting/Animations.ipynb | ###Markdown
Animations of marks and axes can be enabled by setting 'animation_duration' property of the figure Line Animations
###Code
import numpy as np
import bqplot.pyplot as plt
from bqplot import *
axes_options = {'x': {'label': 'x'}, 'y': {'label': 'y'}}
x = np.arange(100)
y = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks
fig = plt.figure(animation_duration=1000)
lines = plt.plot(x=x, y=y, colors=['red', 'green'], axes_options=axes_options)
fig
# update data of the line mark
lines.y = np.cumsum(np.random.randn(2, 100), axis=1)
###Output
_____no_output_____
###Markdown
Scatter Animations
###Code
fig = plt.figure(animation_duration=1000)
x, y = np.random.rand(2, 20)
scatt = plt.scatter(x, y, colors=['blue'], axes_options=axes_options)
fig
#data updates
scatt.x = np.random.rand(20) * 10
scatt.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Pie Animations
###Code
data = np.random.rand(6)
fig = plt.figure(animation_duration=1000)
pie = plt.pie(data, radius=180, sort=False, display_labels='outside', display_values=True,
values_format='.0%', labels=list('ABCDEFGHIJ'))
fig
pie.sizes = np.random.rand(8)
pie.sort = True
#make pie a donut
with pie.hold_sync():
pie.radius = 180
pie.inner_radius = 120
###Output
_____no_output_____
###Markdown
Bar animations
###Code
n = 10
x = list('ABCDEFGHIJ')
y1, y2 = np.random.rand(2, n)
fig = plt.figure(animation_duration=1000)
bar = plt.bar(x, [y1, y2], padding=0.2, type='grouped')
fig
y1, y2 = np.random.rand(2, n)
bar.y = [y1, y2]
###Output
_____no_output_____
###Markdown
Multiple Mark Animations (using the object model)
###Code
xs = LinearScale()
ys1 = LinearScale()
ys2 = LinearScale()
x = np.arange(20)
y = np.cumsum(np.random.randn(20))
y1 = np.random.rand(20)
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys1}, colors=['magenta'], marker='square')
bar = Bars(x=x, y=y1, scales={'x': xs, 'y': ys2}, colorpadding=0.2, colors=['steelblue'])
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax1 = Axis(scale=ys1, orientation='vertical', tick_format='0.1f', label='y', grid_lines='solid')
yax2 = Axis(scale=ys2, orientation='vertical', side='right', tick_format='0.0%', label='y1', grid_lines='none')
Figure(marks=[bar, line], axes=[xax, yax1, yax2], animation_duration=1000)
# update mark data
line.y = np.cumsum(np.random.randn(20))
bar.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Animations of marks and axes can be enabled by setting 'animation_duration' property of the figure Line Animations
###Code
import numpy as np
import bqplot.pyplot as plt
from bqplot import LinearScale, Axis, Lines, Bars, Figure
axes_options = {"x": {"label": "x"}, "y": {"label": "y"}}
x = np.arange(100)
y = np.cumsum(np.random.randn(2, 100), axis=1) # two random walks
fig = plt.figure(animation_duration=1000)
lines = plt.plot(x=x, y=y, colors=["red", "green"], axes_options=axes_options)
fig
# update data of the line mark
lines.y = np.cumsum(np.random.randn(2, 100), axis=1)
###Output
_____no_output_____
###Markdown
Scatter Animations
###Code
fig = plt.figure(animation_duration=1000)
x, y = np.random.rand(2, 20)
scatt = plt.scatter(x, y, colors=["blue"], axes_options=axes_options)
fig
# data updates
scatt.x = np.random.rand(20) * 10
scatt.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Pie Animations
###Code
data = np.random.rand(6)
fig = plt.figure(animation_duration=1000)
pie = plt.pie(
data,
radius=180,
sort=False,
display_labels="outside",
display_values=True,
values_format=".0%",
labels=list("ABCDEFGHIJ"),
)
fig
pie.sizes = np.random.rand(8)
pie.sort = True
# make pie a donut
with pie.hold_sync():
pie.radius = 180
pie.inner_radius = 120
###Output
_____no_output_____
###Markdown
Bar animations
###Code
n = 10
x = list("ABCDEFGHIJ")
y1, y2 = np.random.rand(2, n)
fig = plt.figure(animation_duration=1000)
bar = plt.bar(x, [y1, y2], padding=0.2, type="grouped")
fig
y1, y2 = np.random.rand(2, n)
bar.y = [y1, y2]
###Output
_____no_output_____
###Markdown
Multiple Mark Animations (using the object model)
###Code
xs = LinearScale()
ys1 = LinearScale()
ys2 = LinearScale()
x = np.arange(20)
y = np.cumsum(np.random.randn(20))
y1 = np.random.rand(20)
line = Lines(x=x, y=y, scales={"x": xs, "y": ys1}, colors=["magenta"], marker="square")
bar = Bars(
x=x, y=y1, scales={"x": xs, "y": ys2}, colorpadding=0.2, colors=["steelblue"]
)
xax = Axis(scale=xs, label="x", grid_lines="solid")
yax1 = Axis(
scale=ys1, orientation="vertical", tick_format="0.1f", label="y", grid_lines="solid"
)
yax2 = Axis(
scale=ys2,
orientation="vertical",
side="right",
tick_format="0.0%",
label="y1",
grid_lines="none",
)
Figure(marks=[bar, line], axes=[xax, yax1, yax2], animation_duration=1000)
# update mark data
line.y = np.cumsum(np.random.randn(20))
bar.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Animations of marks and axes can be enabled by setting 'animation_duration' property of the figure Line Animations
###Code
import numpy as np
import bqplot.pyplot as plt
from bqplot import LinearScale, Axis, Lines, Bars, Figure
axes_options = {'x': {'label': 'x'}, 'y': {'label': 'y'}}
x = np.arange(100)
y = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks
fig = plt.figure(animation_duration=1000)
lines = plt.plot(x=x, y=y, colors=['red', 'green'], axes_options=axes_options)
fig
# update data of the line mark
lines.y = np.cumsum(np.random.randn(2, 100), axis=1)
###Output
_____no_output_____
###Markdown
Scatter Animations
###Code
fig = plt.figure(animation_duration=1000)
x, y = np.random.rand(2, 20)
scatt = plt.scatter(x, y, colors=['blue'], axes_options=axes_options)
fig
#data updates
scatt.x = np.random.rand(20) * 10
scatt.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Pie Animations
###Code
data = np.random.rand(6)
fig = plt.figure(animation_duration=1000)
pie = plt.pie(data, radius=180, sort=False, display_labels='outside', display_values=True,
values_format='.0%', labels=list('ABCDEFGHIJ'))
fig
pie.sizes = np.random.rand(8)
pie.sort = True
#make pie a donut
with pie.hold_sync():
pie.radius = 180
pie.inner_radius = 120
###Output
_____no_output_____
###Markdown
Bar animations
###Code
n = 10
x = list('ABCDEFGHIJ')
y1, y2 = np.random.rand(2, n)
fig = plt.figure(animation_duration=1000)
bar = plt.bar(x, [y1, y2], padding=0.2, type='grouped')
fig
y1, y2 = np.random.rand(2, n)
bar.y = [y1, y2]
###Output
_____no_output_____
###Markdown
Multiple Mark Animations (using the object model)
###Code
xs = LinearScale()
ys1 = LinearScale()
ys2 = LinearScale()
x = np.arange(20)
y = np.cumsum(np.random.randn(20))
y1 = np.random.rand(20)
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys1}, colors=['magenta'], marker='square')
bar = Bars(x=x, y=y1, scales={'x': xs, 'y': ys2}, colorpadding=0.2, colors=['steelblue'])
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax1 = Axis(scale=ys1, orientation='vertical', tick_format='0.1f', label='y', grid_lines='solid')
yax2 = Axis(scale=ys2, orientation='vertical', side='right', tick_format='0.0%', label='y1', grid_lines='none')
Figure(marks=[bar, line], axes=[xax, yax1, yax2], animation_duration=1000)
# update mark data
line.y = np.cumsum(np.random.randn(20))
bar.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Animations of marks and axes can be enabled by setting 'animation_duration' property of the figure Line Animations
###Code
import numpy as np
from bqplot import *
xs = LinearScale()
ys = LinearScale()
x = np.arange(100)
y = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green'])
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax = Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')
Figure(marks=[line], axes=[xax, yax], animation_duration=1000)
# update data of the line mark
line.y = np.cumsum(np.random.randn(2, 100), axis=1)
###Output
_____no_output_____
###Markdown
Scatter Animations
###Code
xs = LinearScale()
ys = LinearScale()
x, y = np.random.rand(2, 20)
scatt = Scatter(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['blue'])
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax = Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid')
Figure(marks=[scatt], axes=[xax, yax], animation_duration=1000)
#data updates
scatt.x = np.random.rand(20) * 10
scatt.y = np.random.rand(20)
###Output
_____no_output_____
###Markdown
Pie Animations
###Code
data = np.random.rand(6)
pie = Pie(sizes=data, radius=180, sort=False,
display_labels='outside', display_values=True,
values_format='.0%', labels=list('ABCDEFGHIJ'))
Figure(marks=[pie], animation_duration=1000)
pie.sizes = np.random.rand(8)
pie.sort = True
#make pie a donut
with pie.hold_sync():
pie.radius = 180
pie.inner_radius = 120
###Output
_____no_output_____
###Markdown
Bar animations
###Code
n = 10
x = list('ABCDEFGHIJ')
y1, y2 = np.random.rand(2, n)
xs = OrdinalScale()
ys = LinearScale()
bar = Bars(x=x, y=[y1, y2], scales={'x': xs, 'y': ys}, padding=0.2, type='grouped')
xax = Axis(scale=xs)
yax = Axis(scale=ys, orientation='vertical', tick_format='0.0%', grid_lines='solid')
Figure(marks=[bar], axes=[xax, yax], animation_duration=1000)
y1, y2 = np.random.rand(2, n)
bar.y = [y1, y2]
###Output
_____no_output_____
###Markdown
Multiple Mark Animations
###Code
xs = LinearScale()
ys1 = LinearScale()
ys2 = LinearScale()
x = np.arange(20)
y = np.cumsum(np.random.randn(20))
y1 = np.random.rand(20)
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys1}, colors=['magenta'], marker='square')
bar = Bars(x=x, y=y1, scales={'x': xs, 'y': ys2}, colorpadding=0.2, colors=['steelblue'])
xax = Axis(scale=xs, label='x', grid_lines='solid')
yax1 = Axis(scale=ys1, orientation='vertical', tick_format='0.1f', label='y', grid_lines='solid')
yax2 = Axis(scale=ys2, orientation='vertical', side='right', tick_format='0.0%', label='y1', grid_lines='none')
Figure(marks=[bar, line], axes=[xax, yax1, yax2], animation_duration=1000)
# update mark data
line.y = np.cumsum(np.random.randn(20))
bar.y = np.random.rand(20)
###Output
_____no_output_____ |
PyTorch/Exercise_4.ipynb | ###Markdown
Exercise 4: Training a Model (GPU)
###Code
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
device = "cuda" if torch.cuda.is_available() else "cpu"
device
x = torch.rand((2,3,4))
x.device
x = x.to(device)
x.device
###Output
_____no_output_____
###Markdown
Data Loading and Preprocessing
###Code
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = datasets.CIFAR10(
root='./data',
train=True,
download=True,
transform=transform_train)
trainloader = DataLoader(
trainset,
batch_size=32,
shuffle=True)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
###Output
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz
###Markdown
Model Design
###Code
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
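        # two conv -> ReLU -> max-pool stages, flatten, then three fully connected layers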
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
model = LeNet()
###Output
_____no_output_____
###Markdown
Define Loss Function and Optimizer
###Code
import torch.optim as optim
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),
lr=0.001,
momentum=0.9)
model.to(device) #### GPU
for epoch in range(5):
running_loss = 0
for inputs, labels in trainloader:
optimizer.zero_grad()
outputs = model(inputs.to(device)) #### GPU
loss = loss_fn(outputs, labels.to(device)) #### GPU
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Average Loss per Epoch:", running_loss/len(trainloader))
###Output
Average Loss per Epoch: 2.081776667236138
Average Loss per Epoch: 1.7247091567600223
Average Loss per Epoch: 1.5949005734363735
Average Loss per Epoch: 1.5164100259294588
Average Loss per Epoch: 1.4575592195537712
###Markdown
Test Model Performance
###Code
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
testset = datasets.CIFAR10(
root='./data',
train=False,
download=True,
transform=transform_test)
testloader = DataLoader(
testset,
batch_size=32,
shuffle=True)
correct = 0
with torch.no_grad():
for inputs, labels in testloader:
outputs = model(inputs.to(device)) #### GPU
_, preds = torch.max(outputs, 1)
correct += (preds == labels).sum().item()
print("Model accuracy on %d Test Images: %.2f %%" %
(len(testset), (correct/len(testset) * 100)))
###Output
Model accuracy on 10000 Test Images: 52.15 %
|
notebooks/08_Neural_Networks.ipynb | ###Markdown
Made With ML | Applied ML · MLOps · Production. Join 20K+ developers in learning how to responsibly deliver value with applied ML. 🔥 Among the top ML repositories on GitHub.
Neural Networks
In this lesson, we will explore multilayer perceptrons (MLPs), which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch.
Overview
Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far, but with non-linear activation functions, since our data will be highly non-linear.
$z_1 = XW_1$
$a_1 = f(z_1)$
$z_2 = a_1W_2$
$\hat{y} = softmax(z_2)$ (classification)
* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)
* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)
* $z_1$ = outputs from first layer | $\in \mathbb{R}^{NXH}$
* $f$ = non-linear activation function
* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$
* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)
* $z_2$ = outputs from second layer | $\in \mathbb{R}^{NXC}$
* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples)
* **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.
* **Advantages:**
  * Can model non-linear patterns in the data really well.
* **Disadvantages:**
  * Overfits easily.
  * Computationally intensive as network increases in size.
  * Not easily interpretable.
* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed-forward operations (affine transformation ($XW$) followed by a non-linear operation).
> We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations.
Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/madewithml/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
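    # evaluate the model over a dense 2D grid and color each region by its predicted class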
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
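###Markdown
A confusion matrix is a useful complement to the precision/recall report above: rows are true classes and columns are predicted classes, so the linear model's mistakes become visible per class. A quick sketch using scikit-learn (`y_test` and `y_pred` come from the cells above):
###Code
# Confusion matrix: rows = true classes, columns = predicted classes
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true=y_test, y_pred=y_pred)
print (cm)
###Output
_____no_output_____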
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data, yet our activation functions were linear. We need an activation function that allows our model to learn and map the non-linearity in our data. There are many different options, so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
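###Markdown
One variant worth plotting alongside these, and referenced in the discussion below, is leaky ReLU, which keeps a small slope for negative inputs instead of zeroing them. A quick sketch reusing the `x` tensor from above (the 0.1 slope is just an illustrative choice):
###Code
# Leaky ReLU: small non-zero slope (here 0.1) for negative inputs
plt.title("Leaky ReLU activation")
y = F.leaky_relu(x, negative_slope=0.1)
plt.plot(x.numpy(), y.numpy())
plt.show()
###Output
_____no_output_____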
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints, so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU (sketched above) or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPy Now let's create our multilayer perceptron (MLP), which is going to be exactly like the logistic regression model but with an activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to the first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) against the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) using the objective (cost) function to determine the loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
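###Markdown
To make the loss concrete, a tiny worked example with made-up probabilities (not values from our model): if the correct class has predicted probability 0.4, that sample contributes $-ln(0.4) \approx 0.916$ to the loss.
###Code
# Worked example: cross-entropy for a single sample (hypothetical numbers)
sample_probs = np.array([0.3, 0.3, 0.4])  # predicted class distribution
correct_class = 2                         # true label
sample_loss = -np.log(sample_probs[correct_class])
print (f"-ln(0.4) = {sample_loss:.3f}")   # ≈ 0.916
###Output
_____no_output_____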
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights. The gradient of the loss w.r.t. $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$ The gradient of the loss w.r.t. $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
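###Markdown
A standard way to sanity-check backprop math like the above is a finite-difference gradient check: nudge one weight by a small epsilon and compare the resulting change in loss to the analytic gradient. A minimal sketch for a single entry of `W2` (the `eps` value and the helper below are illustrative additions, not part of the lesson):
###Code
# Numerical gradient check for one entry of W2 (a1, b2, y_train from the cells above)
eps = 1e-5
def compute_loss(W2_):
    logits_ = np.dot(a1, W2_) + b2
    exp_ = np.exp(logits_)
    y_hat_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    return np.sum(-np.log(y_hat_[range(len(y_hat_)), y_train])) / len(y_train)
W2_plus, W2_minus = W2.copy(), W2.copy()
W2_plus[0, 0] += eps
W2_minus[0, 0] -= eps
numerical = (compute_loss(W2_plus) - compute_loss(W2_minus)) / (2 * eps)
print (f"numerical: {numerical:.6f}, analytic: {dW2[0, 0]:.6f}")  # should agree closely
###Output
_____no_output_____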
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
    dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.htmltorch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
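###Markdown
A natural next step after inference is persisting the trained weights. A minimal sketch using PyTorch's `state_dict` (the file name is just an example):
###Code
# Save only the learned parameters, then restore them into a fresh model
torch.save(model.state_dict(), "mlp.pt")  # example file name
loaded_model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
loaded_model.load_state_dict(torch.load("mlp.pt"))
loaded_model.eval()  # switch to evaluation mode before inference
###Output
_____no_output_____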
###Markdown
Initializing weights So far we have been initializing weights with small random values, and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass, and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
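###Markdown
Note that `init_weights` must be called explicitly with the class as written. A quick sanity check of the initializer (an illustrative addition; for Xavier normal the expected standard deviation is roughly $gain \cdot \sqrt{2/(fan_{in}+fan_{out})}$):
###Code
# Instantiate, apply Xavier initialization, and inspect the weight scale
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
model.init_weights()
print (f"fc1 weight std after Xavier init: {model.fc1.weight.std().item():.3f}")
###Output
_____no_output_____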
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of our data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer, and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
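###Markdown
Dropout is only active in training mode. A quick demonstration that `model.train()` and `model.eval()` toggle it (an illustrative check on a random input):
###Code
# Dropout fires in train mode but is disabled in eval mode
x_sample = torch.randn(1, INPUT_DIM)
model.train()
print (f"train mode call 1: {model(x_sample)}")
print (f"train mode call 2: {model(x_sample)}")  # almost surely differs (dropout active)
model.eval()
print (f"eval mode call 1: {model(x_sample)}")
print (f"eval mode call 2: {model(x_sample)}")  # identical (dropout disabled)
###Output
_____no_output_____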
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%), but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
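###Markdown
One simple way to catch overfitting like this is to compare training and validation loss after (or during) training; a large gap signals memorization. A minimal sketch reusing the tensors above (the threshold is illustrative):
###Code
# Compare train vs. validation loss; a large gap signals overfitting
model.eval()
with torch.no_grad():
    train_loss = loss_fn(model(X_train), y_train).item()
    val_loss = loss_fn(model(X_val), y_val).item()
print (f"train loss: {train_loss:.2f}, val loss: {val_loss:.2f}")
if val_loss > 2 * train_loss:  # illustrative threshold
    print ("Large generalization gap → likely overfitting")
###Output
_____no_output_____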
###Markdown
Made With ML: Applied ML · MLOps · Production Neural Networks In this lesson, we will explore multilayer perceptrons (MLPs), which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to the first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)  # use the function argument, not the global
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
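###Markdown
As a quick sanity check (an added sketch, using the `collections` import from above), we can confirm the splits really are stratified by comparing class counts per split:
###Code
# Class distributions should be roughly proportional across the stratified splits
print (f"train: {collections.Counter(y_train)}")
print (f"val: {collections.Counter(y_val)}")
print (f"test: {collections.Counter(y_test)}")
###Output
_____no_output_____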
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see its inner workings, but this time we'll use scikit-learn's [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class, which performs the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32 # defined for completeness; training below runs full-batch
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
    X_grid = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
    y_pred = model(X_grid, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
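###Markdown
The note below mentions leaky ReLU as a remedy for dying ReLUs — here's a minimal added sketch of it (the 0.1 negative slope is an arbitrary illustrative choice, not from the lesson):
###Code
# Leaky ReLU keeps a small slope for negative inputs instead of zeroing them out
y_leaky = F.leaky_relu(x, negative_slope=0.1)
plt.title("Leaky ReLU activation (negative_slope=0.1)")
plt.plot(x.numpy(), y_leaky.numpy())
plt.show()
###Output
_____no_output_____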
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU (sketched above) or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPy Now let's create our multilayer perceptron (MLP), which is going to be exactly like the logistic regression model but with a non-linear activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model $\hat{y}$ that models $y$ given $X$ . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
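###Markdown
One caveat worth flagging (an added note, not part of the lesson): `np.exp` can overflow for large logits. A standard numerically stable variant subtracts the row-wise max first, which leaves the result unchanged because $\frac{e^{z-m}}{\sum_j e^{z_j-m}} = \frac{e^{z}}{\sum_j e^{z_j}}$:
###Code
# Numerically stable softmax: shifting by the row max doesn't change the output
shifted = logits - np.max(logits, axis=1, keepdims=True)
exp_shifted = np.exp(shifted)
y_hat_stable = exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True)
print (f"max difference vs. unshifted softmax: {np.max(np.abs(y_hat - y_hat_stable))}")
###Output
_____no_output_____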
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss (averaged over the $N$ samples, matching the code below). * $J(\theta) = - \frac{1}{N} \sum_i ln(\hat{y_i}) = - \frac{1}{N} \sum_i ln (\frac{e^{a_{1,i}W_{2y}}}{\sum_j e^{a_{1,i}W_{2j}}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights. The gradient of the loss w.r.t. $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t. $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
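###Markdown
Hand-derived gradients are easy to get wrong, so a centered finite-difference check is a useful sanity test (an added sketch, assuming the variables from the cells above are still in scope):
###Code
# Gradient check: compare analytical dW2 against centered finite differences
eps = 1e-5
def loss_for(W2_candidate):
    """Recompute the cross-entropy loss for a perturbed W2."""
    logits_ = np.dot(a1, W2_candidate) + b2
    exp_ = np.exp(logits_)
    probs_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    return np.sum(-np.log(probs_[range(len(probs_)), y_train])) / len(y_train)
for i, j in [(0, 0), (5, 1), (99, 2)]: # a few arbitrary entries of W2
    W2_plus, W2_minus = W2.copy(), W2.copy()
    W2_plus[i, j] += eps
    W2_minus[i, j] -= eps
    numerical = (loss_for(W2_plus) - loss_for(W2_minus)) / (2 * eps)
    print (f"dW2[{i},{j}] analytical: {dW2[i, j]:.8f} | numerical: {numerical:.8f}")
###Output
_____no_output_____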
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
    # Update weights (note: this from-scratch loop uses a larger learning rate of 1e0)
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values, and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [Xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu')) # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
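###Markdown
Note that `init_weights` is not called automatically — you have to invoke it yourself after constructing the model. A small added sketch of its effect on the first layer's weight scale:
###Code
# Compare fc1's weight spread before and after applying Xavier initialization
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (f"fc1 weight std before init_weights: {model.fc1.weight.std().item():.4f}")
model.init_weights()
print (f"fc1 weight std after init_weights: {model.fc1.weight.std().item():.4f}")
###Output
_____no_output_____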
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of your data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for a fraction `dropout_p` of the neurons in each layer, and the dropped set changes on every forward pass. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # fraction of the neurons that are dropped on each forward pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu')) # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
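###Markdown
Since dropout is only active in training mode, PyTorch requires toggling it explicitly — `model.train()` enables it and `model.eval()` disables it. An added sketch (using the untrained model above, so the probabilities themselves are meaningless):
###Code
# With dropout enabled, repeated forward passes typically differ; in eval mode they don't
x_sample = X_test[0:1]
model.train() # dropout on
print (f"train mode: {model(x_sample, apply_softmax=True).detach().numpy()}")
print (f"train mode: {model(x_sample, apply_softmax=True).detach().numpy()}")
model.eval() # dropout off
print (f"eval mode: {model(x_sample, apply_softmax=True).detach().numpy()}")
###Output
_____no_output_____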
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Made With MLApplied ML · MLOps · ProductionJoin 30K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top ML repositories on GitHub Neural NetworksIn this lesson, we will explore multilayer perceptrons (MLPs) which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview Our goal is to learn a model $\hat{y}$ that models $y$ given $X$ . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPyNow let's create our multilayer perceptron (MLP) which is going to be exactly like the logistic regression model but with the activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. The gradient of the loss w.r.t to $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 4 until model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.htmltorch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in)) # ReLU activaton function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in)) # ReLU activaton function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of your data but this isn't always an option. Fortuntely, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks fromOverfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
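###Markdown
One detail worth verifying (a small sketch, reusing the model we just initialized): dropout is only active in train mode, so the same input gives stochastic outputs under `model.train()` and deterministic ones under `model.eval()`.
###Code
# Dropout fires only in train mode
model.train()
x_tmp = torch.randn(1, INPUT_DIM) # hypothetical input for illustration
print (model(x_tmp)) # stochastic: a different set of neurons is dropped each call
print (model(x_tmp))
model.eval()
print (model(x_tmp)) # deterministic: dropout disabled
###Output
_____no_output_____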
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%), but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
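###Markdown
As a quick preview of one mitigation (a sketch; we'll cover regularization properly in future lessons), L2 regularization can be added directly through the optimizer's `weight_decay` argument. The `1e-4` value here is a hypothetical setting for illustration.
###Code
# Adam with L2 regularization (weight decay) to penalize large weights
optimizer = Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
###Output
_____no_output_____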
###Markdown
Neural Networks
In this lesson, we will explore multilayer perceptrons (MLPs), which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch.
Overview
Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far, but with non-linear activation functions, since our data will be highly non-linear.
$z_1 = XW_1$
$a_1 = f(z_1)$
$z_2 = a_1W_2$
$\hat{y} = softmax(z_2)$ (classification)
* $X$ = inputs | $\in \mathbb{R}^{N \times D}$ ($D$ is the number of features)
* $W_1$ = 1st layer weights | $\in \mathbb{R}^{D \times H}$ ($H$ is the number of hidden units in layer 1)
* $z_1$ = outputs from the first layer | $\in \mathbb{R}^{N \times H}$
* $f$ = non-linear activation function
* $a_1$ = activation applied to the first layer's outputs | $\in \mathbb{R}^{N \times H}$
* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{H \times C}$ ($C$ is the number of classes)
* $z_2$ = outputs from the second layer | $\in \mathbb{R}^{N \times C}$
* $\hat{y}$ = prediction | $\in \mathbb{R}^{N \times C}$ ($N$ is the number of samples)
* **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.
* **Advantages:** Can model non-linear patterns in the data really well.
* **Disadvantages:** Overfits easily; computationally intensive as the network increases in size; not easily interpretable.
* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed-forward operations (affine transformation ($XW$) followed by a non-linear operation).
> We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations.
Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
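###Markdown
A quick check (sketch) that the splits are indeed stratified — each split should preserve the overall class ratios. This is also what the `collections` import above is for.
###Code
# Class counts per split should be proportional across train/val/test
print (f"train: {collections.Counter(y_train)}")
print (f"val: {collections.Counter(y_val)}")
print (f"test: {collections.Counter(y_test)}")
###Output
_____no_output_____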
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see how it works internally, but this time we'll use scikit-learn's [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class, which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
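###Markdown
A one-line sanity check (sketch): `inverse_transform` maps the integer tokens back to the original class labels.
###Code
# Decode tokens back to the original labels
print (label_encoder.inverse_transform([0, 1, 2]))
###Output
_____no_output_____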
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
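###Markdown
For reference (a small sketch), the scaler's learned parameters — computed from the training data only — are what get applied to every split.
###Code
# Per-feature mean and scale learned from the training data
print (f"mean: {X_scaler.mean_}, scale: {X_scaler.scale_}")
###Output
_____no_output_____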
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data, while our activation functions were linear. We need an activation function that allows our model to learn and map the non-linearity in our data. There are many different options, so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
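###Markdown
For comparison (a sketch of the leaky ReLU variant discussed just below), leaky ReLU keeps a small slope for negative inputs instead of clipping them to zero.
###Code
# Leaky ReLU (small negative slope instead of a hard zero)
y = F.leaky_relu(x, negative_slope=0.01)
plt.title("Leaky ReLU activation")
plt.plot(x.numpy(), y.numpy())
plt.show()
###Output
_____no_output_____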
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints, so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice.
> In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs.
NumPy
Now let's create our multilayer perceptron (MLP), which is going to be exactly like the logistic regression model but with an activation function to map the non-linearity in our data.
> It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch.
Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far, but with non-linear activation functions, since our data will be highly non-linear.
$z_1 = XW_1$
$a_1 = f(z_1)$
$z_2 = a_1W_2$
$\hat{y} = softmax(z_2)$ (classification)
* $X$ = inputs | $\in \mathbb{R}^{N \times D}$ ($D$ is the number of features)
* $W_1$ = 1st layer weights | $\in \mathbb{R}^{D \times H}$ ($H$ is the number of hidden units in layer 1)
* $z_1$ = outputs from the first layer | $\in \mathbb{R}^{N \times H}$
* $f$ = non-linear activation function
* $a_1$ = activation applied to the first layer's outputs | $\in \mathbb{R}^{N \times H}$
* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{H \times C}$ ($C$ is the number of classes)
* $z_2$ = outputs from the second layer | $\in \mathbb{R}^{N \times C}$
* $\hat{y}$ = prediction | $\in \mathbb{R}^{N \times C}$ ($N$ is the number of samples)
Initialize weights
1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
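###Markdown
A practical aside (sketch): `np.exp` can overflow for large logits, so the standard trick is to subtract each row's max before exponentiating — the softmax result is unchanged since it's invariant to constant shifts.
###Code
# Numerically stable softmax: shift by the row max before exponentiating
def stable_softmax(z):
    z = z - np.max(z, axis=1, keepdims=True) # the shift doesn't change the output
    exp_z = np.exp(z)
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
assert np.allclose(stable_softmax(logits), y_hat)
###Output
_____no_output_____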
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
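###Markdown
Sanity check (sketch): since the initial weights are tiny, the predictions are near-uniform over the 3 classes, so the starting loss should be close to $-\ln(1/3) \approx 1.099$.
###Code
# Initial loss should be ≈ ln(3) ≈ 1.099 for near-uniform predictions
print (f"loss: {loss:.3f}")
###Output
_____no_output_____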
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. The gradient of the loss w.r.t to $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop (zero gradient wherever the unit was inactive)
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
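###Markdown
Before trusting the math, a finite-difference check (a sketch added here) on a single entry of $W_2$: the numerical gradient should closely match the analytic `dW2` computed above.
###Code
# Finite-difference gradient check for one entry of W2
def loss_for(W2_):
    logits_ = np.dot(a1, W2_) + b2
    exp_ = np.exp(logits_)
    y_hat_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    return np.sum(-np.log(y_hat_[range(len(y_hat_)), y_train])) / len(y_train)
eps, i, j = 1e-5, 0, 0
W2_pos, W2_neg = W2.copy(), W2.copy()
W2_pos[i, j] += eps
W2_neg[i, j] -= eps
numeric = (loss_for(W2_pos) - loss_for(W2_neg)) / (2 * eps)
print (f"analytic: {dW2[i, j]:.6f}, numeric: {numeric:.6f}")
###Output
_____no_output_____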
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
    dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with the PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in)) # ReLU activaton function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of your data but this isn't always an option. Fortuntely, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks fromOverfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Made With MLApplied ML · MLOps · ProductionJoin 20K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top ML repositories on GitHub Neural NetworksIn this lesson, we will explore multilayer perceptrons (MLPs) which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview Our goal is to learn a model $\hat{y}$ that models $y$ given $X$ . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/madewithml/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPyNow let's create our multilayer perceptron (MLP) which is going to be exactly like the logistic regression model but with the activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and btain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. The gradient of the loss w.r.t to $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 4 until model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLu backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.htmltorch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
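# (NUM_EPOCHS is 10, so the MLP trains for 100 epochs)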
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
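###Markdown
Note that `init_weights` is a custom method, so we have to call it explicitly after constructing the model. A minimal sketch (reusing `INPUT_DIM`, `HIDDEN_DIM` and `NUM_CLASSES` from above):
###Code
# Apply Xavier initialization to fc1 before training
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
model.init_weights()
print (f"fc1 weight std: {model.fc1.weight.std().item():.3f}")
###Output
_____no_output_____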
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of our data but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
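###Markdown
Dropout is only active in training mode; calling `model.eval()` disables it at inference time. A minimal sketch using the model above (`x` here is a made-up input):
###Code
x = torch.randn(1, INPUT_DIM)
model.train() # dropout active: repeated forward passes on the same input can differ
print (model(x))
model.eval() # dropout disabled: forward passes are deterministic
print (model(x))
###Output
_____no_output_____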
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Made With MLApplied ML · MLOps · ProductionJoin 20K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top ML repositories on GitHub Neural NetworksIn this lesson, we will explore multilayer perceptrons (MLPs) which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview Our goal is to learn a model $\hat{y}$ that models $y$ given $X$ . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPyNow let's create our multilayer perceptron (MLP) which is going to be exactly like the logistic regression model but with the activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
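###Markdown
One practical note (a minimal sketch using the `logits` from above): subtracting the row-wise max before exponentiating leaves the softmax result unchanged but avoids overflow when logits are large.
###Code
# Numerically stable softmax (identical probabilities, safer exp)
shifted = logits - np.max(logits, axis=1, keepdims=True)
y_hat_stable = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
print (np.allclose(y_hat, y_hat_stable))
###Output
_____no_output_____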
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
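###Markdown
Sanity check: with near-uniform predictions over 3 classes, the cross-entropy loss should sit near $-ln(1/3) \approx 1.099$, which is what we see here and at epoch 0 of the training loop below.
###Code
# Expected loss for a uniform 3-class predictor
print (f"-ln(1/3) = {-np.log(1/3):.3f}")
print (f"loss = {loss:.3f}")
###Output
_____no_output_____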
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights. The gradient of the loss w.r.t. $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t. $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
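###Markdown
A quick way to verify backprop (a minimal sketch): compare one analytical entry of `dW2` against a centered finite difference of the mean loss.
###Code
# Finite-difference check on a single weight of W2
def mean_loss_given_W2(W2_):
    logits_ = np.dot(a1, W2_) + b2
    exp_ = np.exp(logits_ - np.max(logits_, axis=1, keepdims=True))
    probs_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    y_idx = np.asarray(y_train)
    return -np.mean(np.log(probs_[range(len(probs_)), y_idx]))
eps = 1e-5
W2_pos, W2_neg = W2.copy(), W2.copy()
W2_pos[0, 0] += eps
W2_neg[0, 0] -= eps
numerical = (mean_loss_given_W2(W2_pos) - mean_loss_given_W2(W2_neg)) / (2 * eps)
print (f"numerical: {numerical:.6f} | analytical: {dW2[0, 0]:.6f}")
###Output
_____no_output_____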
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
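###Markdown
We created a validation split earlier but haven't used it yet; a quick sketch of checking validation accuracy (useful when tuning hyperparameters or deciding when to stop training):
###Code
# Validation accuracy
y_val_pred = model(X_val).max(dim=1)[1]
print (f"val accuracy: {accuracy_fn(y_pred=y_val_pred, y_true=y_val):.1f}")
###Output
_____no_output_____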
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
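###Markdown
Note that `init_weights` isn't called automatically; a minimal usage sketch:
###Code
# Apply Xavier initialization after constructing the model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
model.init_weights()
###Output
_____no_output_____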
###Markdown
Dropout A great technique to help our models generalize (perform well on test data) is to increase the size of our data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
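###Markdown
Keep in mind that dropout is only active in training mode; calling `model.eval()` turns the dropout layer into a pass-through for inference (a quick check below).
###Code
# Dropout is applied in train mode and skipped in eval mode
model.train()
print (model.dropout.training)  # True: neurons are dropped
model.eval()
print (model.dropout.training)  # False: dropout is a no-op
###Output
_____no_output_____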
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Made With MLApplied ML · MLOps · ProductionJoin 30K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top ML repositories on GitHub Neural NetworksIn this lesson, we will explore multilayer perceptrons (MLPs) which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview Our goal is to learn a model $\hat{y}$ that models $y$ given $X$ . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[["X1", "X2"]].values
y = df["color"].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {"c1": "red", "c2": "yellow", "c3": "blue"}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors="k", s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
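###Markdown
Note that `BATCH_SIZE` is defined above but never used: the loop trains on the full dataset every epoch. As a minimal sketch (an optional variation, not part of the original lesson), the same loop could be restructured to use mini-batches with the model, optimizer, and loss function already defined above:
###Code
# Mini-batch training sketch (optional variation; the lesson itself trains full-batch)
num_batches = len(X_train) // BATCH_SIZE
for epoch in range(NUM_EPOCHS):
    indices = torch.randperm(len(X_train))  # reshuffle each epoch so batches differ
    for b in range(num_batches):
        batch = indices[b*BATCH_SIZE:(b+1)*BATCH_SIZE]
        y_pred = model(X_train[batch])
        loss = loss_fn(y_pred, y_train[batch])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
###Output
_____no_output_____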
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data, yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options, so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
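###Markdown
As a quick extra sketch (not in the original lesson), we can also plot leaky ReLU, one of the ReLU variants mentioned below, which keeps a small slope for negative inputs instead of zeroing them:
###Code
# Leaky ReLU (reuses the x tensor from above)
plt.title("Leaky ReLU activation")
y = F.leaky_relu(x, negative_slope=0.1)
plt.plot(x.numpy(), y.numpy())
plt.show()
###Output
_____no_output_____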
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints, so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPy. Now let's create our multilayer perceptron (MLP), which is going to be exactly like the logistic regression model but with an activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
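###Markdown
One common refinement (an addition here, not in the original lesson) is to subtract the row-wise max from the logits before exponentiating; this avoids overflow for large logits and yields the exact same probabilities:
###Code
# Numerically stable softmax (identical result, no overflow for large logits)
shifted = logits - np.max(logits, axis=1, keepdims=True)
exp_shifted = np.exp(shifted)
y_hat_stable = exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True)
assert np.allclose(y_hat, y_hat_stable)
###Output
_____no_output_____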
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
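###Markdown
As a quick sanity check (an addition, not in the original lesson): with near-uniform initial probabilities over 3 classes, the loss should be close to $-\ln(1/3) \approx 1.099$.
###Code
# With ~uniform predictions, cross-entropy ≈ ln(num_classes)
print (f"loss: {loss:.3f} (expected ≈ {np.log(NUM_CLASSES):.3f} at initialization)")
###Output
_____no_output_____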
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights. The gradient of the loss w.r.t. $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t. $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
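###Markdown
A finite-difference check (a debugging sketch, not part of the original lesson) can verify one entry of the analytic `dW2` against a numerical estimate:
###Code
# Numerical gradient check for a single entry of W2
def compute_loss(W2_):
    logits_ = np.dot(a1, W2_) + b2
    exp_ = np.exp(logits_)
    y_hat_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    return np.sum(-np.log(y_hat_[range(len(y_hat_)), y_train])) / len(y_train)

eps = 1e-5
W2_plus, W2_minus = W2.copy(), W2.copy()
W2_plus[0, 0] += eps
W2_minus[0, 0] -= eps
numerical = (compute_loss(W2_plus) - compute_loss(W2_minus)) / (2 * eps)
print (f"analytic: {dW2[0, 0]:.6f}, numerical: {numerical:.6f}")
###Output
_____no_output_____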
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
    dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
    W1 += -1e0 * dW1  # note: this loop uses step size 1.0, not LEARNING_RATE (1e-2) from above
    b1 += -1e0 * db1
    W2 += -1e0 * dW2
    b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format="png")
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{"X1": 0.1, "X2": 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values, and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass, and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain("relu"))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
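###Markdown
A small sketch (an addition, using the `MLP` class defined above) to confirm the effect: after calling `init_weights` on a fresh model, the first layer's weight spread changes from PyTorch's default to the Xavier distribution.
###Code
# Inspect fc1 weight statistics before/after Xavier initialization (fresh model)
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (f"default std: {model.fc1.weight.std().item():.4f}")
model.init_weights()
print (f"xavier std: {model.fc1.weight.std().item():.4f}")
###Output
_____no_output_____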
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of your data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain("relu"))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
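###Markdown
Dropout is only active in training mode; here's a quick sketch (an addition, not in the original lesson) showing that `model.eval()` disables it, making the forward pass deterministic:
###Code
# Dropout behaves differently in train vs. eval mode
x_sample = torch.randn(1, INPUT_DIM)
model.train()
print (torch.equal(model(x_sample), model(x_sample)))  # almost surely False (random drops)
model.eval()
print (torch.equal(model(x_sample), model(x_sample)))  # True (dropout disabled)
###Output
_____no_output_____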
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
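###Markdown
One simple mitigation, previewed here as a sketch (the strategies themselves are covered in later lessons), is L2 regularization via the optimizer's `weight_decay` argument, which penalizes large weights:
###Code
# L2 regularization (weight decay) discourages large weights and reduces overfitting
optimizer = Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-2)
###Output
_____no_output_____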
###Markdown
Made With ML: Applied ML · MLOps · Production. Join 20K+ developers in learning how to responsibly deliver value with applied ML. 🔥 Among the top ML repositories on GitHub. Neural Networks. In this lesson, we will explore multilayer perceptrons (MLPs), which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview. Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/madewithml/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[['X1', 'X2']].values
y = df['color'].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)  # use the function's own train_size argument
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Get metrics
performance = {'overall': {}, 'class': {}}
metrics = precision_recall_fscore_support(y_true, y_pred)
# Overall performance
performance['overall']['precision'] = np.mean(metrics[0])
performance['overall']['recall'] = np.mean(metrics[1])
performance['overall']['f1'] = np.mean(metrics[2])
performance['overall']['num_samples'] = np.float64(np.sum(metrics[3]))
# Per-class performance
for i in range(len(classes)):
performance['class'][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i])
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data, yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options, so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints, so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPy. Now let's create our multilayer perceptron (MLP), which is going to be exactly like the logistic regression model but with an activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights. The gradient of the loss w.r.t. $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t. $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
    dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
    W1 += -1e0 * dW1  # note: this loop uses step size 1.0, not LEARNING_RATE (1e-2) from above
    b1 += -1e0 * db1
    W2 += -1e0 * dW2
    b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format='png')
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
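###Markdown
As a small extension (not in the original lesson), we can inspect the top-k class probabilities for the inference point instead of just the argmax:
###Code
# Top-k class probabilities for the inference point
k = 2
top_probs, top_indices = torch.topk(y_infer, k=k, dim=1)
for p, i in zip(top_probs.detach().numpy()[0], top_indices.detach().numpy()[0]):
    print (f"{label_encoder.inverse_transform([i])[0]}: {p*100.0:.1f}%")
###Output
_____no_output_____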
###Markdown
Initializing weights So far we have been initializing weights with small random values, and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass, and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of your data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain('relu'))  # in-place variant (xavier_normal is deprecated)
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Made With ML: Applied ML · MLOps · Production. Join 30K+ developers in learning how to responsibly deliver value with ML. 🔥 Among the top ML repositories on GitHub. Neural Networks. In this lesson, we will explore multilayer perceptrons (MLPs), which are a basic type of neural network. We'll first motivate non-linear activation functions by trying to fit a linear model (logistic regression) on our non-linear spiral data. Then we'll implement an MLP using just NumPy and then with PyTorch. Overview. Our goal is to learn a model $\hat{y}$ that models $y$ given $X$. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data.* **Advantages:** * Can model non-linear patterns in the data really well.* **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable.* **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). > We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. Set up
###Code
import numpy as np
import random
SEED = 1234
# Set seed for reproducibility
np.random.seed(SEED)
random.seed(SEED)
###Output
_____no_output_____
###Markdown
Load data I created some non-linearly separable spiral data so let's go ahead and download it for our classification task.
###Code
import matplotlib.pyplot as plt
import pandas as pd
# Load data
url = "https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/spiral.csv"
df = pd.read_csv(url, header=0) # load
df = df.sample(frac=1).reset_index(drop=True) # shuffle
df.head()
# Data shapes
X = df[["X1", "X2"]].values
y = df["color"].values
print ("X: ", np.shape(X))
print ("y: ", np.shape(y))
# Visualize data
plt.title("Generated non-linear data")
colors = {"c1": "red", "c2": "yellow", "c3": "blue"}
plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors="k", s=25)
plt.show()
###Output
_____no_output_____
###Markdown
Split data We'll shuffle our dataset (since it's ordered by class) and then create our data splits (stratified on class).
###Code
import collections
from sklearn.model_selection import train_test_split
TRAIN_SIZE = 0.7
VAL_SIZE = 0.15
TEST_SIZE = 0.15
def train_val_test_split(X, y, train_size):
"""Split dataset into data splits."""
    X_train, X_, y_train, y_ = train_test_split(X, y, train_size=train_size, stratify=y)
X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)
return X_train, X_val, X_test, y_train, y_val, y_test
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
###Output
X_train: (1050, 2), y_train: (1050,)
X_val: (225, 2), y_val: (225,)
X_test: (225, 2), y_test: (225,)
Sample point: [-0.63919105 -0.69724176] → c1
###Markdown
Label encoding In the previous lesson we wrote our own label encoder class to see the inner functions but this time we'll use scikit-learn [`LabelEncoder`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) class which does the same operations as ours.
###Code
from sklearn.preprocessing import LabelEncoder
# Output vectorizer
label_encoder = LabelEncoder()
# Fit on train data
label_encoder = label_encoder.fit(y_train)
classes = list(label_encoder.classes_)
print (f"classes: {classes}")
# Convert labels to tokens
print (f"y_train[0]: {y_train[0]}")
y_train = label_encoder.transform(y_train)
y_val = label_encoder.transform(y_val)
y_test = label_encoder.transform(y_test)
print (f"y_train[0]: {y_train[0]}")
# Class weights
counts = np.bincount(y_train)
class_weights = {i: 1.0/count for i, count in enumerate(counts)}
print (f"counts: {counts}\nweights: {class_weights}")
###Output
counts: [350 350 350]
weights: {0: 0.002857142857142857, 1: 0.002857142857142857, 2: 0.002857142857142857}
###Markdown
Standardize data We need to standardize our data (zero mean and unit variance) so a specific feature's magnitude doesn't affect how the model learns its weights. We're only going to standardize the inputs X because our outputs y are class values.
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
# Apply scaler on training and test data (don't standardize outputs for classification)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Check (means should be ~0 and std should be ~1)
print (f"X_test[0]: mean: {np.mean(X_test[:, 0], axis=0):.1f}, std: {np.std(X_test[:, 0], axis=0):.1f}")
print (f"X_test[1]: mean: {np.mean(X_test[:, 1], axis=0):.1f}, std: {np.std(X_test[:, 1], axis=0):.1f}")
###Output
X_test[0]: mean: 0.1, std: 0.9
X_test[1]: mean: 0.0, std: 1.0
###Markdown
Linear model Before we get to our neural network, we're going to motivate non-linear activation functions by implementing a generalized linear model (logistic regression). We'll see why linear models (with linear activations) won't suffice for our dataset.
###Code
import torch
# Set seed for reproducibility
torch.manual_seed(SEED)
###Output
_____no_output_____
###Markdown
Model
###Code
from torch import nn
import torch.nn.functional as F
INPUT_DIM = X_train.shape[1] # X is 2-dimensional
HIDDEN_DIM = 100
NUM_CLASSES = len(classes) # 3 classes
class LinearModel(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(LinearModel, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
z = self.fc1(x_in) # linear activation
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = LinearModel(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of LinearModel(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
from torch.optim import Adam
LEARNING_RATE = 1e-2
NUM_EPOCHS = 10
BATCH_SIZE = 32
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%1==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.13, accuracy: 49.9
Epoch: 1 | loss: 0.91, accuracy: 50.3
Epoch: 2 | loss: 0.79, accuracy: 55.3
Epoch: 3 | loss: 0.74, accuracy: 54.6
Epoch: 4 | loss: 0.74, accuracy: 53.7
Epoch: 5 | loss: 0.75, accuracy: 53.6
Epoch: 6 | loss: 0.76, accuracy: 53.7
Epoch: 7 | loss: 0.77, accuracy: 53.8
Epoch: 8 | loss: 0.77, accuracy: 53.9
Epoch: 9 | loss: 0.78, accuracy: 53.9
###Markdown
Evaluation
###Code
import json
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
def get_performance(y_true, y_pred, classes):
"""Per-class performance metrics."""
# Performance
performance = {"overall": {}, "class": {}}
# Overall performance
metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
performance["overall"]["precision"] = metrics[0]
performance["overall"]["recall"] = metrics[1]
performance["overall"]["f1"] = metrics[2]
performance["overall"]["num_samples"] = np.float64(len(y_true))
# Per-class performance
metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
for i in range(len(classes)):
performance["class"][classes[i]] = {
"precision": metrics[0][i],
"recall": metrics[1][i],
"f1": metrics[2][i],
"num_samples": np.float64(metrics[3][i]),
}
return performance
# Predictions
y_prob = model(X_test, apply_softmax=True)
print (f"sample probability: {y_prob[0]}")
y_pred = y_prob.max(dim=1)[1]
print (f"sample class: {y_pred[0]}")
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary(model, X, y):
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))
cmap = plt.cm.Spectral
X_test = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()
y_pred = model(X_test, apply_softmax=True)
_, y_pred = y_pred.max(dim=1)
y_pred = y_pred.reshape(xx.shape)
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data yet our activation functions were linear. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few.
###Code
# Fig size
plt.figure(figsize=(12,3))
# Data
x = torch.arange(-5., 5., 0.1)
# Sigmoid activation (constrain a value between 0 and 1.)
plt.subplot(1, 3, 1)
plt.title("Sigmoid activation")
y = torch.sigmoid(x)
plt.plot(x.numpy(), y.numpy())
# Tanh activation (constrain a value between -1 and 1.)
plt.subplot(1, 3, 2)
y = torch.tanh(x)
plt.title("Tanh activation")
plt.plot(x.numpy(), y.numpy())
# Relu (clip the negative values to 0)
plt.subplot(1, 3, 3)
y = F.relu(x)
plt.title("ReLU activation")
plt.plot(x.numpy(), y.numpy())
# Show plots
plt.show()
###Output
_____no_output_____
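###Markdown
An aside added here (not in the original lesson): ReLU variants such as leaky ReLU keep a small slope for negative inputs instead of zeroing them, which is the usual remedy for the "dying ReLU" issue discussed below. A minimal sketch:
###Code
# Sketch: compare ReLU with leaky ReLU (negative_slope is the slope used
# for negative inputs; 0.1 here is an arbitrary illustrative choice)
x = torch.arange(-5., 5., 0.1)
plt.plot(x.numpy(), F.relu(x).numpy(), label="ReLU")
plt.plot(x.numpy(), F.leaky_relu(x, negative_slope=0.1).numpy(), label="Leaky ReLU")
plt.legend()
plt.show()
###Output
_____no_output_____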
###Markdown
The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has its own constraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. > In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. NumPyNow let's create our multilayer perceptron (MLP) which is going to be exactly like the logistic regression model but with the activation function to map the non-linearity in our data. > It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using PyTorch. Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋. You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear.$z_1 = XW_1$$a_1 = f(z_1)$$z_2 = a_1W_2$$\hat{y} = softmax(z_2)$ classification* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)* $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1)* $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$* $f$ = non-linear activation function* $a_1$ = activation applied to the first layer's outputs | $\in \mathbb{R}^{NXH}$* $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes)* $z_2$ = outputs from second layer $\in \mathbb{R}^{NXC}$* $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) Initialize weights 1. Randomly initialize the model's weights $W$ (we'll cover more effective initialization strategies later in this lesson).
###Code
# Initialize first layer's weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
print (f"W1: {W1.shape}")
print (f"b1: {b1.shape}")
###Output
W1: (2, 100)
b1: (1, 100)
###Markdown
Model 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$
###Code
# z1 = [NX2] · [2X100] + [1X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
print (f"z1: {z1.shape}")
###Output
z1: (1050, 100)
###Markdown
Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$
###Code
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
print (f"a_1: {a1.shape}")
###Output
a_1: (1050, 100)
###Markdown
We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$
###Code
# Initialize second layer's weights
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
print (f"W2: {W2.shape}")
print (f"b2: {b2.shape}")
# z2 = logits = [NX100] · [100X3] + [1X3] = [NX3]
logits = np.dot(a1, W2) + b2
print (f"logits: {logits.shape}")
print (f"sample: {logits[0]}")
###Output
logits: (1050, 3)
sample: [-0.00010001 0.00418463 -0.00067274]
###Markdown
We'll apply the softmax function to normalize the logits and obtain class probabilities. * $\hat{y} = softmax(z_2)$
###Code
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
print (f"y_hat: {y_hat.shape}")
print (f"sample: {y_hat[0]}")
###Output
y_hat: (1050, 3)
sample: [0.33292037 0.33434987 0.33272975]
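###Markdown
A note added here (not in the original): `np.exp` can overflow for large logits. A common, mathematically equivalent refinement is to subtract the row-wise maximum before exponentiating, since $softmax(z) = softmax(z - max(z))$.
###Code
# Numerically stable softmax sketch (produces the same probabilities)
shifted = logits - np.max(logits, axis=1, keepdims=True)
exp_shifted = np.exp(shifted)
y_hat_stable = exp_shifted / np.sum(exp_shifted, axis=1, keepdims=True)
print (np.allclose(y_hat, y_hat_stable)) # True
###Output
_____no_output_____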
###Markdown
Loss 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $
###Code
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
###Output
_____no_output_____
###Markdown
Gradients 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. The gradient of the loss w.r.t to $W_2$ is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$The gradient of the loss w.r.t $W_1$ is a bit trickier since we have to backpropagate through two sets of weights. * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $
###Code
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
###Output
_____no_output_____
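###Markdown
An optional sanity check (added, not in the original lesson): a central-difference estimate of the gradient for a single entry of $W_2$ should closely match the analytic `dW2` computed above.
###Code
# Numerical gradient check on one weight of W2 (central differences)
def loss_at(W2_):
    logits_ = np.dot(a1, W2_) + b2
    exp_ = np.exp(logits_ - np.max(logits_, axis=1, keepdims=True))
    probs_ = exp_ / np.sum(exp_, axis=1, keepdims=True)
    return -np.mean(np.log(probs_[range(len(probs_)), np.asarray(y_train)]))
eps = 1e-5
W2_plus, W2_minus = W2.copy(), W2.copy()
W2_plus[0, 0] += eps
W2_minus[0, 0] -= eps
numerical = (loss_at(W2_plus) - loss_at(W2_minus)) / (2 * eps)
print (f"numerical: {numerical:.6f}, analytic: {dW2[0, 0]:.6f}")
###Output
_____no_output_____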
###Markdown
Update weights 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probability for the incorrect classes ($j$) and encourage a higher probability for the correct class ($y$). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$
###Code
# Update weights
W1 += -LEARNING_RATE * dW1
b1 += -LEARNING_RATE * db1
W2 += -LEARNING_RATE * dW2
b2 += -LEARNING_RATE * db2
###Output
_____no_output_____
###Markdown
Training 6. Repeat steps 2 - 5 until the model performs well.
###Code
# Convert tensors to NumPy arrays
X_train = X_train.numpy()
y_train = y_train.numpy()
X_val = X_val.numpy()
y_val = y_val.numpy()
X_test = X_test.numpy()
y_test = y_test.numpy()
# Initialize random weights
W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM)
b1 = np.zeros((1, HIDDEN_DIM))
W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES)
b2 = np.zeros((1, NUM_CLASSES))
# Training loop
for epoch_num in range(1000):
# First layer forward pass [NX2] · [2X100] = [NX100]
z1 = np.dot(X_train, W1) + b1
# Apply activation function
a1 = np.maximum(0, z1) # ReLU
# z2 = logits = [NX100] · [100X3] = [NX3]
logits = np.dot(a1, W2) + b2
# Normalization via softmax to obtain class probabilities
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
# Loss
correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train])
loss = np.sum(correct_class_logprobs) / len(y_train)
# show progress
if epoch_num%100 == 0:
# Accuracy
y_pred = np.argmax(logits, axis=1)
accuracy = np.mean(np.equal(y_train, y_pred))
print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}")
# dJ/dW2
dscores = y_hat
dscores[range(len(y_hat)), y_train] -= 1
dscores /= len(y_train)
dW2 = np.dot(a1.T, dscores)
db2 = np.sum(dscores, axis=0, keepdims=True)
# dJ/dW1
dhidden = np.dot(dscores, W2.T)
    dhidden[a1 <= 0] = 0 # ReLU backprop
dW1 = np.dot(X_train.T, dhidden)
db1 = np.sum(dhidden, axis=0, keepdims=True)
# Update weights
W1 += -1e0 * dW1
b1 += -1e0 * db1
W2 += -1e0 * dW2
b2 += -1e0 * db2
###Output
Epoch: 0, loss: 1.099, accuracy: 0.349
Epoch: 100, loss: 0.545, accuracy: 0.687
Epoch: 200, loss: 0.247, accuracy: 0.903
Epoch: 300, loss: 0.142, accuracy: 0.949
Epoch: 400, loss: 0.099, accuracy: 0.974
Epoch: 500, loss: 0.076, accuracy: 0.986
Epoch: 600, loss: 0.062, accuracy: 0.990
Epoch: 700, loss: 0.052, accuracy: 0.994
Epoch: 800, loss: 0.046, accuracy: 0.995
Epoch: 900, loss: 0.041, accuracy: 0.995
###Markdown
Evaluation
###Code
class MLPFromScratch():
def predict(self, x):
z1 = np.dot(x, W1) + b1
a1 = np.maximum(0, z1)
logits = np.dot(a1, W2) + b2
exp_logits = np.exp(logits)
y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
return y_hat
# Evaluation
model = MLPFromScratch()
y_prob = model.predict(X_test)
y_pred = np.argmax(y_prob, axis=1)
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
def plot_multiclass_decision_boundary_numpy(model, X, y, savefig_fp=None):
"""Plot the multiclass decision boundary for a model that accepts 2D inputs.
Credit: https://cs231n.github.io/neural-networks-case-study/
Arguments:
model {function} -- trained model with function model.predict(x_in).
X {numpy.ndarray} -- 2D inputs with shape (N, 2).
y {numpy.ndarray} -- 1D outputs with shape (N,).
"""
# Axis boundaries
x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
np.linspace(y_min, y_max, 101))
# Create predictions
x_in = np.c_[xx.ravel(), yy.ravel()]
y_pred = model.predict(x_in)
y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape)
# Plot decision boundary
plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# Plot
if savefig_fp:
plt.savefig(savefig_fp, format="png")
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary_numpy(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary_numpy(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
PyTorch Model We'll be using two linear layers along with PyTorch [Functional](https://pytorch.org/docs/stable/nn.functional.html) API's [ReLU](https://pytorch.org/docs/stable/nn.functional.htmltorch.nn.functional.relu) operation.
###Code
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
###Markdown
Training
###Code
# Define Loss
class_weights_tensor = torch.Tensor(list(class_weights.values()))
loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)
# Accuracy
def accuracy_fn(y_pred, y_true):
n_correct = torch.eq(y_pred, y_true).sum().item()
accuracy = (n_correct / len(y_pred)) * 100
return accuracy
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Training
for epoch in range(NUM_EPOCHS*10):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%10==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
###Output
Epoch: 0 | loss: 1.11, accuracy: 24.3
Epoch: 10 | loss: 0.67, accuracy: 55.4
Epoch: 20 | loss: 0.51, accuracy: 70.6
Epoch: 30 | loss: 0.39, accuracy: 88.5
Epoch: 40 | loss: 0.29, accuracy: 90.3
Epoch: 50 | loss: 0.22, accuracy: 93.4
Epoch: 60 | loss: 0.18, accuracy: 94.7
Epoch: 70 | loss: 0.15, accuracy: 95.9
Epoch: 80 | loss: 0.12, accuracy: 97.3
Epoch: 90 | loss: 0.11, accuracy: 97.7
###Markdown
Evaluation
###Code
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____
###Markdown
Inference
###Code
# Inputs for inference
X_infer = pd.DataFrame([{"X1": 0.1, "X2": 0.1}])
X_infer.head()
# Standardize
X_infer = X_scaler.transform(X_infer)
print (X_infer)
# Predict
y_infer = model(torch.Tensor(X_infer), apply_softmax=True)
prob, _class = y_infer.max(dim=1)
label = label_encoder.inverse_transform(_class.detach().numpy())[0]
print (f"The probability that you have {label} is {prob.detach().numpy()[0]*100.0:.0f}%")
###Output
The probability that you have c1 is 92%
###Markdown
Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior to the affine and non-linear operations. > A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://pytorch.org/docs/stable/nn.init.html).
###Code
from torch.nn import init
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain("relu"))
def forward(self, x_in, apply_softmax=False):
        z = F.relu(self.fc1(x_in)) # ReLU activation function added!
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
###Output
_____no_output_____
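###Markdown
A quick check (added, not in the original): instantiate the model, apply the initializer, and inspect the first layer's weight spread — Xavier normal draws weights with std $= gain \cdot \sqrt{2/(fan_{in}+fan_{out})}$.
###Code
# Sketch: apply Xavier initialization and inspect the resulting weights
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES)
model.init_weights()
print (f"fc1 weight std: {model.fc1.weight.std().item():.3f}")
###Output
_____no_output_____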
###Markdown
Dropout A great technique to have our models generalize (perform well on test data) is to increase the size of our data, but this isn't always an option. Fortunately, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time.* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
###Code
DROPOUT_P = 0.1 # % of the neurons that are dropped each pass
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_p, num_classes):
super(MLP, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.dropout = nn.Dropout(dropout_p) # dropout
self.fc2 = nn.Linear(hidden_dim, num_classes)
def init_weights(self):
        init.xavier_normal_(self.fc1.weight, gain=init.calculate_gain("relu"))
def forward(self, x_in, apply_softmax=False):
z = F.relu(self.fc1(x_in))
z = self.dropout(z) # dropout
y_pred = self.fc2(z)
if apply_softmax:
y_pred = F.softmax(y_pred, dim=1)
return y_pred
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
###Output
<bound method Module.named_parameters of MLP(
(fc1): Linear(in_features=2, out_features=100, bias=True)
(dropout): Dropout(p=0.1, inplace=False)
(fc2): Linear(in_features=100, out_features=3, bias=True)
)>
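###Markdown
A small demonstration (added): dropout is only active in training mode — `model.train()` randomly zeroes hidden activations while `model.eval()` disables dropout, which is why evaluation and inference code should switch modes first.
###Code
# Sketch: the same input gives stochastic outputs in train mode,
# deterministic outputs in eval mode
x_demo = torch.randn(1, INPUT_DIM)
model.train()
print (model(x_demo)) # varies across calls (dropout active)
model.eval()
print (model(x_demo)) # fixed (dropout disabled)
###Output
_____no_output_____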
###Markdown
Overfitting Though neural networks are great at capturing non-linear relationships, they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons.
###Code
NUM_EPOCHS = 500
NUM_SAMPLES_PER_CLASS = 50
LEARNING_RATE = 1e-1
HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D
# Generate random data
X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM)
y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1)
print ("X: ", format(np.shape(X)))
print ("y: ", format(np.shape(y)))
# Create data splits
X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(
X=X, y=y, train_size=TRAIN_SIZE)
print (f"X_train: {X_train.shape}, y_train: {y_train.shape}")
print (f"X_val: {X_val.shape}, y_val: {y_val.shape}")
print (f"X_test: {X_test.shape}, y_test: {y_test.shape}")
print (f"Sample point: {X_train[0]} → {y_train[0]}")
# Standardize the inputs (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
X_train = X_scaler.transform(X_train)
X_val = X_scaler.transform(X_val)
X_test = X_scaler.transform(X_test)
# Convert data to tensors
X_train = torch.Tensor(X_train)
y_train = torch.LongTensor(y_train)
X_val = torch.Tensor(X_val)
y_val = torch.LongTensor(y_val)
X_test = torch.Tensor(X_test)
y_test = torch.LongTensor(y_test)
# Initialize model
model = MLP(input_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM,
dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)
print (model.named_parameters)
# Optimizer
optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
# Training
for epoch in range(NUM_EPOCHS):
# Forward pass
y_pred = model(X_train)
# Loss
loss = loss_fn(y_pred, y_train)
# Zero all gradients
optimizer.zero_grad()
# Backward pass
loss.backward()
# Update weights
optimizer.step()
if epoch%20==0:
predictions = y_pred.max(dim=1)[1] # class
accuracy = accuracy_fn(y_pred=predictions, y_true=y_train)
print (f"Epoch: {epoch} | loss: {loss:.2f}, accuracy: {accuracy:.1f}")
# Predictions
y_prob = model(X_test, apply_softmax=True)
y_pred = y_prob.max(dim=1)[1]
# Performance report
performance = get_performance(y_true=y_test, y_pred=y_pred, classes=classes)
print (json.dumps(performance, indent=2))
# Visualize the decision boundary
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_multiclass_decision_boundary(model=model, X=X_train, y=y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_multiclass_decision_boundary(model=model, X=X_test, y=y_test)
plt.show()
###Output
_____no_output_____ |
colab/stability_vs_knn.ipynb | ###Markdown
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import os.path
import warnings
warnings.filterwarnings('ignore')
# install DenMune clustering algorithm using the pip command from the official Python repository, PyPI
# from https://pypi.org/project/denmune/
!pip install denmune
# then import it
from denmune import DenMune
# clone datasets from our repository datasets
if not os.path.exists('datasets'):
!git clone https://github.com/egy1st/datasets
data_path = 'datasets/denmune/pendigits/'
file_2d = data_path + 'pendigits-2d.csv'
X_train = pd.read_csv(data_path + 'train.csv', sep=',', header=None)
y_train = X_train.iloc[:, -1]
X_train = X_train.drop(X_train.columns[-1], axis=1)
X_test = pd.read_csv(data_path + 'test.csv', sep=',', header=None)
X_test = X_test.drop(X_test.columns[-1], axis=1)
data_stability = []
from IPython.display import clear_output
for knn in range (10, 110, 10):
clear_output(wait=True)
dm = DenMune(train_data=X_train,
train_truth=y_train,
test_data=X_test,
k_nearest=knn,
file_2d=file_2d,
rgn_tsne=False)
labels, validity = dm.fit_predict(show_plots=True, show_analyzer=False)
validity_key = "F1"
print ('k=' , knn, validity_key , 'score is:', validity['train'][validity_key])
data_stability.append([knn, validity['train'][validity_key]])
# computing a moving average to smooth the curve
x, y = zip(*data_stability)
window = 5
cumsum, moving_aves = [0], []
for i, n in enumerate(y, 1):
cumsum.append(cumsum[i-1] + n)
if i>=window:
moving_ave = (cumsum[i] - cumsum[i-window])/window
        # moving_ave now holds the smoothed value for this window
moving_aves.append(moving_ave)
y = moving_aves
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
ax.plot(x[:-window+1], y, marker='.', linewidth=2, label='DenMune Stability')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbors')
ax.set_ylabel(validity_key + ' score')
plt.legend()
plt.show()
###Output
_____no_output_____
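###Markdown
Side note (added): the cumulative-sum loop above is equivalent to convolving the raw scores with a uniform kernel, which NumPy can do in one line.
###Code
# Equivalent moving average via convolution ('valid' trims the edges)
scores = [s for _, s in data_stability]
y_conv = np.convolve(scores, np.ones(window) / window, mode='valid')
print (np.allclose(y_conv, moving_aves)) # True
###Output
_____no_output_____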
###Markdown
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import os.path
import warnings
warnings.filterwarnings('ignore')
# install DenMune clustering algorithm using the pip command from the official Python repository, PyPI
# from https://pypi.org/project/denmune/
!pip install denmune
# then import it
from denmune import DenMune
# clone datasets from our repository datasets
if not os.path.exists('datasets'):
!git clone https://github.com/egy1st/datasets
data_path = 'datasets/denmune/pendigits/'
file_2d = data_path + 'pendigits-2d.csv'
X_train = pd.read_csv(data_path + 'train.csv', sep=',', header=None)
y_train = X_train.iloc[:, -1]
X_train = X_train.drop(X_train.columns[-1], axis=1)
X_test = pd.read_csv(data_path + 'test.csv', sep=',', header=None)
X_test = X_test.drop(X_test.columns[-1], axis=1)
data_stability = []
from IPython.display import clear_output
for knn in range (1, 200):
clear_output(wait=True)
dm = DenMune(train_data=X_train,
train_truth=y_train,
test_data=X_test,
k_nearest=knn,
file_2d=file_2d,
rgn_tsne=False)
labels, validity = dm.fit_predict(show_plots=True, show_analyzer=False)
validity_key = "F1"
print ('k=' , knn, validity_key , 'score is:', validity['train'][validity_key])
data_stability.append([knn, validity['train'][validity_key]])
# computing a moving average to smooth the curve
x, y = zip(*data_stability)
window = 5
cumsum, moving_aves = [0], []
for i, n in enumerate(y, 1):
cumsum.append(cumsum[i-1] + n)
if i>=window:
moving_ave = (cumsum[i] - cumsum[i-window])/window
        # moving_ave now holds the smoothed value for this window
moving_aves.append(moving_ave)
y = moving_aves
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
ax.plot(x[:-window+1], y, marker='.', linewidth=2, label='DenMune Stability')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbors')
ax.set_ylabel(validity_key + ' score')
plt.legend()
plt.show()
###Output
_____no_output_____ |
apps/image-augmentation/image-augmentation.ipynb | ###Markdown
Image Augmentation Image augmentation expands datasets (especially small datasets) used to train models. The way to do image augmentation is to transform images in different ways. In this notebook we demonstrate how to do image augmentation using the Analytics Zoo APIs.
###Code
from zoo.common.nncontext import init_nncontext, create_spark_conf
from zoo.feature.image import *
import os
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext(create_spark_conf().setAppName("Image Augmentation Example"))
###Output
_____no_output_____
###Markdown
Create LocalImageSet
###Code
# create LocalImageSet from an image
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/")
# create LocalImageSet from list of images
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
###Output
creating: createLocalImageSet
[array([[[ 33., 32., 34., ..., 79., 79., 80.],
[ 33., 32., 34., ..., 78., 79., 82.],
[ 34., 33., 34., ..., 78., 78., 81.],
...,
[ 82., 47., 43., ..., 42., 42., 42.],
[102., 58., 46., ..., 43., 43., 43.],
[112., 66., 48., ..., 41., 41., 42.]],
[[ 56., 55., 57., ..., 102., 99., 100.],
[ 56., 55., 57., ..., 100., 99., 102.],
[ 57., 56., 57., ..., 100., 98., 101.],
...,
[115., 79., 74., ..., 75., 75., 75.],
[133., 91., 77., ..., 76., 76., 76.],
[144., 97., 79., ..., 74., 74., 75.]],
[[ 94., 93., 95., ..., 118., 117., 118.],
[ 94., 93., 95., ..., 118., 117., 120.],
[ 95., 94., 95., ..., 118., 116., 119.],
...,
[131., 98., 97., ..., 108., 108., 108.],
[148., 107., 98., ..., 109., 109., 109.],
[157., 112., 100., ..., 107., 107., 108.]]], dtype=float32)]
('isDistributed: ', False, ', isLocal: ', True)
###Markdown
Create DistributedImageSet
###Code
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/", sc, 2)
# create DistributedImageSet from image and label RDDs
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
###Output
creating: createDistributedImageSet
PythonRDD[24] at RDD at PythonRDD.scala:48
PythonRDD[25] at RDD at PythonRDD.scala:48
('isDistributed: ', True, ', isLocal: ', False)
('total images:', 1)
###Markdown
Transform images
###Code
path = os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____
###Markdown
BrightnessAdjust the image brightness
###Code
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
###Output
creating: createImageBrightness
###Markdown
HueAdjust image hue
###Code
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHue
###Markdown
SaturationAdjust image saturation
###Code
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageSaturation
###Markdown
ChannelOrderRandomly change the channel order of an image
###Code
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelOrder
###Markdown
ColorJitterRandomly adjust brightness, contrast, hue and saturation
###Code
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageColorJitter
###Markdown
ResizeResize the ROI (region of interest) according to scale
###Code
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageResize
###Markdown
AspectScaleResize the image, keeping the aspect ratio; scale according to the short edge
###Code
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageAspectScale
###Markdown
RandomAspectScaleResize the image by randomly choosing a scale
###Code
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomAspectScale
###Markdown
ChannelNormalizeNormalize each image channel with the given mean and standard deviation
###Code
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelNormalize
###Markdown
PixelNormalizePixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
###Code
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
PixelNormalize takes nearly one and a half minutes. Please wait a moment.
creating: createImagePixelNormalize
###Markdown
CenterCropCrop a `cropWidth` x `cropHeight` patch from the center of the image.
###Code
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageCenterCrop
###Markdown
RandomCropRandomly crop a `cropWidth` x `cropHeight` patch from an image.
###Code
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomCrop
###Markdown
FixedCropCrop a fixed area of the image
###Code
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFixedCrop
###Markdown
FillerFill part of the image with a certain pixel value
###Code
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFiller
###Markdown
ExpandExpand the image, filling the blank part with meanR, meanG, meanB
###Code
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageExpand
###Markdown
HFlipFlip the image horizontally
###Code
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHFlip
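###Markdown
A closing sketch (added, not part of the original notebook): every transformer above maps an ImageSet to a new ImageSet, so an augmentation pipeline can be built by simply applying transforms in sequence.
###Code
# Compose transforms by chaining calls: resize, then random crop, then flip
image_set = ImageSet.read(path)
out = ImageHFlip()(ImageRandomCrop(200, 200)(ImageResize(300, 300)(image_set)))
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____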
###Markdown
Image Augmentation Image augmentation expands datasets (especially small datasets) used to train models. The way to do image augmentation is to transform images in different ways. In this notebook we demonstrate how to do image augmentation using the Analytics Zoo APIs.
###Code
from bigdl.dllib.nncontext import init_nncontext
from bigdl.dllib.feature.image import *
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext("Image Augmentation Example")
###Output
Prepending /home/ding/anaconda3/envs/conda_py36/lib/python3.6/site-packages/bigdl/share/dllib/conf/spark-bigdl.conf to sys.path
pyspark_submit_args is: --driver-class-path /home/ding/anaconda3/envs/conda_py36/lib/python3.6/site-packages/bigdl/share/dllib/lib/bigdl-dllib-2.1.0-SNAPSHOT-jar-with-dependencies.jar pyspark-shell
###Markdown
Create LocalImageSet
###Code
# create LocalImageSet from an image
local_image_set = ImageSet.read("image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read("image/")
# create LocalImageSet from list of images
image = cv2.imread("image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
###Output
creating: createLocalImageSet
[array([[[ 33., 32., 34., ..., 79., 79., 80.],
[ 33., 32., 34., ..., 78., 79., 82.],
[ 34., 33., 34., ..., 78., 78., 81.],
...,
[ 82., 47., 42., ..., 42., 42., 42.],
[100., 58., 46., ..., 43., 43., 43.],
[112., 64., 48., ..., 41., 41., 42.]],
[[ 56., 55., 57., ..., 102., 99., 100.],
[ 56., 55., 57., ..., 101., 99., 102.],
[ 57., 56., 57., ..., 100., 98., 101.],
...,
[114., 79., 74., ..., 75., 75., 75.],
[133., 90., 77., ..., 76., 76., 76.],
[143., 97., 79., ..., 74., 74., 75.]],
[[ 94., 93., 95., ..., 118., 117., 118.],
[ 94., 93., 95., ..., 117., 117., 120.],
[ 95., 94., 95., ..., 118., 116., 119.],
...,
[133., 98., 97., ..., 108., 108., 108.],
[149., 109., 100., ..., 109., 109., 109.],
[158., 113., 100., ..., 107., 107., 108.]]], dtype=float32)]
isDistributed: False , isLocal: True
###Markdown
Create DistributedImageSet
###Code
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read("image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read("image/", sc, 2)
# create DistributedImageSet from image and label RDDs
image = cv2.imread("image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
###Output
creating: createDistributedImageSet
PythonRDD[20] at RDD at PythonRDD.scala:53
PythonRDD[21] at RDD at PythonRDD.scala:53
isDistributed: True , isLocal: False
total images: 1
###Markdown
Transform images
###Code
path = "image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____
###Markdown
BrightnessAdjust the image brightness
###Code
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
###Output
creating: createImageBrightness
###Markdown
HueAdjust image hue
###Code
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHue
###Markdown
SaturationAdjust image saturation
###Code
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageSaturation
###Markdown
ChannelOrderRandomly change the channel order of an image
###Code
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelOrder
###Markdown
ColorJitterRandomly adjust brightness, contrast, hue and saturation
###Code
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageColorJitter
###Markdown
ResizeResize the ROI (region of interest) according to scale
###Code
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageResize
###Markdown
AspectScaleResize the image, keeping the aspect ratio; scale according to the short edge
###Code
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageAspectScale
###Markdown
RandomAspectScaleResize the image by randomly choosing a scale
###Code
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomAspectScale
###Markdown
ChannelNormalizeNormalize each image channel with the given mean and standard deviation
###Code
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelNormalize
###Markdown
PixelNormalizePixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
###Code
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
PixelNormalize takes nearly one and a half minutes. Please wait a moment.
creating: createImagePixelNormalize
###Markdown
CenterCropCrop a `cropWidth` x `cropHeight` patch from the center of the image.
###Code
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageCenterCrop
###Markdown
RandomCropRandomly crop a `cropWidth` x `cropHeight` patch from an image.
###Code
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomCrop
###Markdown
FixedCropCrop a fixed area of the image
###Code
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFixedCrop
###Markdown
FillerFill part of the image with a certain pixel value
###Code
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFiller
###Markdown
ExpandExpand the image, filling the blank part with meanR, meanG, meanB
###Code
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageExpand
###Markdown
HFlipFlip the image horizontally
###Code
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHFlip
###Markdown
Image Augmentation Image augmentation expands datasets (especially small datasets) used to train models. The way to do image augmentation is to transform images in different ways. In this notebook we demonstrate how to do image augmentation using the Analytics Zoo APIs.
###Code
from zoo.common.nncontext import init_nncontext
from zoo.feature.image import *
import os
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext("Image Augmentation Example")
###Output
_____no_output_____
###Markdown
Create LocalImageSet
###Code
# create LocalImageSet from an image
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/")
# create LocalImageSet from list of images
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
###Output
creating: createLocalImageSet
[array([[[ 33., 32., 34., ..., 79., 79., 80.],
[ 33., 32., 34., ..., 78., 79., 82.],
[ 34., 33., 34., ..., 78., 78., 81.],
...,
[ 82., 47., 43., ..., 42., 42., 42.],
[102., 58., 46., ..., 43., 43., 43.],
[112., 66., 48., ..., 41., 41., 42.]],
[[ 56., 55., 57., ..., 102., 99., 100.],
[ 56., 55., 57., ..., 100., 99., 102.],
[ 57., 56., 57., ..., 100., 98., 101.],
...,
[115., 79., 74., ..., 75., 75., 75.],
[133., 91., 77., ..., 76., 76., 76.],
[144., 97., 79., ..., 74., 74., 75.]],
[[ 94., 93., 95., ..., 118., 117., 118.],
[ 94., 93., 95., ..., 118., 117., 120.],
[ 95., 94., 95., ..., 118., 116., 119.],
...,
[131., 98., 97., ..., 108., 108., 108.],
[148., 107., 98., ..., 109., 109., 109.],
[157., 112., 100., ..., 107., 107., 108.]]], dtype=float32)]
('isDistributed: ', False, ', isLocal: ', True)
###Markdown
Create DistributedImageSet
###Code
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/", sc, 2)
# create DistributedImageSet from image and label RDDs
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
###Output
creating: createDistributedImageSet
PythonRDD[24] at RDD at PythonRDD.scala:48
PythonRDD[25] at RDD at PythonRDD.scala:48
('isDistributed: ', True, ', isLocal: ', False)
('total images:', 1)
###Markdown
Transform images
###Code
path = os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____
###Markdown
BrightnessAdjust the image brightness
###Code
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
###Output
creating: createImageBrightness
###Markdown
HueAdjust image hue
###Code
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHue
###Markdown
SaturationAdjust image saturation
###Code
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageSaturation
###Markdown
ChannelOrderRandomly change the channel order of an image
###Code
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelOrder
###Markdown
ColorJitterRandomly adjust brightness, contrast, hue and saturation
###Code
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageColorJitter
###Markdown
ResizeResize the ROI (region of interest) according to scale
###Code
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageResize
###Markdown
AspectScaleResize the image, keeping the aspect ratio; scale according to the short edge
###Code
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageAspectScale
###Markdown
RandomAspectScaleResize the image by randomly choosing a scale
###Code
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomAspectScale
###Markdown
ChannelNormalizeNormalize each image channel with the given mean and standard deviation
###Code
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelNormalize
###Markdown
PixelNormalizePixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
###Code
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
PixelNormalize takes nearly one and a half minutes. Please wait a moment.
creating: createImagePixelNormalize
###Markdown
CenterCropCrop a `cropWidth` x `cropHeight` patch from the center of the image.
###Code
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageCenterCrop
###Markdown
RandomCropRandomly crop a `cropWidth` x `cropHeight` patch from an image.
###Code
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomCrop
###Markdown
FixedCropCrop a fixed area of the image
###Code
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFixedCrop
###Markdown
FillerFill part of the image with a certain pixel value
###Code
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFiller
###Markdown
ExpandExpand the image, filling the blank part with meanR, meanG, meanB
###Code
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageExpand
###Markdown
HFlipFlip the image horizontally
###Code
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHFlip
###Markdown
Image Augmentation Image augmentation expands datasets (especially small datasets) used to train models. The way to do image augmentation is to transform images in different ways. In this notebook we demonstrate how to do image augmentation using the Analytics Zoo APIs.
###Code
from bigdl.dllib.nncontext import init_nncontext
from bigdl.dllib.feature.image import *
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext("Image Augmentation Example")
###Output
Prepending /home/ding/anaconda3/envs/conda_py36/lib/python3.6/site-packages/bigdl/share/dllib/conf/spark-bigdl.conf to sys.path
pyspark_submit_args is: --driver-class-path /home/ding/anaconda3/envs/conda_py36/lib/python3.6/site-packages/bigdl/share/dllib/lib/bigdl-dllib-0.14.0-SNAPSHOT-jar-with-dependencies.jar pyspark-shell
###Markdown
Create LocalImageSet
###Code
# create LocalImageSet from an image
local_image_set = ImageSet.read("image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read("image/")
# create LocalImageSet from list of images
image = cv2.imread("image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
###Output
creating: createLocalImageSet
[array([[[ 33., 32., 34., ..., 79., 79., 80.],
[ 33., 32., 34., ..., 78., 79., 82.],
[ 34., 33., 34., ..., 78., 78., 81.],
...,
[ 82., 47., 42., ..., 42., 42., 42.],
[100., 58., 46., ..., 43., 43., 43.],
[112., 64., 48., ..., 41., 41., 42.]],
[[ 56., 55., 57., ..., 102., 99., 100.],
[ 56., 55., 57., ..., 101., 99., 102.],
[ 57., 56., 57., ..., 100., 98., 101.],
...,
[114., 79., 74., ..., 75., 75., 75.],
[133., 90., 77., ..., 76., 76., 76.],
[143., 97., 79., ..., 74., 74., 75.]],
[[ 94., 93., 95., ..., 118., 117., 118.],
[ 94., 93., 95., ..., 117., 117., 120.],
[ 95., 94., 95., ..., 118., 116., 119.],
...,
[133., 98., 97., ..., 108., 108., 108.],
[149., 109., 100., ..., 109., 109., 109.],
[158., 113., 100., ..., 107., 107., 108.]]], dtype=float32)]
isDistributed: False , isLocal: True
###Markdown
Create DistributedImageSet
###Code
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read("image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read("image/", sc, 2)
# create DistributedImageSet from image rdd
image = cv2.imread("image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
###Output
creating: createDistributedImageSet
PythonRDD[20] at RDD at PythonRDD.scala:53
PythonRDD[21] at RDD at PythonRDD.scala:53
isDistributed: True , isLocal: False
total images: 1
###Markdown
Transform images
###Code
path = "image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____
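###Markdown
Each transform call returns an `ImageSet`, so transforms can be chained by feeding the output of one into the next. Below is a minimal sketch of such a pipeline (reusing the `path` and `transform_display` helper defined above; the sizes are arbitrary illustration values): resize first, then center-crop the result.
###Code
resize = ImageResize(256, 256)
crop = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
# apply the resize, then hand the resulting ImageSet to the crop transform
transform_display(crop, resize(image_set))
###Output
_____no_output_____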
###Markdown
BrightnessAdjust the image brightness
###Code
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
###Output
creating: createImageBrightness
###Markdown
HueAdjust image hue
###Code
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHue
###Markdown
SaturationAdjust image saturation
###Code
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageSaturation
###Markdown
ChannelOrderRandomly change the channel order of an image
###Code
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelOrder
###Markdown
ColorJitterRandomly adjust brightness, contrast, hue, and saturation
###Code
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageColorJitter
###Markdown
ResizeResize the ROI (region of interest) according to scale
###Code
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageResize
###Markdown
AspectScaleResize the image, keeping the aspect ratio. Scale according to the short edge.
###Code
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageAspectScale
###Markdown
RandomAspectScaleResize the image by randomly choosing a scale
###Code
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomAspectScale
###Markdown
ChannelNormalizeImage channel normalize
###Code
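# assumption: the six arguments are per-channel means (R, G, B) followed by per-channel stds (R, G, B)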
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelNormalize
###Markdown
PixelNormalizePixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
###Code
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
PixelNormalize takes nearly one and a half minutes. Please wait a moment.
creating: createImagePixelNormalize
###Markdown
CenterCropCrop a `cropWidth` x `cropHeight` patch from center of image.
###Code
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageCenterCrop
###Markdown
RandomCropRandom crop a `cropWidth` x `cropHeight` patch from an image.
###Code
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomCrop
###Markdown
FixedCropCrop a fixed area of image
###Code
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFixedCrop
###Markdown
FillerFill part of image with certain pixel value
###Code
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFiller
###Markdown
ExpandExpand image, fill the blank part with the meanR, meanG, meanB
###Code
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageExpand
###Markdown
HFlipFlip the image horizontally
###Code
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHFlip
###Markdown
Image Augmentation Image Augmentation augments datasets (especially small datasets) to train models. The way to do image augmentation is to transform images in different ways. In this notebook we demonstrate how to do image augmentation using Analytics ZOO APIs.
###Code
from zoo.common.nncontext import init_nncontext
from zoo.feature.image import *
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext("Image Augmentation Example")
###Output
_____no_output_____
###Markdown
Create LocalImageSet
###Code
# create LocalImageSet from an image
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/")
# create LocalImageSet from list of images
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
###Output
creating: createLocalImageSet
[array([[[ 33., 32., 34., ..., 79., 79., 80.],
[ 33., 32., 34., ..., 78., 79., 82.],
[ 34., 33., 34., ..., 78., 78., 81.],
...,
[ 82., 47., 43., ..., 42., 42., 42.],
[102., 58., 46., ..., 43., 43., 43.],
[112., 66., 48., ..., 41., 41., 42.]],
[[ 56., 55., 57., ..., 102., 99., 100.],
[ 56., 55., 57., ..., 100., 99., 102.],
[ 57., 56., 57., ..., 100., 98., 101.],
...,
[115., 79., 74., ..., 75., 75., 75.],
[133., 91., 77., ..., 76., 76., 76.],
[144., 97., 79., ..., 74., 74., 75.]],
[[ 94., 93., 95., ..., 118., 117., 118.],
[ 94., 93., 95., ..., 118., 117., 120.],
[ 95., 94., 95., ..., 118., 116., 119.],
...,
[131., 98., 97., ..., 108., 108., 108.],
[148., 107., 98., ..., 109., 109., 109.],
[157., 112., 100., ..., 107., 107., 108.]]], dtype=float32)]
('isDistributed: ', False, ', isLocal: ', True)
###Markdown
Create DistributedImageSet
###Code
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/", sc, 2)
# create DistributedImageSet from image rdd
image = cv2.imread(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
###Output
creating: createDistributedImageSet
PythonRDD[24] at RDD at PythonRDD.scala:48
PythonRDD[25] at RDD at PythonRDD.scala:48
('isDistributed: ', True, ', isLocal: ', False)
('total images:', 1)
###Markdown
Transform images
###Code
path = os.getenv("ANALYTICS_ZOO_HOME")+"/apps/image-augmentation/image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
###Output
_____no_output_____
###Markdown
BrightnessAdjust the image brightness
###Code
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
###Output
creating: createImageBrightness
###Markdown
HueAdjust image hue
###Code
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHue
###Markdown
SaturationAdjust image saturation
###Code
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageSaturation
###Markdown
ChannelOrderRandomly change the channel order of an image
###Code
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelOrder
###Markdown
ColorJitterRandomly adjust brightness, contrast, hue, and saturation
###Code
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageColorJitter
###Markdown
ResizeResize the ROI (region of interest) according to scale
###Code
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageResize
###Markdown
AspectScaleResize the image, keeping the aspect ratio. Scale according to the short edge.
###Code
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageAspectScale
###Markdown
RandomAspectScaleResize the image by randomly choosing a scale
###Code
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomAspectScale
###Markdown
ChannelNormalizeImage channel normalize
###Code
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageChannelNormalize
###Markdown
PixelNormalizePixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
###Code
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
PixelNormalize takes nearly one and a half minutes. Please wait a moment.
creating: createImagePixelNormalize
###Markdown
CenterCropCrop a `cropWidth` x `cropHeight` patch from center of image.
###Code
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageCenterCrop
###Markdown
RandomCropRandom crop a `cropWidth` x `cropHeight` patch from an image.
###Code
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageRandomCrop
###Markdown
FixedCropCrop a fixed area of image
###Code
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFixedCrop
###Markdown
FillerFill part of image with certain pixel value
###Code
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageFiller
###Markdown
ExpandExpand image, fill the blank part with the meanR, meanG, meanB
###Code
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageExpand
###Markdown
HFlipFlip the image horizontally
###Code
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
###Output
creating: createImageHFlip
|
Year19-20/2019-06-24__GettingStarted/02_intro.ipynb | ###Markdown
Introduction**Prerequisites**- Good attitude - Good work ethic **Outcomes**- Understand what a programming language is. - Know why we chose Python - Know what the Jupyter notebook is - Know Jupyter notebook basics: cell modes, editing/evaluating cells WelcomeWelcome to the start of your path to learning how to work with data in the Python programming language!A programming language is, loosely speaking, a structured subset of natural language (words) and special characters (e.g. `,` or `{`) that allow humans to describe operations they would like their computer to perform on their behalf. It is then the job of the programming language to translate these words and symbols into instructions the computer can execute. Why Python?Among the hundreds of programming languages, we chose to teach you Python for the following reasons- Easy to learn and use (relative to other programming languages) - Designed with readability in mind - Excellent tools for handling data efficiently and succinctly - Cemented as the world’s [third most popular](https://www.zdnet.com/article/programming-language-of-the-year-python-is-standout-in-latest-rankings/) programming language, the most popular scripting language, and an increasing standard for [data analysis in industry](https://medium.com/@data_driven/python-vs-r-for-data-science-and-the-winner-is-3ebb1a968197) - General purpose: Initially you will learn Python for data analysis, but it can also be used for websites, database management, web scraping, financial modeling, data visualization, etc. In particular, it is the world’s best language for [gluing](https://en.wikipedia.org/wiki/Glue_code) those different pieces together. The general purpose nature of Python comes at a cost: it is often called “the second best language for everything”. But the flip-side of that argument is that it is a great language to have in your toolbox to solve all sorts of problems and patch them together. Hence, a versatile “second-best” language is typically the best one to learn first. Some other languages to consider- R has a spectacular ecosystem of statistical packages, and is defensible as a choice for pure data science. However, it is a difficult general-purpose language to use and ill-suited to the versatile glue code in which Python excels. Nevertheless, it can be a useful second language to learn for projects that are entirely statistical. - Matlab has much more natural notation for writing linear-algebra-heavy code. However, it is: (a) expensive; (b) poor at dealing with data analysis; (c) grossly inferior to Python as a language; and (d) being left behind as the Python and Julia ecosystems expand to more packages. - Julia is in part a far better version of Matlab, which can be as fast as Fortran or C. However, it has a young and immature environment and is currently more appropriate for academics and scientific computing specialists. Another consideration with programming languages is runtime performance, where both Python and R can be slow for general purpose code. For the most part this will not be an issue for doing data science and machine learning, as most data science packages in Python (and R) call out to high-performance code written in other languages in the background. If you are writing more traditional scientific/technical computing in Python, there are [things that can help](http://numba.pydata.org/) make Python faster in limited cases, but other languages like Julia and Matlab can become more appealing. 
Why Open Source?Software development has changed radically in the last decade, increasingly becoming a process of stitching together both established high-quality libraries and state-of-the-art research projects. A major disadvantage of Matlab, Stata, and other proprietary languages is that they are not open-source, and unable to work within this new paradigm. Forgetting the cost for a moment, the benefits of using an open-source language are pragmatic rather than ideological- Open source languages make it easier for everyone in the world to write and share packages because the code is accessible and available - With the right kinds of open source licenses, academics, businesses, and hobbyists all have incentives to contribute - Because open-source languages are managed on publicly accessible sites (e.g. GitHub), it is easier to build a community and collaborate - Package management systems (i.e. a way to find, download, install, and upgrade packages) in open-source languages can be very open and accessible since they don’t need to deal with proprietary software licenses Taking Matlab as an example: it has no package management system at all, and due to the license and language limitations it is unlikely to ever catch up. InstallationIf you are accessing these materials via a Jupyter(Hub) server, you can skip this section. If you are not viewing them on an already established Jupyter server, please follow the [local installation instructions](local_install.ipynb) and come back here when you are finished. Jupyter notebook basicsThere are a few things that we should know about Jupyter notebooks up front: 1. Notebooks are made up of cells 1. Cells have inputs and outputs 1. We use cells of two main types: 1. Markdown cells - Contain [markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Here-Cheatsheet) text - Output is rendered in place of the input when the cell is executed 1. Code cells - Contain Python (or other language) code - Inputs have an `In [ ]:` to the left - When executed, output is placed below the input with `Out [ ]:` to the left
###Code
1 + 1
###Output
_____no_output_____
###Markdown
Editing cellsThe selected cells can be in one of two modes:1. Command mode: This mode is for making high level changes to the notebook itself. For example, changing the order of cells, creating a new cell, etc… - You know you’re in command mode when there is a blue sidebar on the left of the cell - Pressing keys tells Jupyter notebook to run commands. For example, `a` adds a new cell above the current cell, `b` adds one below the current cell, and `dd` deletes the current cell - up arrow (or `k`) changes the selected cell to the cell above the current one and down arrow (or `j`) changes to the cell below 1. Edit mode: Used when editing the content inside of cells. - When in edit mode the selected cell displays a green sidebar on the left - Can edit the content of a cell Some useful commands- To go from command mode to edit mode press enter or double click the mouse - Go from edit mode to command mode by pressing escape - You can evaluate a cell by pressing shift+enter (meaning shift and enter at the same time) **Check for understanding**In the *code* cell below (notice the `In [ ]:` to the left) type a quote (`"`), your name, then another quote (`"`) and evaluate the cell
###Code
# code here!
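"Ada Lovelace"  # example answer (any name in quotes works): a string literal evaluates to itself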
###Output
_____no_output_____ |
CNN-3Conv.ipynb | ###Markdown
Approach**[Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist)** is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. The dataset serves as a direct drop-in replacement for the original [MNIST dataset](http://yann.lecun.com/exdb/mnist/) for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits.In this work, I will train a Convolutional Neural Network classifier with 3 convolution layers using the Keras deep learning library. The model is first trained for 10 epochs with a batch size of 256, compiled with the `categorical_crossentropy` loss function and the `Adam` optimizer. Then, I added **data augmentation**, which generates new training samples by rotating, shifting and zooming on the training samples, and trained for another 50 epochs.I will first split the original training data (60,000 images) into 80% training (48,000 images) and 20% validation (12,000 images) to optimize the classifier, while keeping the test data (10,000 images) to finally evaluate the accuracy of the model on data it has never seen. This helps to see whether I'm over-fitting on the training data: I should lower the learning rate and train for more epochs if validation accuracy is higher than training accuracy, or stop over-training if training accuracy shifts higher than the validation accuracy.
###Code
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# Load training and test data into dataframes
data_train = pd.read_csv('data/fashion-mnist_train.csv')
data_test = pd.read_csv('data/fashion-mnist_test.csv')
# X forms the training images, and y forms the training labels
X = np.array(data_train.iloc[:, 1:])
y = to_categorical(np.array(data_train.iloc[:, 0]))
# Here I split original training data to sub-training (80%) and validation data (20%)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=13)
# X_test forms the test images, and y_test forms the test labels
X_test = np.array(data_test.iloc[:, 1:])
y_test = to_categorical(np.array(data_test.iloc[:, 0]))
###Output
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
Processing DataAfter loading and splitting the data, I preprocess them by reshaping them into the shape the network expects and scaling them so that all values are in the [0, 1] interval. Previously, for instance, the sub-training images were stored in an array of shape (48000, 784) with integer values in the [0, 255] interval. I transform it into a float32 array of shape (48000, 28, 28, 1) with values between 0 and 1.
###Code
# Each image's dimension is 28 x 28
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
# Prepare the training images
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_train /= 255
# Prepare the test images
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_test = X_test.astype('float32')
X_test /= 255
# Prepare the validation images
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)
X_val = X_val.astype('float32')
X_val /= 255
###Output
_____no_output_____
###Markdown
CNN with 3 Convolutional LayersThis CNN takes as input tensors of shape *(image_height, image_width, image_channels)*. In this case, I configure the CNN to process inputs of size *(28, 28, 1)*, which is the format of the FashionMNIST images. I do this by passing the argument *input_shape=(28, 28, 1)* to the first layer.* The 1st layer is a *Conv2D* layer for the **convolution** operation that extracts features from the input images by sliding a convolution filter over the input to produce a feature map. Here I choose a convolution kernel of size 3 x 3. * The 2nd layer is a *MaxPooling2D* layer for the **max-pooling** operation that reduces the dimensionality of each feature, which helps shorten training time and reduce the number of parameters. Here I choose a pooling window of size 2 x 2.* To combat overfitting, I add a *Dropout* layer as the 3rd layer, a powerful regularization technique. **Dropout** is a method used to reduce overfitting. It forces the model to learn multiple independent representations of the same data by randomly disabling neurons in the learning phase. In this model, the first dropout layer will randomly disable 25% of the outputs.* I repeat these steps to add more hidden layers: 2 *Conv2D* layers, 1 *MaxPooling2D* layer, and 2 *Dropout* layers.* The next step is to feed the last output tensor into a stack of *Dense* layers, otherwise known as **fully-connected** layers. These densely connected classifiers process vectors, which are 1D, whereas the current output is a 3D tensor. Thus, I need to **flatten** the 3D outputs to 1D, and then add 2 *Dense* layers on top.* I add another *Dropout* layer between these 2 *Dense* layers to disable 30% of the outputs.* I do a 10-way classification (as there are 10 classes of fashion images), using a final layer with 10 outputs and a softmax activation. **Softmax** activation enables me to calculate the output based on the probabilities. Each class is assigned a probability and the class with the maximum probability is the model’s output for the input.
###Code
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
cnn3 = Sequential()
cnn3.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
cnn3.add(MaxPooling2D((2, 2)))
cnn3.add(Dropout(0.25))
cnn3.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
cnn3.add(MaxPooling2D(pool_size=(2, 2)))
cnn3.add(Dropout(0.25))
cnn3.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
cnn3.add(Dropout(0.4))
cnn3.add(Flatten())
cnn3.add(Dense(128, activation='relu'))
cnn3.add(Dropout(0.3))
cnn3.add(Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
When compiling the model, I choose **categorical_crossentropy** as the loss function (which is relevant for multiclass, single-label classification problems) and the **Adam** optimizer.* The cross-entropy loss calculates the error rate between the predicted value and the original value. The formula for calculating cross-entropy loss is given [here](https://en.wikipedia.org/wiki/Cross_entropy). Categorical is used because there are 10 classes to predict from. If there were 2 classes, I would have used binary_crossentropy.* The Adam optimizer is an improvement over SGD (Stochastic Gradient Descent). The optimizer is responsible for updating the weights of the neurons via backpropagation. It calculates the derivative of the loss function with respect to each weight and subtracts it from the weight. That is how a neural network learns.
###Code
cnn3.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
###Output
_____no_output_____
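###Markdown
As a quick sanity check of the loss function, categorical cross-entropy can be computed by hand for a single prediction. The minimal NumPy sketch below uses a made-up 3-class example: a one-hot true label and a softmax-style predicted distribution.
###Code
import numpy as np

# hypothetical 3-class example: one-hot truth and a predicted probability distribution
y_true_example = np.array([0.0, 1.0, 0.0])
y_pred_example = np.array([0.2, 0.7, 0.1])

# categorical cross-entropy: -sum_k y_true[k] * log(y_pred[k])
loss_example = -np.sum(y_true_example * np.log(y_pred_example))
print(loss_example)  # -log(0.7), roughly 0.357
###Output
_____no_output_____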
###Markdown
Let’s look at how the dimensions of the feature maps change with every successive layer:
###Code
cnn3.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 13, 13, 32) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 13, 13, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 11, 11, 64) 18496
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 5, 5, 64) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 5, 5, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 3, 3, 128) 73856
_________________________________________________________________
dropout_3 (Dropout) (None, 3, 3, 128) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 1152) 0
_________________________________________________________________
dense_1 (Dense) (None, 128) 147584
_________________________________________________________________
dropout_4 (Dropout) (None, 128) 0
_________________________________________________________________
dense_2 (Dense) (None, 10) 1290
=================================================================
Total params: 241,546
Trainable params: 241,546
Non-trainable params: 0
_________________________________________________________________
###Markdown
* 241,546 parameters are available to be trained.* The outputs of the *Conv2D* and *MaxPooling2D* layers are 3D tensors of shape *(height, width, channels)*.* The number of channels is controlled by the 1st argument passed to the *Conv2D* layer (32).* The (3, 3, 128) outputs from the 3rd *Dropout* layer are flattened into vectors of shape (1152,), since 3 x 3 x 128 = 1152, before going through 2 *Dense* layers. Training the ModelAs previously mentioned, I train the model with a batch size of 256 for 10 epochs, evaluating on the validation data after each epoch.
###Code
history3 = cnn3.fit(X_train, y_train,
batch_size=256,
epochs=10,
verbose=1,
validation_data=(X_val, y_val))
score3 = cnn3.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score3[0])
print('Test accuracy:', score3[1])
###Output
Test loss: 0.24964626643657684
Test accuracy: 0.9079
###Markdown
My accuracy is 90.79%, pretty powerful! Data AugmentationOverfitting can be caused by having too few samples to learn from, making me unable to train a model that can generalize to new data. Given infinite data, my model would be exposed to every possible aspect of the data distribution at hand: I would never overfit. **Data augmentation** takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. The goal is that at training time, my model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better.In Keras, this can be done by configuring a number of random transformations to be performed on the images read by the ImageDataGenerator instance.* *rotation_range* is a value in degrees (0–180), a range within which to randomly rotate pictures.* *width_shift* and *height_shift* are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.* *shear_range* is for randomly applying shearing transformations.* *zoom_range* is for randomly zooming inside pictures.
###Code
from keras.preprocessing.image import ImageDataGenerator
gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X_train, y_train, batch_size=256)
val_batches = gen.flow(X_val, y_val, batch_size=256)
###Output
_____no_output_____
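###Markdown
Before training on augmented data, it is worth eyeballing what the generator produces. A minimal sketch (reusing the `gen` defined above; the batch size of 9 is only chosen to fill a 3 x 3 grid):
###Code
import matplotlib.pyplot as plt

# draw one batch of augmented images and plot them in a 3 x 3 grid
aug_images, _ = next(gen.flow(X_train, y_train, batch_size=9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(aug_images[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
plt.show()
###Output
_____no_output_____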
###Markdown
Let's train the network using data augmentation.
###Code
history3 = cnn3.fit_generator(batches, steps_per_epoch=48000//256, epochs=50,
validation_data=val_batches, validation_steps=12000//256, use_multiprocessing=True)
score3 = cnn3.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score3[0])
print('Test accuracy:', score3[1])
###Output
Test loss: 0.22910109297037123
Test accuracy: 0.9117
###Markdown
Okay, I improved the accuracy to 91.17%! ResultsLet's plot training and validation accuracy as well as training and validation loss.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
accuracy = history3.history['acc']
val_accuracy = history3.history['val_acc']
loss = history3.history['loss']
val_loss = history3.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
These plots look decent: The training curves are closely tracking the validation curves. Classification ReportI can summarize the performance of my classifier as follows:
###Code
# get the predictions for the test data
predicted_classes = cnn3.predict_classes(X_test)
# get the indices to be plotted
y_true = data_test.iloc[:, 0]
correct = np.nonzero(predicted_classes==y_true)[0]
incorrect = np.nonzero(predicted_classes!=y_true)[0]
from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(10)]
print(classification_report(y_true, predicted_classes, target_names=target_names))
###Output
precision recall f1-score support
Class 0 0.85 0.86 0.86 1000
Class 1 0.99 0.99 0.99 1000
Class 2 0.92 0.83 0.87 1000
Class 3 0.93 0.94 0.93 1000
Class 4 0.88 0.83 0.85 1000
Class 5 0.98 0.98 0.98 1000
Class 6 0.68 0.78 0.73 1000
Class 7 0.95 0.96 0.96 1000
Class 8 0.99 0.99 0.99 1000
Class 9 0.98 0.96 0.97 1000
avg / total 0.91 0.91 0.91 10000
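###Markdown
A confusion matrix complements the per-class report by showing which classes get mixed up with which. A minimal sketch using the `y_true` and `predicted_classes` arrays computed above:
###Code
from sklearn.metrics import confusion_matrix

# rows are true classes, columns are predicted classes
cm = confusion_matrix(y_true, predicted_classes)
print(cm)
###Output
_____no_output_____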
###Markdown
It's apparent that the classifier is underperforming for class 6 in terms of both precision and recall. For class 0, the classifier is slightly lacking precision; whereas for class 2 and 4, it is slightly lacking recall.Perhaps I would gain more insight after visualizing the correct and incorrect predictions.Here is a subset of correctly predicted classes.
###Code
for i, correct in enumerate(correct[:9]):
plt.subplot(3,3,i+1)
plt.imshow(X_test[correct].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[correct], y_true[correct]))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
And here is a subset of incorrectly predicted classes:
###Code
for i, incorrect in enumerate(incorrect[0:9]):
plt.subplot(3,3,i+1)
plt.imshow(X_test[incorrect].reshape(28,28), cmap='gray', interpolation='none')
plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], y_true[incorrect]))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Visualizing What My Model LearnsIt’s often said that deep-learning models are “black boxes”: learning representations that are difficult to extract and present in a human-readable form. Although this is partially true for certain types of deep-learning models, it’s definitely not true for convnets. The representations learned by convnets are highly amenable to visualization, in large part because they’re representations of visual concepts.Here I attempt to visualize the intermediate CNN outputs (intermediate activations). Visualizing intermediate activations consists of displaying the feature maps that are output by various convolution and pooling layers in a network, given a certain input (the output of a layer is often called its *activation*, the output of the activation function). This gives a view into how an input is decomposed into the different filters learned by the network. I want to visualize feature maps with three dimensions: width, height, and depth (channels). Each channel encodes relatively independent features, so the proper way to visualize these feature maps is by independently plotting the contents of every channel as a 2D image.I first take an input image from the training set (index 1994).
###Code
test_im = X_train[1994]
plt.imshow(test_im.reshape(28,28), cmap='viridis', interpolation='none')
plt.show()
###Output
_____no_output_____
###Markdown
In order to extract the feature maps I want to look at, I create a Keras model that takes batches of images as input, and outputs the activations of all convolution and pooling layers. To do this, I use the Keras class Model. A model is instantiated using two arguments: an input tensor (or list of input tensors) and an output tensor (or list of output tensors). The resulting class is a Keras model, mapping the specified inputs to the specified outputs. When fed an image input, this model returns the values of the layer activations in the original model.
###Code
from keras import models
# extracts the outputs of the top 8 layers
layer_outputs = [layer.output for layer in cnn3.layers[:8]]
# creates a model that will return these outputs, given the model input
activation_model = models.Model(inputs=cnn3.input, outputs=layer_outputs)
# returns a list of Numpy arrays: one array per layer activation
activations = activation_model.predict(test_im.reshape(1,28,28,1))
# activation of the 1st convolution layer
first_layer_activation = activations[0]
# display the 3rd channel of the activation of the 1st layer of the original model
plt.matshow(first_layer_activation[0, :, :, 3], cmap='viridis')
# display the 6th channel of the activation of the 1st layer of the original model
plt.matshow(first_layer_activation[0, :, :, 6], cmap='viridis')
###Output
_____no_output_____
###Markdown
Let's plot a complete visualization of all the activations in the network. I extract and plot every channel in each of the eight activation maps, and then stack the results in one big image tensor, with channels stacked side by side.
###Code
layer_names = []
for layer in cnn3.layers[:-1]:
layer_names.append(layer.name)
images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
if layer_name.startswith('conv'):
n_features = layer_activation.shape[-1]
size = layer_activation.shape[1]
n_cols = n_features // images_per_row
display_grid = np.zeros((size * n_cols, images_per_row * size))
for col in range(n_cols):
for row in range(images_per_row):
channel_image = layer_activation[0,:, :, col * images_per_row + row]
channel_image -= channel_image.mean()
channel_image /= channel_image.std()
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255).astype('uint8')
display_grid[col * size : (col + 1) * size,
row * size : (row + 1) * size] = channel_image
scale = 1. / size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
###Output
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ipykernel_launcher.py:15: RuntimeWarning: invalid value encountered in true_divide
from ipykernel import kernelapp as app
|
tutorials/tts/FastPitch_MixerTTS_Training.ipynb | ###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'main'
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode pynini==2.1.4 scipy==1.7.3
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses the unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, the unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastPitch model as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show an example of preprocessing and training using a small part of the AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download the data, prepared manifests and supplementary files.*NOTE: The sample data is not enough data to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is just used as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`. So, our model will receive a grapheme representation of the text (graphemes) as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
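###Markdown
To see what these two stages do, we can run a sentence through them by hand. A minimal sketch (assuming the normalizer's `normalize` method accepts the call kwargs above and the tokenizer exposes an `encode` method; the sample sentence is made up):
###Code
sample = "Mr. Smith paid $111 on Dec. 17th."
normalized = text_normalizer.normalize(sample, **text_normalizer_call_kwargs)
print(normalized)
print(text_tokenizer.encode(normalized))
###Output
_____no_output_____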
###Markdown
To accelerate and stabilize our training, we also need to extract pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data once.In the method below the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder does not already exist, it will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — the text tokenizer object that we already created.- `text_normalizer` — the text normalizer object that we already created.- `text_normalizer_call_kwargs` — the dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
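###Markdown
The computed pitch statistics are passed to the training script below, so printing them is a quick sanity check:
###Code
print(f"pitch_mean: {pitch_mean}, pitch_std: {pitch_std}, pitch_min: {pitch_min}, pitch_max: {pitch_max}")
###Output
_____no_output_____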
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronyms dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as used in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
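###Markdown
A quick look at what the grapheme-to-phoneme module produces for a short sample (a minimal sketch, assuming the `g2p` object is callable on raw text):
###Code
print(g2p("Hello world"))
###Output
_____no_output_____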
###Markdown
Just like in FastPitch, we need to extract pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready for training our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is used just as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in the `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready for training our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is used just as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____
###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'main'
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses the unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, the unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastPitch model as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
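###Markdown
To turn the generated spectrogram into audible speech, a vocoder is needed. Below is a minimal sketch, assuming the pretrained "tts_hifigan" checkpoint is available and that the vocoder exposes `convert_spectrogram_to_audio`; see the Inference_ModelSelect notebook for the full workflow.
###Code
from nemo.collections.tts.models import HifiGanModel

# Load a pretrained HiFi-GAN vocoder (checkpoint name assumed) and convert the spectrogram to a waveform
vocoder = HifiGanModel.from_pretrained("tts_hifigan")
vocoder.eval();
audio = vocoder.convert_spectrogram_to_audio(spec=spectrogram)
print(audio.shape)  # (batch, num_audio_samples)
###Output
_____no_output_____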
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show an example of preprocessing and training using a small part of the AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download the data, prepared manifests, and supplementary files.*NOTE: The sample data is not enough to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is used just as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`. So, our model will receive a grapheme (character) representation of the text as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
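###Markdown
Before building the dataset, it can be useful to sanity-check both stages on a sample sentence. This is a minimal sketch with a made-up sentence, assuming `Normalizer.normalize` accepts the punctuation kwargs and that the tokenizer is callable and returns token ids:
###Code
# Hypothetical sentence exercising abbreviation and number normalization
sample = "Mr. Smith paid $111 on Dec. 17, 2021."
normalized = text_normalizer.normalize(sample, **text_normalizer_call_kwargs)
token_ids = text_tokenizer(normalized)
print(normalized)      # abbreviations and numbers expanded to words
print(token_ids[:10])  # integer ids the model will consume
###Output
_____no_output_____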
###Markdown
To accelerate and stabilize our training, we also need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data one time.In the below method the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder does not already exist, it will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — text tokenizer object that we already created.- `text_normalizer` — text normalizer object that we already created.- `text_normalizer_call_kwargs` — dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
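###Markdown
The returned statistics are plain floats that are passed to the training command below, and the supplementary folder now caches the extracted pitch tensors and alignment priors. A quick optional inspection (assuming `TTSDataset` writes each supplementary type into its own subfolder):
###Code
print(f"pitch mean/std: {pitch_mean:.2f}/{pitch_std:.2f}, min/max: {pitch_min:.2f}/{pitch_max:.2f}")
# Assumed layout: one subfolder per supplementary data type
!ls {fastpitch_sup_data_path}
###Output
_____no_output_____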
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronym dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
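###Markdown
As a quick check of the grapheme-to-phoneme stage, the G2P module can be applied to text directly. This is a minimal sketch, assuming `EnglishG2p` is callable and returns a list of ARPABET symbols, with out-of-dictionary words falling back to characters:
###Code
# Example phrase (made up); the phoneme output in the comment is illustrative
print(g2p("Hello world"))                  # e.g. ['HH', 'AH0', 'L', 'OW1', ' ', 'W', 'ER1', 'L', 'D']
print(text_tokenizer("Hello world")[:10])  # corresponding phoneme token ids
###Output
_____no_output_____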
###Markdown
Just like in FastPitch, we will need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready to train our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is used just as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready to train our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is used just as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____
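###Markdown
Once a training run such as the FastPitch run above finishes, the experiment manager writes checkpoints under its log directory (`./fastpitch_log_dir` in that example). The following is a hedged sketch of restoring the most recent checkpoint for inference; the directory layout and glob pattern are assumptions, so adjust the path to your run.
###Code
from pathlib import Path

# Hypothetical: pick the newest .ckpt written under the FastPitch log dir
ckpts = sorted(Path("./fastpitch_log_dir").rglob("*.ckpt"), key=lambda p: p.stat().st_mtime)
if ckpts:
    trained_model = FastPitchModel.load_from_checkpoint(str(ckpts[-1]))
    trained_model.eval();
###Output
_____no_output_____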
###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode
# BRANCH = 'main'
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses the unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and are parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastPitch model as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show an example of preprocessing and training using a small part of the AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download the data, prepared manifests, and supplementary files.*NOTE: The sample data is not enough to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is used just as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`. So, our model will receive a grapheme (character) representation of the text as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
###Markdown
To accelerate and stabilize our training, we also need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data one time.In the below method the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder does not already exist, it will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — text tokenizer object that we already created.- `text_normalizer` — text normalizer object that we already created.- `text_normalizer_call_kwargs` — dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronym dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
###Markdown
Just like in FastPitch, we will need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready to train our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is used just as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready to train our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is used just as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____
###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'main'
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode pynini==2.1.4 scipy==1.7.3
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses the unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and are parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastPitch model as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show an example of preprocessing and training using a small part of the AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download the data, prepared manifests, and supplementary files.*NOTE: The sample data is not enough to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is used just as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`. So, our model will receive a grapheme (character) representation of the text as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
###Markdown
To accelerate and stabilize our training, we also need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data one time.In the below method the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder does not already exist, it will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — text tokenizer object that we already created.- `text_normalizer` — text normalizer object that we already created.- `text_normalizer_call_kwargs` — dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronym dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
###Markdown
Just like in FastPitch, we will need to extract the pitch for every audio clip, estimate pitch statistics (mean and std), and pre-calculate alignment prior matrices for the alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready to train our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is used just as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready to train our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is used just as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____
###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'r1.7.0'
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses the unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and are parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastModel as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show an example of preprocessing and training using a small part of the AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download the data, prepared manifests and supplementary files.*NOTE: The sample data is not enough to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is just used as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` for tokenization. So, our model will receive a grapheme representation of the text (graphemes) as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
###Markdown
To accelerate and stabilize our training, we also need to extract pitch for every audio clip, estimate pitch statistics (mean and std) and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data one time.In the below method the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder do not already exist, they will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — text tokenizer object that we already created.- `text_normalizer` — text normalizer object that we already created.- `text_normalizer_call_kwargs` — dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronym dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/whitelist_lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
###Markdown
Just like in FastPitch we will need to extract pitch for every audio, estimate pitch statistics (mean and std) and pre-calculate alignment prior matrices for alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready for training our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is just used as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready for training our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is just used as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/whitelist_lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____
###Markdown
FastPitch and Mixer-TTS TrainingThis notebook is designed to provide a guide on how to train FastPitch and Mixer-TTS as part of the TTS pipeline. It contains the following sections: 1. **Introduction**: FastPitch and Mixer-TTS in NeMo 2. **Preprocessing**: how to prepare data for FastPitch and Mixer-TTS 3. **Training**: example of FastPitch training and Mixer-TTS training License> Copyright 2022 NVIDIA. All Rights Reserved.> > Licensed under the Apache License, Version 2.0 (the "License");> you may not use this file except in compliance with the License.> You may obtain a copy of the License at> > http://www.apache.org/licenses/LICENSE-2.0> > Unless required by applicable law or agreed to in writing, software> distributed under the License is distributed on an "AS IS" BASIS,> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.> See the License for the specific language governing permissions and> limitations under the License.
###Code
"""
You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
BRANCH = 'main'
# # If you're using Colab and not running locally, uncomment and run this cell.
# !apt-get install sox libsndfile1 ffmpeg
# !pip install wget unidecode pynini==2.1.4 scipy==1.7.3
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]
import json
import nemo
import torch
import librosa
import numpy as np
from pathlib import Path
from tqdm.notebook import tqdm
###Output
_____no_output_____
###Markdown
Introduction FastPitchFastPitch is a non-autoregressive model for mel-spectrogram generation based on FastSpeech, conditioned on fundamental frequency contours. For more details about the model, please refer to the original [paper](https://arxiv.org/abs/2006.06873). The NeMo re-implementation of FastPitch additionally uses an unsupervised speech-text [aligner](https://arxiv.org/abs/2108.10447) which was originally implemented in [FastPitch 1.1](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch). Mixer-TTSMixer-TTS is another non-autoregressive model for mel-spectrogram generation. It is structurally similar to FastPitch (duration prediction, pitch prediction, unsupervised TTS alignment framework), but the main difference is that Mixer-TTS is based on the [MLP-Mixer](https://arxiv.org/abs/2105.01601) architecture adapted for speech synthesis.FastPitch and Mixer-TTS, like most NeMo models, are defined as LightningModules, allowing for easy training via PyTorch Lightning, and are parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.Let's take a look at NeMo's pretrained models and how to use them to generate spectrograms.
###Code
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.models import FastPitchModel, MixerTTSModel
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
%matplotlib inline
# Let's see what pretrained models are available for FastPitch and Mixer-TTS
print("FastPitch pretrained models:")
print(FastPitchModel.list_available_models())
print("=====================================")
print("Mixer-TTS pretrained models:")
print(MixerTTSModel.list_available_models())
# We can load the pre-trained FastModel as follows
pretrained_model = "tts_en_fastpitch"
spec_gen = FastPitchModel.from_pretrained(pretrained_model)
spec_gen.eval();
# In the same way, we can load the pre-trained Mixer-TTS model as follows
pretrained_model = "tts_en_lj_mixertts"
spec_gen = MixerTTSModel.from_pretrained(pretrained_model)
spec_gen.eval();
assert isinstance(spec_gen, SpectrogramGenerator)
if isinstance(spec_gen, FastPitchModel):
tokens = spec_gen.parse(str_input="Hey, this produces speech!")
else:
tokens = spec_gen.parse(text="Hey, this produces speech!")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
# Now we can visualize the generated spectrogram
# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.
# Refer to the Inference_ModelSelect notebook on how to convert spectrograms to speech.
imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower")
plt.show()
###Output
_____no_output_____
###Markdown
Preprocessing
###Code
from nemo.collections.tts.torch.g2ps import EnglishG2p
from nemo.collections.tts.torch.data import TTSDataset
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo.collections.tts.torch.tts_tokenizers import EnglishPhonemesTokenizer, EnglishCharsTokenizer
###Output
_____no_output_____
###Markdown
We will show example of preprocessing and training using small part of AN4 dataset. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. Let's download data, prepared manifests and supplementary files.*NOTE: The sample data is not enough data to properly train a FastPitch or Mixer-TTS model. This will not result in a trained model and is just used as an example.*Let's download everything that we need for this dataset.
###Code
# download data and manifests
!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data
# additional files
!mkdir -p tts_dataset_files && cd tts_dataset_files \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/cmudict-0.7b_nv22.01 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tts_dataset_files/heteronyms-030921 \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv \
&& cd ..
###Output
_____no_output_____
###Markdown
FastPitchNow that we looked at the FastPitch model, let's see how to prepare all data for training it. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/fastpitch.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/fastpitch_align_v1.05.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
The TTS text preprocessing pipeline consists of two stages: text normalization and text tokenization. Both of them can be handled by `nemo.collections.tts.torch.data.TTSDataset` for training. Our current example dataset is in English, so let's use `nemo_text_processing.text_normalization.normalize.Normalizer` for normalization, which supports English (and many other languages!), and `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` for tokenization. So, our model will receive a grapheme representation of the text (graphemes) as input.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Text tokenizer
text_tokenizer = EnglishCharsTokenizer()
###Output
_____no_output_____
###Markdown
To accelerate and stabilize our training, we also need to extract pitch for every audio clip, estimate pitch statistics (mean and std) and pre-calculate alignment prior matrices for the alignment framework. To do this, all we need to do is iterate over our data one time.In the below method the arguments are as follows:- `sup_data_path` — path to the folder which contains supplementary data. If the supplementary data or the folder do not already exist, they will be created.- `sup_data_types` — types of supplementary data to be provided to the model.- `text_tokenizer` — text tokenizer object that we already created.- `text_normalizer` — text normalizer object that we already created.- `text_normalizer_call_kwargs` — dictionary of arguments to be used in calling the text normalizer that we already created.
###Code
def pre_calculate_supplementary_data(sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs):
# init train and val dataloaders
stages = ["train", "val"]
stage2dl = {}
for stage in stages:
ds = TTSDataset(
manifest_filepath=f"tests/data/asr/an4_{stage}.json",
sample_rate=16000,
sup_data_path=sup_data_path,
sup_data_types=sup_data_types,
n_fft=1024,
win_length=1024,
hop_length=256,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=8000,
text_tokenizer=text_tokenizer,
text_normalizer=text_normalizer,
text_normalizer_call_kwargs=text_normalizer_call_kwargs
)
stage2dl[stage] = torch.utils.data.DataLoader(ds, batch_size=1, collate_fn=ds._collate_fn, num_workers=1)
# iteration over dataloaders
pitch_mean, pitch_std, pitch_min, pitch_max = None, None, None, None
for stage, dl in stage2dl.items():
pitch_list = []
for batch in tqdm(dl, total=len(dl)):
tokens, tokens_lengths, audios, audio_lengths, attn_prior, pitches, pitches_lengths = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
if stage == "train":
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
return pitch_mean, pitch_std, pitch_min, pitch_max
fastpitch_sup_data_path = "fastpitch_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
fastpitch_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Mixer-TTSNow, let's see how to prepare data for training Mixer-TTS. Firstly, let's download all necessary training scripts and configs.
###Code
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/mixer_tts.py
!mkdir -p conf && cd conf \
&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/mixer-tts.yaml \
&& cd ..
###Output
_____no_output_____
###Markdown
In the FastPitch pipeline we used a char-based tokenizer, but in the Mixer-TTS training pipeline we would like to demonstrate a phoneme-based tokenizer, `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`. Unlike the char-based tokenizer, `EnglishPhonemesTokenizer` needs a phoneme dictionary and a heteronym dictionary. We will be using the same `nemo_text_processing.text_normalization.normalize.Normalizer` for normalizing the text as in the FastPitch example.
###Code
# Text normalizer
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist="tts_dataset_files/lj_speech.tsv"
)
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True
}
# Grapheme-to-phoneme module
g2p = EnglishG2p(
phoneme_dict="tts_dataset_files/cmudict-0.7b_nv22.01",
heteronyms="tts_dataset_files/heteronyms-030921"
)
# Text tokenizer
text_tokenizer = EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
apostrophe=True,
pad_with_space=True,
g2p=g2p,
)
###Output
_____no_output_____
###Markdown
Just like in FastPitch we will need to extract pitch for every audio, estimate pitch statistics (mean and std) and pre-calculate alignment prior matrices for alignment framework.
###Code
mixer_tts_sup_data_path = "mixer_tts_sup_data_folder"
sup_data_types = ["align_prior_matrix", "pitch"]
pitch_mean, pitch_std, pitch_min, pitch_max = pre_calculate_supplementary_data(
mixer_tts_sup_data_path, sup_data_types, text_tokenizer, text_normalizer, text_normalizer_call_kwargs
)
###Output
_____no_output_____
###Markdown
Training FastPitchNow we are ready for training our model! Let's try to train FastPitch.*NOTE: The sample data is not enough to properly train a FastPitch model. This will not result in a trained FastPitch and is just used as an example.*
###Code
!(python fastpitch.py --config-name=fastpitch_align_v1.05.yaml \
sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={fastpitch_sup_data_path} \
whitelist_path=tts_dataset_files/lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
pitch_fmin={pitch_min} \
pitch_fmax={pitch_max} \
~model.text_tokenizer \
+model.text_tokenizer._target_=nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer \
+trainer.max_steps=100 ~trainer.max_epochs \
trainer.check_val_every_n_epoch=25 \
+trainer.max_epochs=5 \
model.train_ds.dataloader_params.batch_size=24 \
model.validation_ds.dataloader_params.batch_size=24 \
exp_manager.exp_dir=./fastpitch_log_dir \
model.n_speakers=1 trainer.devices=1 trainer.strategy=null \
)
###Output
_____no_output_____
###Markdown
Let's look at some of the options in the training command:- *`~model.text_tokenizer`* — remove the default text tokenizer. The default tokenizer in `fastpitch_align_v1.05.yaml` is `nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer`, but we want to use `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer`.- *`+model.text_tokenizer._target_`* — add `nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer` as the text tokenizer class. Mixer-TTSNow we are ready for training our model! Let's try to train Mixer-TTS.*NOTE: The sample data is not enough to properly train a Mixer-TTS model. This will not result in a trained Mixer-TTS and is just used as an example.*
###Code
!python mixer_tts.py sample_rate=16000 \
train_dataset=tests/data/asr/an4_train.json \
validation_datasets=tests/data/asr/an4_val.json \
sup_data_types="['align_prior_matrix', 'pitch']" \
sup_data_path={mixer_tts_sup_data_path} \
phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.01 \
heteronyms_path=tts_dataset_files/heteronyms-030921 \
whitelist_path=tts_dataset_files/lj_speech.tsv \
pitch_mean={pitch_mean} \
pitch_std={pitch_std} \
model.train_ds.dataloader_params.batch_size=6 \
model.train_ds.dataloader_params.num_workers=0 \
model.validation_ds.dataloader_params.num_workers=0 \
trainer.max_epochs=3 \
trainer.strategy=null \
trainer.check_val_every_n_epoch=1
###Output
_____no_output_____ |
Python_Function_Arguments.ipynb | ###Markdown
**Python Function Arguments**- In Python, you can define a function that takes a variable number of arguments. - In this article, you will learn to define such functions using default, keyword and arbitrary arguments. **1. Arguments**- In the [user-defined function](https://www.programiz.com/python-programming/user-defined-function) topic, we learned about defining a function and calling it. The function call must match the number of arguments in the function definition; otherwise, the function call will result in an error. Here is an example.
###Code
def greet(name, msg):
"""This function greets to
the person with the provided message"""
print("Hello", name + ', ' + msg)
greet("Monica", "Good morning!")
###Output
Hello Monica, Good morning!
###Markdown
- Here, the function **greet()** has two parameters.- Since we have called this function with two arguments, it runs smoothly and we do not get any error.- If we call it with a different number of arguments, the interpreter will show an error message. Below is a call to this function with one and with no arguments, along with their respective error messages: `>>> greet("Monica")` (only one argument) raises `TypeError: greet() missing 1 required positional argument: 'msg'`, and `>>> greet()` (no arguments) raises `TypeError: greet() missing 2 required positional arguments: 'name' and 'msg'`
###Code
def greet(name, msg="Good morning!"):
"""
This function greets to
the person with the
provided message.
If the message is not provided,
it defaults to "Good
morning!"
"""
print("Hello", name + ', ' + msg)
greet("Kate")
greet("Bruce", "How do you do?")
###Output
Hello Kate, Good morning!
Hello Bruce, How do you do?
###Markdown
- In this function, the parameter `name` does not have a default value and is required (mandatory) during a call.- On the other hand, the parameter `msg` has a default value of `"Good morning!"`. So, it is optional during a call. If a value is provided, it will overwrite the default value.- Any number of arguments in a function can have a default value. But once we have a default argument, all the arguments to its right must also have default values.- This means to say, non-default arguments cannot follow default arguments. For example, if we had defined the function header above as: `def greet(msg = "Good morning!", name):` - We would get an error as: `SyntaxError: non-default argument follows default argument`
###Code
# 2 keyword arguments
greet(name = "Bruce",msg = "How do you do?")
# 2 keyword arguments (out of order)
greet(msg = "How do you do?",name = "Bruce")
# 1 positional, 1 keyword argument
greet("Bruce", msg = "How do you do?")
###Output
Hello Bruce, How do you do?
###Markdown
- As we can see, we can mix positional arguments with keyword arguments during a function call. But we must keep in mind that keyword arguments must follow positional arguments. - Having a positional argument after keyword arguments will result in errors. For example, the function call as follows: `greet(name="Bruce","How do you do?")` - Will result in an error: `SyntaxError: non-keyword arg after keyword arg` **2.3 Python Arbitrary Arguments**- Sometimes, we do not know in advance the number of arguments that will be passed into a function. Python allows us to handle this kind of situation through function calls with an arbitrary number of arguments.- In the function definition, we use an asterisk (*) before the parameter name to denote this kind of argument. Here is an example.
###Code
def greet(*names):
"""This function greets all
the person in the names tuple."""
# names is a tuple with arguments
for name in names:
print("Hello", name)
greet("Monica", "Luke", "Steve", "John")
###Output
Hello Monica
Hello Luke
Hello Steve
Hello John
|
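###Markdown
- As a complementary sketch (an addition beyond the original article), Python also supports arbitrary *keyword* arguments. A parameter prefixed with two asterisks (**) collects any keyword arguments passed to the function into a dictionary. The function name `greet_all` below is illustrative.
###Code
def greet_all(**kwargs):
    """This function prints whatever
    keyword details are provided."""
    # kwargs is a dict of the keyword arguments passed in
    for key, value in kwargs.items():
        print(key, "->", value)

greet_all(name="Monica", msg="Good morning!")
###Output
_____no_output_____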
Week 07/timeseries.ipynb | ###Markdown
Time SeriesJust a notebook to replicate what was covered in the video lectures this week. We are looking at time series data in `pandas`. The first step will be to import the libraries that we will use.
###Code
import pandas as pd
import seaborn as sns
###Output
_____no_output_____
###Markdown
In the lectures, Ian imported a [dataset](http://cli.met.ie/cli/climate_data/webdata/hly4935.csv) from [Met Eireann](www.met.ie). I will do this too. Note the following:* in the data set the first 23 rows contain metadata - we won't import these (`skiprows=23`)* we will set `low_memory=False` to deal with some mixed data types in some of the columns* we will set `nrows=1000` as it is a huge dataset and we just want a flavour of the start of it
###Code
#import dataset
df = pd.read_csv("http://cli.met.ie/cli/climate_data/webdata/hly4935.csv", skiprows=23, low_memory=False, nrows=1000)
df
###Output
_____no_output_____
###Markdown
The next step is to check the `dtype` of the date column - if it is not a `datetime`, we need to add a column that is
###Code
# have a look at the date column
df['date']
###Output
_____no_output_____
###Markdown
The `dtype` is `object`. We need to add a `datetime` column
###Code
# add a datetime column by converting the date column
df['datetime'] = pd.to_datetime(df['date'])
###Output
_____no_output_____
###Markdown
Plot the `temp` data using `seaborn`
###Code
sns.lineplot(x="datetime", y="temp", data=df)
###Output
_____no_output_____
###Markdown
There seems to be an issue with the plot - the first point is earlier (probably from commissioning). Let's only plot from point 2 (indexed by 1!!) on.
###Code
sns.lineplot(x="datetime", y="temp", data=df[1:])
###Output
_____no_output_____
###Markdown
Creating a time series data frame using simulated dataUsing some of the pandas functions we will create a data frame which is indexed by time
###Code
# Adapted from https://pandas.pydata.org/pandas-docs/stable/timeseries.html
# 72 hours starting with midnight Jan 1st, 2011
rng = pd.date_range('1/1/2011', periods=72, freq='H')
# import numpy
import numpy as np
# Create a times series, ts
ts = pd.DataFrame(np.random.poisson(10,len(rng)), index=rng, columns=["admissions"])
ts
###Output
_____no_output_____
###Markdown
Ian challenged us to add a column for alcohol-related injuries that must be random and contain an integer value no bigger than the admissions value for each point. This is my attempt (a vectorized alternative is sketched inside the cell below):
###Code
# Convert the values in 'admissions' to a list
adm = ts['admissions'].values.T.tolist()
# Create a blank dri list
dri = []
# Create a list of random ints with values from 0 to the value in admissions
for i in adm:
dri.append(np.random.randint(i+1))
# add dri column to the time series
ts['dri'] = dri
ts
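# A vectorized alternative (an illustrative sketch; 'dri_vec' is a new
# column added here, not in the original): np.random.randint broadcasts
# over array-valued bounds, and high is exclusive, hence the +1.
ts['dri_vec'] = np.random.randint(0, ts['admissions'] + 1)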
## Aggregate to Days
ts.resample('D').mean()
# Pull a range
ts['2011-01-01 21:00:00':'2011-02-01 21:00:00']
###Output
_____no_output_____ |
taxi_availability/Taxi Availability Project part 4.ipynb | ###Markdown
IntroductionWith the cleaned data, we explore the application of machine learning to predicting taxi counts in the various sectors.
###Code
# import the needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', None)
plt.style.use('seaborn-whitegrid')
plt.rc('figure', figsize=(10,4), titlesize=16, titleweight='bold')
plt.rc('axes', titlesize=16, titleweight='bold', titlepad=10, labelsize=14, labelweight='bold')
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
# load data
df = pd.read_csv('use_cleaned.csv')
df.head(1)
# get the independent and dependent variables.
X = df.iloc[:, [2,3,4]]
print(X.shape)
y1, y2, y4, y5, y8 = df['sector_1'], df['sector_2'], df['sector_4'], df['sector_5'], df['sector_8']
print(y1.shape)
print(y8.shape)
# import the libraries (try these for now)
from sklearn.model_selection import train_test_split
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
###Output
_____no_output_____
###Markdown
Approach* We go for an arbitrary 80-20 split;* for each dependent variable, we'll train and predict with each model; followed by* a review of the RMSE and a test of linearity with a scatter plot of predicted values versus the actual values
###Code
# function for train test splits
def traintestset(X, yN):
Xtrain, Xtest, ytrain, ytest = train_test_split(X, yN, test_size=0.2, random_state=0)
return Xtrain, Xtest, ytrain, ytest
# train test set for sector 1
X1_train, X1_test, y1_train, y1_test = traintestset(X, y1)
# instantiate the list of models to use
dummy = DummyRegressor()
lr = LinearRegression()
dt = DecisionTreeRegressor(random_state=0)
rf = RandomForestRegressor(random_state=0)
# consolidate as a list to iterate over
modelList = [dummy, lr, dt, rf]
# function for model fit, predictions
def modeltrainer(Xtrain, Xtest, ytrain, ytest):
predList = []
for model in modelList:
model.fit(Xtrain, ytrain)
pred = model.predict(Xtest)
predList.append(pred)
return predList
# iterate over the models, train and predict using sector 1 train test set
pred_sector1 = modeltrainer(X1_train, X1_test, y1_train, y1_test)
modelNameList = ['Dummy', 'LinearReg', 'DecisionTree', 'RandForest']
# function for RMSE of the model predictions
def RMSEreport(modelNameList, ytest, predList):
for i in range(0, len(modelNameList)):
print(f'{modelNameList[i]} RMSE: {mean_squared_error(ytest, predList[i], squared=False):3g}')
# function to get sector y_true and pred as dataframe
def ResDf(ytrue, predList, modelNameList):
df_res = ytrue.to_frame()
for i in range(0, len(modelNameList)):
df_res[f'{modelNameList[i]}'] = predList[i]
return df_res
# function to test linearity with a scatter plot for each model prediction
def scatterPlots(ytrueName, df_res):
for i in range(0, len(modelNameList)):
sns.lmplot(x=ytrueName, y=modelNameList[i], data=df_res, fit_reg=False)
d_line= np.arange(df_res.min().min(), df_res.max().max())
plt.plot(d_line, d_line, color='purple', linestyle='--', linewidth=2)
plt.show()
# sector 1 results as dataframe
df_res1 = ResDf(y1_test, pred_sector1, modelNameList)
df_res1
# Sector 1 RMSE
RMSEreport(modelNameList, y1_test, pred_sector1)
# Sector 1 scatter plots
scatterPlots('sector_1', df_res1)
# train test set for sectors2,4,5,8
X2_train, X2_test, y2_train, y2_test = traintestset(X, y2)
X4_train, X4_test, y4_train, y4_test = traintestset(X, y4)
X5_train, X5_test, y5_train, y5_test = traintestset(X, y5)
X8_train, X8_test, y8_train, y8_test = traintestset(X, y8)
# predictions
pred_sector2 = modeltrainer(X2_train, X2_test, y2_train, y2_test)
pred_sector4 = modeltrainer(X4_train, X4_test, y4_train, y4_test)
pred_sector5 = modeltrainer(X5_train, X5_test, y5_train, y5_test)
pred_sector8 = modeltrainer(X8_train, X8_test, y8_train, y8_test)
# results as dataframes
df_res2 = ResDf(y2_test, pred_sector2, modelNameList)
df_res4 = ResDf(y4_test, pred_sector4, modelNameList)
df_res5 = ResDf(y5_test, pred_sector5, modelNameList)
df_res8 = ResDf(y8_test, pred_sector8, modelNameList)
# Sector 2 RMSE
RMSEreport(modelNameList, y2_test, pred_sector2)
# Sector 2 scatter plots
scatterPlots('sector_2', df_res2)
# Sector 4 RMSE
RMSEreport(modelNameList, y4_test, pred_sector4)
# Sector 4 scatter plots
scatterPlots('sector_4', df_res4)
# Sector 5 RMSE
RMSEreport(modelNameList, y5_test, pred_sector5)
# Sector 5 scatter plots
scatterPlots('sector_5', df_res5)
# Sector 8 RMSE
RMSEreport(modelNameList, y8_test, pred_sector8)
# Sector 8 scatter plots
scatterPlots('sector_8', df_res8)
###Output
Dummy RMSE: 4.39636
LinearReg RMSE: 4.34061
DecisionTree RMSE: 4.83522
RandForest RMSE: 4.6306
|
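###Markdown
As a final sketch (an illustrative addition), we can check which inputs the random forest leaned on. Assumption: `rf` still holds the fit from the last `modeltrainer` call, i.e. the sector 8 model.
###Code
# Feature importances from the last fitted random forest (sector 8)
importances = pd.Series(rf.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False))
###Output
_____no_output_____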
Titanic_classification.ipynb | ###Markdown
Outlier DetectionOutlier detection is a very important part of feature engineering because outliers can keep our models from performing as effectively as they could. By removing outliers we can decrease the skewness of our data set
###Code
# Imports used throughout this notebook
import re
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
# Assumption: the Kaggle Titanic data has already been loaded, e.g.
# train = pd.read_csv('train.csv'); test = pd.read_csv('test.csv')
def outlier_detection(df, n, features):
    """Return indices of rows with more than n outlier values (Tukey's IQR method)."""
    outlier_index = []
    for column in features:
        # 1st quartile (lower) range
        Q1 = np.percentile(df[column], 25)
        # 3rd quartile (upper) range
        Q3 = np.percentile(df[column], 75)
        # Interquartile range
        IQR = Q3 - Q1
        # Outlier step
        outlier_step = 1.5 * IQR
        no_of_outliers_each_column = df[(df[column] < Q1 - outlier_step) | (df[column] > Q3 + outlier_step)].index
        outlier_index.extend(no_of_outliers_each_column)
    outlier_index = Counter(outlier_index)
    multiple_outliers = list(k for k, v in outlier_index.items() if v > n)
    return multiple_outliers
Outliers_to_drop=outlier_detection(train,2,["Age","SibSp","Parch","Fare"])
train.loc[Outliers_to_drop]
#Drop Outliers
train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
Outliers_to_drop_test=outlier_detection(test,2,["Age","SibSp","Parch","Fare"])
test.loc[Outliers_to_drop_test]
###Output
_____no_output_____
###Markdown
Feature Engineering Feature engineering is one of the most important pillars of making good predictions. We will try to extract information from our categorical features
###Code
full_data=[train , test]
# We will make new column called name_length from name column.
train['name_length'] = train['Name'].apply(len)
test['name_length'] = test['Name'].apply(len)
# we will create new column 'has_cabin' and we code people with cabin to 1 otherwise 0.
train['has_cabin'] = train['Cabin'].apply(lambda x : 0 if type(x) == float else 1)
test['has_cabin'] = test['Cabin'].apply(lambda x : 0 if type(x) == float else 1)
# Create new feature FamilySize as a combination of SibSp and Parch
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Create new feature 'isAlone' from newly created feature familySize
for dataset in full_data:
dataset['isAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1 , 'isAlone'] = 1
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
#type(train['Cabin'].loc[1])
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output
_____no_output_____
###Markdown
Still, we have to deal with the null-containing Age column. We will try to fill the null values in the Age feature with random integer values between (Avg. Age - Std. Age) and (Avg. Age + Std. Age)
###Code
for dataset in full_data:
avg_age = dataset['Age'].mean()
std_age = dataset['Age'].std()
age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(int(avg_age - std_age), int(avg_age + std_age), size=age_null_count)
    # use .loc to avoid pandas chained-assignment issues
    dataset.loc[np.isnan(dataset['Age']), 'Age'] = age_null_random_list
dataset['Age'] = dataset['Age'].astype(int)
dataset.loc[ dataset['Age'] <= 14, 'Age'] = 0
dataset.loc[(dataset['Age'] > 14) & (dataset['Age'] <= 30), 'Age'] = 1
dataset.loc[(dataset['Age'] > 30) & (dataset['Age'] <= 40), 'Age'] = 2
dataset.loc[(dataset['Age'] > 40) & (dataset['Age'] <= 50), 'Age'] = 3
dataset.loc[(dataset['Age'] > 50) & (dataset['Age'] <= 60), 'Age'] = 4
dataset.loc[ dataset['Age'] > 60, 'Age'] = 5
train['Age'].value_counts()
# Extract title from the name by defining pattern function
def get_title(name):
    title_search = re.search(r' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
plt.figure(figsize=(10,6))
sns.barplot(x='Title' , y='Survived' , data=train)
###Output
_____no_output_____
###Markdown
Now we can see that there are similar kinds of titles, so we may as well merge them into groups.
###Code
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Mrs','Miss'] , 'MM')
dataset['Title'] = dataset['Title'].replace(['Dr', 'Major', 'Col'] , 'DMC')
dataset['Title'] = dataset['Title'].replace(['Don', 'Rev', 'Capt', 'Jonkheer'] , 'DRCJ')
dataset['Title'] = dataset['Title'].replace(['Mme', 'Ms', 'Lady', 'Sir', 'Mlle', 'Countess'] , 'MMLSMC')
# mapping titles to labels
title_mapping= {"MM": 1, "Master":2, "Mr": 5, "DMC": 4, "DRCJ": 3, "MMLSMC": 0}
dataset['Title']=dataset['Title'].map(title_mapping)
dataset['Title']=dataset['Title'].fillna(3)
dataset['Title']=dataset['Title'].astype(int)
plt.figure(figsize=(10,6))
sns.barplot(x='Title' , y='Survived' , data=train)
#Map Female to 0 and Male to 1
for dataset in full_data:
dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)
for dataset in full_data:
# Remove all NULLS in the Embarked column
dataset['Embarked'] = dataset['Embarked'].fillna('S')
# Mapping Embarked
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
#Mapping Fare
for dataset in full_data:
dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train['Fare'].value_counts()
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)
test = test.drop(drop_elements, axis = 1)
###Output
_____no_output_____
###Markdown
All right, so now we have cleaned the features, extracted the relevant information from the categorical features, and dropped the categorical columns. Visualisations
###Code
train.head()
test.head()
###Output
_____no_output_____
###Markdown
Let's plot Pearson correlation heatmapLet's check the relation of each feature to the others
###Code
plt.figure(figsize=(10,10))
plt.title('Pearson correlation of features')
sns.heatmap(train.astype(float).corr(),linewidths=0.1,vmax=1.0,
square=True, cmap='PuBuGn', linecolor='white', annot=True)
###Output
_____no_output_____
###Markdown
Conclusion of plotOne thing that the Pearson correlation plot can tell us is that there are not too many features strongly correlated with one another. This is good from the point of view of feeding these features into your learning model, because it means that there isn't much redundant or superfluous data in our training set and we are happy that each feature carries with it some unique information
###Code
g = sns.pairplot(train[[u'Survived', u'Pclass', u'Sex', u'Age', u'Parch', u'Fare', u'Embarked',
       u'FamilySize', u'Title']], hue='Survived', palette = 'viridis', height=1.2, diag_kind = 'kde', diag_kws=dict(shade=True), plot_kws=dict(s=10) )
g.set(xticklabels=[])
###Output
_____no_output_____
###Markdown
Now that data cleaning and feature engineering are done, let's move on to applying machine learning models to the dataset and see how well they perform.
###Code
# define training and testing sets
X=train.drop('Survived',axis=1)
y=train['Survived']
## Logistic Regression
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X,y)
prediction=logmodel.predict(test)
y_test=pd.read_csv('submission.csv')
y_test=y_test.drop('PassengerId',axis=1)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,prediction))
from sklearn.svm import SVC
## Support Vector Machines
svm_model=SVC()
svm_model.fit(X,y)
pred_svc=svm_model.predict(test)
print(accuracy_score(y_test,pred_svc))
###Output
0.9066985645933014
|
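###Markdown
As a quick sanity check (an illustrative addition, not part of the original workflow), we can also cross-validate both models on the training data rather than relying on a single held-out file.
###Code
# 5-fold cross-validation on the training set
from sklearn.model_selection import cross_val_score
print('LogReg CV accuracy:', cross_val_score(logmodel, X, y, cv=5).mean())
print('SVC CV accuracy:', cross_val_score(svm_model, X, y, cv=5).mean())
###Output
_____no_output_____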
fmri-03/fmri-03-solutions.ipynb | ###Markdown
fMRI-03 Solutions
###Code
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
sns.set_context('notebook', font_scale=1.5)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Today's demonstration will be in two parts. In the first section, we will show you how to generate the predicted BOLD signal for analysis of a rapid event related design experiment. We will also show the sensitivity of these experiments to assumptions in the HRF. In the second section, we will discuss estimation efficiency and finite impulse response (FIR) designs. Part 1: Generating the predicted BOLD signalIn this first section, we will generate the predicted BOLD signal for a simple rapid event related (RER) experiment. In RER experiments, we have to consider two important design details: 1. The order of trials2. The order of null eventsIn this first example, we will provide for you a pre-optimized RER experiment design. In this experiment, we have two conditions. Each condition has 60 trials, and each trial lasts 1 second in duration. The total experiment length is 240 seconds. Let's make the design matrix for this experiment. [1] Define (super-sampled) timesHere we define the timing of the experiment. Importantly we first define the experiment in a "super-sampled" space, or we act like we have acquired far more data points than we actually have. We do this for several reasons. First, it functions to reduce the noisiness of our convolved regressors (more on that in a minute). Second, it allows us to model events that occur between TRs.
###Code
## Define experiment metadata.
n_times = 240
sfreq = 0.1
## Define (super-sampled) times.
sst = np.arange(0, n_times, sfreq)
###Output
_____no_output_____
###Markdown
[2] Generate boxcarsHere we define a "boxcar" timeseries. In this step we make a binary timeseries (comprised of 0s and 1s), where 0s represent neuronal silence and 1s represent neuronal activity. It is difficult to optimize the event timings *a priori*, so we have generated an optimized rapid event design for you. We can load in the event timings and generate the neural boxcars.
###Code
## Load experimental events.
npz = np.load('fmri-03-rer.npz')
events = npz['events']
events[:, -1] -= 1
## Generate boxcars.
boxcars = np.zeros((sst.size, 2))
for onset, offset, cond in events:
boxcars[np.logical_and(sst >= onset, sst < offset), int(cond)] = 1
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(sst, boxcars);
ax.set(xlim=(0,192), xlabel='Time (s)', yticks=[], ylabel='Neural Activity')
sns.despine()
###Output
_____no_output_____
###Markdown
[3] Define the HRFIn this step, we define the expected shape of the HRF. Following convention, we will use the **SPM HRF**.
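For reference, the canonical SPM HRF is a double-gamma function, i.e. a difference of two gamma densities (one for the initial peak, one for the late undershoot): $$ h(t) \propto \frac{t^{a_1-1} e^{-t/b_1}}{b_1^{a_1}\,\Gamma(a_1)} - c \, \frac{t^{a_2-1} e^{-t/b_2}}{b_2^{a_2}\,\Gamma(a_2)} $$ With SPM's default parameters, the peak lands near 6 s and the undershoot near 16 s.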
###Code
from fmritools.hrf import spm_hrf
## Define HRF.
hrf = spm_hrf(sfreq)
## Plot.
fig, ax = plt.subplots(1,1,figsize=(6,3))
ax.plot(sst[:hrf.size], hrf);
ax.set(xlabel='Time (s)', ylabel='AU')
sns.despine()
###Output
_____no_output_____
###Markdown
[4] ConvolutionConvolution describes a particular mathematical operation where we use two functions to produce a third function that expresses how the shape of one is modified by the other. In this case, we convolve the boxcars with the HRF to model how we expect the BOLD signal to change in response to the neural activity.
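Formally, for discrete signals, $$ (s \ast h)[n] = \sum_{m} s[m]\, h[n-m] $$ so each moment of neural activity contributes a copy of the HRF shifted to that moment, and the copies sum.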
###Code
## Convolve boxcars + HRF.
bold = np.apply_along_axis(np.convolve, 0, boxcars, hrf)[:sst.size]
## Normalize regressor.
bold /= bold.max(axis=0)
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(sst, bold);
ax.set(xlim=(0,192), xlabel='Time (s)', yticks=[], ylabel='Neural Activity')
sns.despine()
###Output
_____no_output_____
###Markdown
[5] DownsamplingIn this fifth and final step, we reduce the convolved timeseries to only those observations that we actually acquired.
###Code
## Define observation times.
tr = 1
times = np.arange(n_times) * tr
## Define downsampling indices.
ix = np.in1d(sst, times)
## Downsampling.
boxcars = boxcars[ix]
bold = bold[ix]
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(times, bold);
ax.set(xlim=(0,192), xlabel='Time (s)', yticks=[], ylabel='Neural Activity')
sns.despine()
###Output
_____no_output_____
###Markdown
Part 1.5: Simple RegressionNext, let's use the predicted BOLD timeseries we just made and use it perform simple linear regression. [1] Load and visualize data
###Code
## Load and extract data.
y1 = npz['y1']
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(times, y1);
ax.set(xlim=(0,192), xlabel='Time (s)', ylabel='BOLD (au)')
sns.despine()
###Output
_____no_output_____
###Markdown
[2] Construct design matrixThe design matrix is the collection of timeseries with which we will predict the observed data. Here we use the timeseries we made and an intercept, i.e. a column of 1s.
###Code
X = np.column_stack([np.ones(times.size), bold])
###Output
_____no_output_____
###Markdown
[3] Regression
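Concretely, `np.linalg.lstsq` returns the ordinary least squares estimate $$ \hat{\beta} = \left( X^T X \right)^{-1} X^T y $$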
###Code
## Perform regression.
b, _, _, _ = np.linalg.lstsq(X, y1, rcond=-1)
print('b1 = %0.3f, b2 = %0.3f' %(b[1], b[2]))
## Posterior predictive check.
yhat = X @ b
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(times, y1);
ax.plot(times, yhat);
ax.set(xlim=(0,y1.size), xlabel='Time (s)', ylabel='BOLD (au)')
sns.despine()
###Output
b1 = 1.522, b2 = 2.452
###Markdown
[4] Sensitivity to HRFOne important difference between block design experiments and rapid event designs is their relative sensitivity to mis-modeling the HRF. For a block design, the repeated presentation of a stimulus means that minor inaccuracies in modeling the HRF do not matter much, insofar as the summation of different HRF shapes will all yield the same asymptotic signal. The same is not true for rapid event designs.Next we will load in a second fMRI timeseries that has been generated from an fMRI model with identical amplitude but a different HRF shape.
###Code
## Load and extract data.
y2 = npz['y2']
## Perform regression.
b, _, _, _ = np.linalg.lstsq(X, y2, rcond=-1)
print('b1 = %0.3f, b2 = %0.3f' %(b[1], b[2]))
## Posterior predictive check.
yhat = X @ b
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,3))
ax.plot(times, y2);
ax.plot(times, yhat);
ax.set(xlim=(0,y2.size), xlabel='Time (s)', ylabel='BOLD (au)')
sns.despine()
###Output
b1 = 1.247, b2 = 2.153
###Markdown
Part 2: Estimating the HRFIn experiments, we might want to actually estimate the shape of the HRF. Why?1. As shown above, making assumptions might yield biased estimates of effects.2. The HRF has actual meaning! Being able to estimate it actually tells us something about neural processing.How do we do this? We use a finite impulse response (FIR) model. The logic is pretty straightforward. If we wanted to estimate the average HRF, we might cut out and align the HRF response starting from the onset of some event. An FIR design does the same thing, just through a design matrix. We create a binary matrix, where each column represents some window of HRF response for some condition. [1] Define FIR design matrixThe FIR design matrix is different from the design matrices we have discussed so far. Here we will create a binary matrix (containing only 0s and 1s). The matrix will have as many rows as observations, but each column will represent a different window of the HRF response. For example, if we have data at TR=1 and want to model 16s of the HRF response, we will have 16 columns per condition.
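Formally, the FIR model is $$ y(t) = \beta_0 + \sum_{c=1}^{k} \sum_{m=0}^{M-1} X_c(t-m)\, \beta_{c,m} + \epsilon(t) $$ where $X_c$ is the binary event indicator for condition $c$ and $\beta_{c,m}$ is the HRF estimate for condition $c$ at lag $m$ (here $M = 16$).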
###Code
## Define metadata.
window = 16 # Number of time points to measure
k = 2 # Number of conditions
tr = 1 # Repetition time
## Preallocate space.
FIR = np.zeros((n_times, k, window))
## Iteratively construct design matrix.
for onset, _, cond in events:
## Find end of event. Events are `window` time units after `onset`,
## but can't last longer than the length of our data, `X.shape[0]`
event_end = min(onset+window, X.shape[0])
## Define row indices.
row_ix = np.arange(onset, event_end, dtype=int)
## Define col indices.
col_ix = np.arange(0, row_ix.size, dtype=int)
## Update.
FIR[row_ix, int(cond), col_ix] = 1
## Reshape to make this a design matrix, collapsing the non-time dimensions.
FIR = FIR.reshape((n_times, -1))
## Plot.
fig, ax = plt.subplots(1,1,figsize=(6,6))
sns.heatmap(FIR, vmin=0, vmax=1, cmap='binary_r', cbar=False, ax=ax)
ax.set(xticks=[window/2., window*(1+1/2)], xticklabels=['Condition 1', 'Condition 2'], yticks=[], ylabel='Time (s)');
###Output
_____no_output_____
###Markdown
[2] Estimate HRF Shape
###Code
## Append intercept.
X = np.column_stack([np.ones(FIR.shape[0]), FIR])
## Regression.
b, _, _, _ = np.linalg.lstsq(X, y1, rcond=-1)
b = b[1:] # remove intercept
## Plot.
fig, ax = plt.subplots(1,1,figsize=(6,3))
ax.plot(b[:window], label='Condition 1')
ax.plot(b[window:], label='Condition 2')
ax.set(xlabel='Time (s)', ylabel='AU')
sns.despine()
plt.legend()
###Output
_____no_output_____
###Markdown
Part 3: Estimation EfficiencyIn this next section we will discuss estimation efficiency, defined as the ability to accurately model the shape of the HRF itself. We demonstrate why jittering events is essential to increasing estimation efficiency and being able to resolve the HRF. [1] Load DataTo make the point more clear, we will load in the FIR design matrices for three experiments. These three experiments all share the same qualities: (1) they all involve presenting stimuli from two conditions 60 times each; (2) each stimulus (120 total) is presented for 1s; and (3) each experiment is 240s in total length, such that each experiment is comprised of 120s of stimulus presentation time and 120s of null time. These three experiments differ in the order of stimuli presentation. Experiment 1 will present stimuli in a blocked design. Experiment 2 will present stimuli of alternating conditions every 2s (1s pause between each stimulus). experiment 3 will present stimuli in an (optimally) randomized design, with variable stimulus condition order and variable jitter. The experiments and their corresponding FIR design matrices are presented below. (We assume TR=1 and M=16.) As can easily be observed, the FIR design matrices of the first two experiments have strong, repeating structure whereas the third exhibits greater variability.
###Code
## Load experiment designs.
npz = np.load('fmri-03-efficiency.npz')
X1 = npz['X1']; X2 = npz['X2']; X3 = npz['X3']
times = npz['times']
## Plot designs.
fig, axes = plt.subplots(1,3,figsize=(12,4),sharex=True,sharey=True)
for ax, X in zip(axes, [X1, X2, X3]):
sns.heatmap(X, vmin=0, vmax=1, cmap='binary_r', cbar=False, ax=ax)
ax.set(xticks=[], yticks=[])
sns.despine()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Defining estimation efficiency[Liu & Frank (2004)](https://www.sciencedirect.com/science/article/pii/S1053811903005779) provided a formal definition of estimation efficiency:$$ C_{tot} = \frac{1}{ \frac{1}{N} \sum_{i \leq j} Tr[C_{ij}] } $$Put another way, the estimation efficiency of our experimental design, $C_{tot}$, is the inverse of the average trace of the contrasts of interest, $C_{ij}$.We define the estimation efficiency of a particular contrast as:$$ C_{ij} = L_{ij} \left( X^T X \right)^{-1} L_{ij}^T $$where $X^T X$ is the Fisher information matrix, and $L_{ij}$ is defined as:$$ L_{ij} = D_{ij} \otimes I_M $$where $D$ is a contrast vector as before, $I_M$ is the $M \times M$ identity matrix, and $\otimes$ is the Kronecker product.Ignoring the math for a second, we can observe that estimation efficiency has a formula similar to detection power, except now we are evaluating the orthogonality of sets of columns. This means that the estimation efficiency of an experiment bears a relationship to its information matrix, $X^TX$, similar to that observed for detection power.Let's plot the Fisher information matrix for each experiment below.
###Code
fig, axes = plt.subplots(1,3,figsize=(12,4))
for i, (ax, X) in enumerate(zip(axes, [X1, X2, X3])):
    ## Compute and plot the Fisher information matrix.
    xx = X.T @ X
    ax = sns.heatmap(xx, square=True, cbar=False, ax=ax)
    ax.set(xticks=[], yticks=[], title='Experiment %s' %(i+1))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The diagonal elements reflect the number of observations for a given FIR window segment and condition (60 in all of the above). The off-diagonal elements reflect the covariance of two non-identical FIR window segments. As a result of the matrix inversion, experiments will be more efficient to the extent that their diagonals are large and their off-diagonals are small. Intuitively, this makes sense. In order to efficiently estimate a given FIR window, we would like it to be unconfounded by any other FIR window. If experiments have regular structure, then given FIR windows will always occur at the same point relative to one another, making it difficult to infer whether the BOLD signal at a given point reflects the contribution of the HRF response from the $i$th or $j$th window. Thus we can surmise that the block design and regular rapid event designs will have low estimation efficiency. Let's plot the design efficiency for each experiment.
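To make this concrete, here is a minimal sketch of how $C_{tot}$ could be computed directly from the formulas above. This is an illustrative implementation under stated assumptions (design columns ordered as $k$ blocks of $M$ FIR regressors, no intercept column); it is not the actual source of `fmritools.design.design_efficiency`:
###Code
## Illustrative estimation-efficiency computation (a sketch, not fmritools source).
def efficiency_sketch(X, k, window):
    ## Inverse of the Fisher information matrix.
    xinv = np.linalg.pinv(X.T @ X)
    traces = []
    for i in range(k):
        for j in range(i, k):
            ## Contrast vector over conditions.
            D = np.zeros(k)
            if i == j:
                D[i] = 1.           # efficiency of estimating condition i itself
            else:
                D[i], D[j] = 1, -1  # efficiency of the i-vs-j contrast
            ## Expand the contrast over FIR windows via the Kronecker product.
            L = np.kron(D, np.identity(window))
            traces.append(np.trace(L @ xinv @ L.T))
    ## Efficiency is the inverse of the average trace.
    return 1. / np.mean(traces)
###Output
_____no_output_____
###Markdown
In practice we use the packaged helper, which performs this calculation for us.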
###Code
from fmritools.design import design_efficiency
## Initialize plot.
fig, ax = plt.subplots(1,1,figsize=(6,3))
## Iteratively compute and plot.
for i, X in enumerate([X1,X2,X3]):
C = design_efficiency(X, k, window)
ax.bar(i,C,1,color='#1f77b4')
## Add details to plot.
ax.set(xticks=range(3), xticklabels=['X1','X2','X3'], ylim=(0,3),ylabel=r'$C$')
sns.despine()
###Output
_____no_output_____
PlotlyandPython/Lessons/(01) Intro to Python/Notebooks/(10) If-Else Statements.ipynb | ###Markdown
If-Else Statements In this lesson we're going to move away from variable types and data structures and instead look at conditional programming. We'll see how to make decisions using the if, elif and else statements. This is one of the most powerful constructs we can use when programming and it's especially helpful when making different charts. For example, we might want to set a bar on a barchart to be a different colour if it shows a value above a certain threshold. We would use an if statement to achieve this. How do If statements work? The if statement in Python is constructed as follows:
````
if <condition>:
    <code to run>
````
The condition must be an expression which evaluates to a boolean (True or False) value. If the condition evaluates to True, then the code below is run. In Python, we must indent the code to run with four spaces (or a tab). Any code that is not indented after the statement is run anyway. In the example below, we're checking if a is greater than three, and if it is we print a string confirming this:
###Code
a = 5
if a > 3:
print("a is greater than 3")
print("This code runs anyway")
###Output
a is greater than 3
This code runs anyway
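###Markdown
Remember that the condition is just an expression which evaluates to a boolean value, so we can even store it in a variable first and use that variable in the if statement:
###Code
a = 5
condition = a > 3
print(condition)
if condition:
    print("a is greater than 3")
###Output
True
a is greater than 3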
###Markdown
When a is not greater than three, nothing happens in the if statement because there is no code to run (notice that the code outside of the if statement still runs):
###Code
a = 2
if a > 3:
print("a is greater than 3")
print("This code runs anyway")
###Output
This code runs anyway
###Markdown
Adding an Else Statement If we want to run some code when the condition evaluates to False, we need to add an else statement. The code that comes after this statement must be indented by four spaces (or a tab). The syntax is as follows:
````
if <condition>:
    <code to run if the condition is True>
else:
    <code to run if the condition is False>
````
The code after the else statement only runs when the if statement evaluates to False:
###Code
a = 2
if a > 3:
print("a is greater than 3")
else:
print("a is less than 3")
print("This code runs anyway")
###Output
a is less than 3
This code runs anyway
###Markdown
Adding other conditions We can also introduce more conditions using the elif statement (else if). These conditions work in the same way as the if statement; the code after each is only run if its condition evaluates to True. The syntax is as follows:
````
if <condition 1>:
    <code to run>
elif <condition 2>:
    <code to run>
elif <condition 3>:
    <code to run>
else:
    <code to run>
````
You might have noticed an error with the code in the cell above; what happens when a is 3? The statement "a is less than 3" is clearly not true here! We can use an elif statement to add another condition:
###Code
a = 3
if a > 3:
print("a is greater than 3")
elif a == 3:
print("a is equal to 3")
else:
print("a is less than 3")
print("This code runs anyway")
###Output
a is equal to 3
This code runs anyway
###Markdown
We can add as many elif statements as we wish:
###Code
a = 2
if a > 3:
print("a is greater than 3")
elif a == 3:
print("a is equal to 3")
elif a == 2:
print("a is equal to 2")
else:
print("a is less than 2")
print("This code runs anyway")
###Output
a is equal to 2
This code runs anyway
###Markdown
Nested If Statements We can place an If-Else statement as the code to run inside an if statement. The code to run in the nested if statement must be indented by 8 spaces (or two tabs). The syntax is as follows:
````
if <condition 1>:
    if <condition 2>:
        <code to run>
    else:
        <code to run>
else:
    <code to run>
````
Creating nested if statements allows us to create complex decision trees and to tailor our code to deal with many different situations. In the cell below, we first check if a is greater than 3. If this condition is True, we then invoke the nested if statement and check if a is greater than 4; if not, we know that it must be equal to 4, and this is captured in the else part of the nested if statement.
###Code
a = 6
if a > 3:
if a > 4:
print("a is greater than 4")
else:
print("a is equal to 4")
elif a == 3:
print("a is equal to 3")
else:
print("a is less than 3")
print("This code runs anyway")
###Output
a is greater than 4
This code runs anyway
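###Markdown
Finally, here's a taste of the charting use case mentioned at the start of this lesson: choosing a bar's colour depending on whether its value is above a threshold. The values and colour names here are just illustrative:
###Code
value = 7
threshold = 5
if value > threshold:
    colour = "red" # highlight bars above the threshold
else:
    colour = "blue" # default colour for all other bars
print(colour)
###Output
red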
|
kaggle-comp/ds1-tree-ensembles/Kaggle-comp-analysis-1.ipynb | ###Markdown
Kaggle Comp Loading the data
###Code
import pandas as pd
pd.options.display.max_columns = 500
df = pd.read_csv('test_features.csv')
df.head(10)
df.drop(['id', 'member_id'], axis=1, inplace=True)
df.shape
df.dtypes
df.term.value_counts()
###Output
_____no_output_____
###Markdown
I considered stripping the "months" out of the TERM column and making this a numeric column - but because there are only two values, it should be fine or even better to leave it as a categorical
###Code
##df.rename(columns={'term':'term_in_months'}, inplace=True)
""" REMOVE ALL NON NUMERIC CHARACTERS FROM A STRING """
#df['term_in_months'].replace(regex=True,inplace=True,to_replace=r'\D',value=r'')
df['int_rate'].replace(regex=True,inplace=True,to_replace=r'\D',value=r'')
# NOTE: r'\D' matches every non-digit character, so this also strips the decimal
# point (e.g. '10.99%' becomes '1099'), and the column remains a string dtype.
df.describe()
###Output
_____no_output_____ |
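###Markdown
A sketch of an alternative that keeps the decimal point and converts to a numeric dtype, shown on a throwaway series with hypothetical raw values (illustrative only, not applied to the dataframe):
###Code
# Illustrative only: keep the decimal point, then cast to float.
s = pd.Series(['10.99%', ' 7.25%'])
s.replace(regex=True, to_replace=r'[^\d.]', value=r'').astype(float)
###Output
_____no_output_____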
examples/cp/jupyter/SteelMill.ipynb | ###Markdown
Building steel coils This tutorial includes everything you need to set up decision optimization engines, build constraint programming models. When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.>This notebook is part of the **[Prescriptive Analytics for Python](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)**>It requires a **local installation of CPLEX Optimizers**. Table of contents:- [Describe the business problem](#Describe-the-business-problem)* [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)* [Use decision optimization](#Use-decision-optimization) * [Step 1: Download the library](#Step-1:-Download-the-library) * [Step 2: Model the Data](#Step-2:-Model-the-data) * [Step 3: Set up the prescriptive model](#Step-3:-Set-up-the-prescriptive-model) * [Define the decision variables](#Define-the-decision-variables) * [Express the business constraints](#Express-the-business-constraints) * [Express the objective](#Express-the-objective) * [Solve with Decision Optimization solve service](#Solve-with-Decision-Optimization-solve-service) * [Step 4: Investigate the solution and run an example analysis](#Step-4:-Investigate-the-solution-and-then-run-an-example-analysis)* [Summary](#Summary)**** Describe the business problem* The problem is to build steel coils from slabs that are available in a work-in-process inventory of semi-finished products. There is no limitation on the number of slabs that can be requested, but only a finite number of slab sizes is available (sizes 11, 13, 16, 17, 19, 20, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 40, 43, 45). * The problem is to select a number of slabs to build the coil orders, and to satisfy the following constraints: * A coil order can be built from only one slab. * Each coil order requires a specific process to build it from a slab. This process is encoded by a color. * Several coil orders can be built from the same slab. But a slab can be used to produce at most two different "colors" of coils. * The sum of the sizes of each coil order built from a slab must not exceed the slab size.* Finally, the production plan should minimize the unused capacity of the selected slabs.* This problem is based on **"prob038: Steel mill slab design problem" from CSPLib (www.csplib.org). It is a simplification of an industrial problem described in J. R. Kalagnanam, M. W. Dawande, M. Trumbo, H. S. Lee. "Inventory Matching Problems in the Steel Industry," IBM Research Report RC 21171, 1998**.* Please refer to the documentation for the appropriate setup of the solving configuration. ***** How decision optimization can help* Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. + For example: + Automate complex decisions and trade-offs to better manage limited resources.
+ Take advantage of a future opportunity or mitigate a future risk. + Proactively update recommendations based on changing events. + Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Download the library Run the following code to install the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier.
###Code
import sys
try:
import docplex.cp
except:
if hasattr(sys, 'real_prefix'):
#we are in a virtual env.
!pip install docplex
else:
!pip install --user docplex
###Output
_____no_output_____
###Markdown
Note that the more global package docplex contains another subpackage docplex.mp that is dedicated to Mathematical Programming, another branch of optimization. Step 2: Model the data
###Code
from docplex.cp.model import *
###Output
_____no_output_____
###Markdown
Set model parameter
###Code
from collections import namedtuple
##############################################################################
# Model configuration
##############################################################################
# The coil orders to produce (each has an index, a weight and a color)
TUPLE_ORDER = namedtuple("TUPLE_ORDER", ["index", "weight", "color"])
orders = [ TUPLE_ORDER(1, 22, 5),
TUPLE_ORDER(2, 9, 3),
TUPLE_ORDER(3, 9, 4),
TUPLE_ORDER(4, 8, 5),
TUPLE_ORDER(5, 8, 7),
TUPLE_ORDER(6, 6, 3),
TUPLE_ORDER(7, 5, 6),
TUPLE_ORDER(8, 3, 0),
TUPLE_ORDER(9, 3, 2),
TUPLE_ORDER(10, 3, 3),
TUPLE_ORDER(11, 2, 1),
TUPLE_ORDER(12, 2, 5)
]
# The total number of slabs available. In theory this can be unlimited,
# but we impose a reasonable upper bound in order to produce a practical
# optimization model.
NB_SLABS = 12
# A slab can be used to produce coils of at most this many different colors.
MAX_COLOR_PER_SLAB = 2
# The different slab weights available.
slab_weights = [ 0, 11, 13, 16, 17, 19, 20, 23, 24, 25,
26, 27, 28, 29, 30, 33, 34, 40, 43, 45 ]
nb_orders = len(orders)
slabs = range(NB_SLABS)
allcolors = set([ o.color for o in orders ])
# CPO needs lists for pack constraint
order_weights = [ o.weight for o in orders ]
# The heaviest slab
max_slab_weight = max(slab_weights)
# The amount of loss incurred for different amounts of slab use
# The loss will depend on how much less steel is used than the slab
# just large enough to produce the coils.
loss = [ min([sw-use for sw in slab_weights if sw >= use]) for use in range(max_slab_weight+1)]
###Output
_____no_output_____
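###Markdown
As a quick sanity check of the loss table: using 12 units of steel requires the size-13 slab (1 unit lost), while using 44 units requires the size-45 slab:
###Code
## Spot-check a few entries of the loss table.
for use in [0, 12, 21, 44]:
    print("use = %2d -> loss = %d" % (use, loss[use]))
###Output
use =  0 -> loss = 0
use = 12 -> loss = 1
use = 21 -> loss = 2
use = 44 -> loss = 1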
###Markdown
Step 3: Set up the prescriptive model Create CPO model
###Code
mdl = CpoModel(name="steel_mill")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
# Which slab is used to produce each coil
production_slab = integer_var_dict(orders, 0, NB_SLABS-1, "production_slab")
# How much of each slab is used
slab_use = integer_var_list(NB_SLABS, 0, max_slab_weight, "slab_use")
###Output
_____no_output_____
###Markdown
Express the business constraints
###Code
# The total loss is the sum of the loss incurred on each slab
total_loss = sum([element(slab_use[s], loss) for s in slabs])
# The orders are allocated to the slabs with capacity
mdl.add(pack(slab_use, [production_slab[o] for o in orders], order_weights))
# At most MAX_COLOR_PER_SLAB colors per slab
for s in slabs:
    su = 0
    for c in allcolors:
        # lo is true if any order of color c is produced on slab s
        lo = False
        for o in orders:
            if o.color==c:
                lo = (production_slab[o] == s) | lo
        # su counts the number of distinct colors used on slab s
        su += lo
    mdl.add(su <= MAX_COLOR_PER_SLAB)
###Output
_____no_output_____
###Markdown
Express the objective
###Code
# Add minimization objective
mdl.add(minimize(total_loss))
###Output
_____no_output_____
###Markdown
Solve the model
###Code
print("\nSolving model....")
# Search strategy
mdl.set_search_phases([search_phase([production_slab[o] for o in orders])])
msol = mdl.solve(FailLimit=100000, TimeLimit=10)
###Output
_____no_output_____
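###Markdown
Before inspecting the full assignment, we can optionally query the objective value (the total loss) from the solve result; this uses the standard docplex.cp solve-result API:
###Code
# Optional: report the objective value if a solution was found.
if msol:
    print("Total loss = " + str(msol.get_objective_values()[0]))
###Output
_____no_output_____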
###Markdown
Step 4: Investigate the solution and then run an example analysis
###Code
# Print solution
if msol:
print("Solution: ")
from_slabs = [set([o.index for o in orders if msol[production_slab[o]]== s])for s in slabs]
slab_colors = [set([o.color for o in orders if o.index in from_slabs[s]])for s in slabs]
for s in slabs:
if len(from_slabs[s]) > 0:
print("Slab = " + str(s))
print("\tLoss = " + str(loss[msol[slab_use[s]]]))
print("\tcolors = " + str(slab_colors[s]))
print("\tOrders = " + str(from_slabs[s]) + "\n")
else:
print("No solution found")
###Output
_____no_output_____
notebooks/210407_ctx_space_subsampling.ipynb | ###Markdown
context effect space subsampling In this pseudoexperiment I test how increasing the sampled context effect space (number of probes and/or contexts) affects the frequency, amplitude and duration of contextual effects. This is achieved by randomly selecting a subset of the stimuli used as contexts and/or probes and looking at the number of instances (cell * context * probe) with significant effects and the mean amplitude and duration of said effects.
###Code
from pathlib import Path
import itertools as itt
import joblib as jl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.formula.api import ols
from bioinfokit.analys import stat
import pandas as pd
import seaborn as sns
from IPython.display import display
from statannot import add_stat_annotation
from cycler import cycler
from src.data.region_map import region_map
from src.metrics.consolidated_dprimes import _load_site_formated_raste, single_cell_dprimes, full_dPCA_dprimes
from src.data.dPCA import _cpp_dPCA, format_raster
from src.visualization.fancy_plots import _raster, unit_line, savefig
from src.metrics.significance import _significance
from src.metrics.dprime import flip_dprimes
from src.metrics.reliability import signal_reliability
from src.data.rasters import raster_from_sig
from src.data.load import load
#general plotting formatting
plt.style.use('dark_background')
light_color_cycle = cycler(color=['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'])
trans_color_map = {'silence': '#377eb8', # blue
'continuous': '#ff7f00', # orange
'similar': '#4daf4a', # green
'sharp': '#a65628'} # brown
params = {'axes.labelsize': 15,
'axes.titlesize': 20,
'axes.spines.top': False,
'axes.spines.right': False,
'axes.prop_cycle': light_color_cycle,
'xtick.labelsize': 11,
'ytick.labelsize': 11,
'lines.markersize': 8,
'figure.titlesize': 30,
'figure.figsize': [4,4],
'figure.autolayout':True,
'svg.fonttype': 'none',
'font.sans-serif': 'Arial',
'legend.loc': 'upper right',
'legend.frameon': False,
'legend.fontsize': 15,
'legend.markerscale': 3,
}
widescreen = [13.3, 7.5]
plt.rcParams.update(params)
# pulls DF, removes unused columns and data, refactors redundant id columns and creates probe_id
DF = jl.load(Path(f'../data/210302_consolidated_summary_DF_alpha_0.05/dprime_absolute-None_montecarlo-1000_raster_fs-30_reliability-0.1_smoothing_window-0_zscore-True'))
dim_reduction = 'pdPCA'
# no need for means or mean policy, working on permutations only
ff_probe = DF.probe != 'mean'
ff_pairs = DF.context_pair != 'mean'
ff_stim = DF.stim_type == 'permutations'
ff_mean = DF.mean_signif_type == 'shuffles'
ff_analysis = DF.analysis.isin(['SC', dim_reduction])
ff_corr = DF.mult_comp_corr == 'consecutive_3'
ff_metric = DF.metric.isin(['significant_abs_mass_center', 'significant_abs_sum'])
good_cols =['analysis', 'mult_comp_corr', 'region', 'siteid', 'cellid', 'context_pair',
'probe', 'metric', 'value']
filtered = DF.loc[ff_stim & ff_mean & ff_pairs & ff_probe & ff_analysis & ff_corr & ff_metric, good_cols]
# rename metrics and analysis for ease of ploting
filtered['metric'] = filtered['metric'].replace({'significant_abs_mass_center': 'center of mass (ms)',
'significant_abs_sum': "integral (d'*ms)"})
filtered['analysis'] = filtered['analysis'].replace({'SC': 'single cell',
dim_reduction: 'population'})
filtered['id'] = filtered['cellid'].fillna(value=filtered['siteid'])
filtered = filtered.drop(columns=['cellid', 'siteid'])
filtered['value'] = filtered['value'].fillna(value=0)
# creates a new column relating probe with context pairs
ctx = np.asarray([row.split('_') for row in filtered.context_pair], dtype=int)
prb = np.asarray(filtered.probe, dtype=int)
silence = ctx == 0
same = ctx == prb[:,None]
different = np.logical_and(~silence, ~same)
name_arr = np.full_like(ctx, np.nan, dtype=object)
name_arr[silence] = 'silence'
name_arr[same] = 'same'
name_arr[different] = 'diff'
comp_name_arr = np.apply_along_axis('_'.join, 1, name_arr)
# swaps classification names to avoid repetitions, i.e. diff_same == same_diff
comp_name_arr[np.where(comp_name_arr == 'same_silence')] = 'silence_same'
comp_name_arr[np.where(comp_name_arr == 'diff_silence')] = 'silence_diff'
comp_name_arr[np.where(comp_name_arr == 'diff_same')] = 'same_diff'
filtered['trans_pair'] = comp_name_arr
ord_cols = ['analysis', 'region', 'id', 'context_pair', 'trans_pair', 'probe', 'metric', 'value']
pivot_idx = [col for col in ord_cols if col not in ['value', 'metric']]
pivoted = filtered.pivot_table(index=pivot_idx, columns='metric', values='value', aggfunc='first').reset_index()
full_long = filtered
def nozero_mean(arr):
    # mean over nonzero values (zeros treated as missing); avoids mutating the input
    arr = np.where(arr == 0, np.nan, arr)
    return np.nanmean(arr)
def nozero_count(arr):
    # number of values greater than zero
    return np.sum(arr > 0)
def nonan_proportion(arr):
    # percentage of non-NaN values
    return np.sum(~np.isnan(arr)) / arr.size * 100
def signif_proportion(arr):
    # percentage of values greater than zero
    return np.sum(arr > 0) / np.size(arr) * 100
###Output
_____no_output_____
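###Markdown
A quick check of the aggregation helpers on a toy array (purely illustrative):
###Code
toy = np.asarray([0, 0, 1.5, 2.0])
print(signif_proportion(toy)) # 2 of 4 values are > 0
print(nozero_count(toy))
###Output
50.0
2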
###Markdown
systematic subsampling of contexts and probes Iterates over each context or probe subsample size, considering all possible combinations for each size. Counts the number of significant instances, then stores the results in a dataframe with the specific subsamples as indices.
###Code
# filters dataframe and adds required columns
filtered = pivoted.loc[pivoted.analysis=='single cell', :]
filtered['site'] = filtered.id.apply(lambda x: x[:7])
ctx_pairs = [pair.split('_') for pair in filtered.context_pair]
ctx_pairs = np.stack(ctx_pairs, axis=0)
filtered['ctx_0'] = ctx_pairs[:, 0]
filtered['ctx_1'] = ctx_pairs[:, 1]
# function to aggregate each single cell. gives the proportion of significant instances
agg_funcs = {"signif_proportion": ("integral (d'*ms)", signif_proportion)}
# iterates over all sizes of probe subsampling, i.e. 1, 2, 3 and 4 probes.
all_probes = filtered.probe.sort_values().unique()
prb_subsamp_signif = pd.DataFrame()
for num_probe in range(1, len(all_probes) + 1):
n_probe_groups = list(itt.combinations(all_probes, num_probe))
# iterates over all possible combinations of n probes
for pg, probe_group in enumerate(n_probe_groups):
probes_df = filtered.loc[filtered.probe.isin(probe_group), :].set_index('region', 'site', 'id')
grouped = probes_df.groupby(['region', 'site', 'id']).agg(**agg_funcs).copy()
grouped.loc[grouped.signif_proportion == 0] = np.nan
grouped['n_probes'] = num_probe
grouped['prb_group'] = '_'.join(probe_group)
prb_subsamp_signif = prb_subsamp_signif.append(grouped.reset_index())
# iterates over all sizes of context subsampling, i.e. 2, 3, 4 and 5 contexts.
all_contexts = np.unique(ctx_pairs)
ctx_subsamp_signif = pd.DataFrame()
for num_ctx in range(2, len(all_contexts)+1):
n_ctx_groups = list(itt.combinations(all_contexts, num_ctx))
# iterates over all possible combinations of n probes
for cg, ctx_group in enumerate(n_ctx_groups):
ctx_df = filtered.loc[(filtered.ctx_0.isin(ctx_group)) &
                             ((filtered.ctx_1.isin(ctx_group))), :].set_index(['region', 'site', 'id'])
grouped = ctx_df.groupby(['region', 'site', 'id']).agg(**agg_funcs).copy()
grouped.loc[grouped.signif_proportion == 0] = np.nan
grouped['n_contexts'] = num_ctx
grouped['ctx_group'] = '_'.join(ctx_group)
ctx_subsamp_signif = ctx_subsamp_signif.append(grouped.reset_index())
# contexts
display(ctx_subsamp_signif)
###Output
_____no_output_____
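###Markdown
For reference, the number of subsample groups evaluated at each size is just a binomial coefficient, e.g. C(4, 2) = 6 probe groups of size 2 (this check assumes Python >= 3.8 for `math.comb`):
###Code
from math import comb
print([comb(4, n) for n in range(1, 5)]) # probe groups per subsample size
print([comb(5, n) for n in range(2, 6)]) # context groups per subsample size
###Output
[4, 6, 4, 1]
[10, 10, 5, 1]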
###Markdown
Proportion of significant instances for individual cells, only looking at cells with at least one significant instance
###Code
fig, (ctx_ax, prb_ax) = plt.subplots(1, 2, sharey=True, figsize=[10,5])
_ = sns.pointplot(x='n_contexts', y='signif_proportion', data=ctx_subsamp_signif, hue='region', dodge=True, ax=ctx_ax)
_ = sns.pointplot(x='n_probes', y='signif_proportion', data=prb_subsamp_signif, hue='region', dodge=True, ax=prb_ax)
fig.suptitle('Proportion of significant instances\nfor individual cells', fontsize=20)
###Output
_____no_output_____
###Markdown
count of significant cells by site and/or region
###Code
# collapses across neurons. Keeps information about region and ctx / prb subsamples.
ctx_count_by_site = ctx_subsamp_signif.set_index(['region','site', 'n_contexts','ctx_group']
).groupby(['region', 'site', 'n_contexts','ctx_group']
).agg(signfi_cell_prop=('signif_proportion', nonan_proportion)).reset_index()
prb_count_by_site = prb_subsamp_signif.set_index(['region','site', 'n_probes','prb_group']
).groupby(['region', 'site', 'n_probes','prb_group']
).agg(signfi_cell_prop=('signif_proportion', nonan_proportion)).reset_index()
# collapses across neurons, grouping by site. Keeps information about region and ctx / prb subsamples.
# ctx_count_by_site = ctx_subsamp_signif.set_index(['region','site', 'n_contexts','ctx_group']
# ).groupby(['region', 'site', 'n_contexts','ctx_group']
# ).agg(signfi_cell_prop=('signif_proportion', nonan_proportion)
# ).groupby(['region', 'site', 'n_contexts']
# ).agg(signfi_cell_prop=('signfi_cell_prop', np.nanmean)).reset_index()
#
# prb_count_by_site = prb_subsamp_signif.set_index(['region','site', 'n_probes','prb_group']
# ).groupby(['region', 'site', 'n_probes','prb_group']
# ).agg(signfi_cell_prop=('signif_proportion', nonan_proportion)
# ).groupby(['region', 'site', 'n_probes']
# ).agg(signfi_cell_prop=('signfi_cell_prop', np.nanmean)).reset_index()
fig, (ctx_ax, prb_ax) = plt.subplots(1, 2, sharey=True, figsize=[10,5])
_ = sns.pointplot(x='n_contexts', y='signfi_cell_prop', hue='region', data=ctx_count_by_site,
dodge=True, s=2, capsize=0.2, ci=68, ax=ctx_ax,)
_ = sns.stripplot(x='n_contexts', y='signfi_cell_prop', hue='region', data=ctx_count_by_site,
dodge=True, alpha=0.1, ax=ctx_ax)
ctx_ax.legend([],[], frameon=False)
_ = sns.pointplot(x='n_probes', y='signfi_cell_prop', hue='region', data=prb_count_by_site,
dodge=True, s=2, capsize=0.2, ci=68, ax=prb_ax)
_ = sns.stripplot(x='n_probes', y='signfi_cell_prop', hue='region', data=prb_count_by_site,
dodge=True, alpha=0.1, ax=prb_ax)
prb_ax.legend([],[], frameon=False)
fig.suptitle('count of significant cells\nper site', fontsize=20)
###Output
_____no_output_____
###Markdown
visualization of all cells in a site Explore an option of showing all cells vs all combinations of context_pair * probe for a given site
###Code
# cell_descriptio = filtered.pivot_table
# display(full_long)
pivot_ready = full_long.loc[full_long.analysis == 'single cell',
['region', 'id', 'trans_pair', 'context_pair', 'probe', 'metric', 'value']]
pivot_ready['site'] = pivot_ready.id.apply(lambda x: x[:7])
summary = pivot_ready.pivot_table(values='value', index=['region', 'site', 'id'],
columns=['metric', 'probe', 'trans_pair', 'context_pair'], fill_value=np.nan)
summary.index = summary.index.droplevel(0)
display(summary)
site = 'CRD012b'
# site = 'ARM021b'
site_eg = summary.loc[[site], ["integral (d'*ms)",]].droplevel(level=0, axis=0)
site_eg.columns = range(site_eg.shape[1])
site_eg
sns.heatmap(data=site_eg, xticklabels=False)
###Output
_____no_output_____
###Markdown
is context or probe identity driving the diversity in contextual effects?
###Code
# display(filtered)
group_ready = filtered.set_index(['region', 'id', 'context_pair', 'trans_pair', 'probe'])
prb_diversity = group_ready.groupby(['region', 'id', 'context_pair']).agg(signif_proportion=("integral (d'*ms)", signif_proportion))
ctx_diversity = group_ready.groupby(['region', 'id', 'probe']).agg(signif_proportion=("integral (d'*ms)", signif_proportion))
# display(prb_diversity)
# display(ctx_diversity)
diversity = pd.concat([prb_diversity, ctx_diversity], axis=0, keys=['probe', 'context'],
                      names=['diversity', 'region', 'id', 'sound_id'])
# take average diversity per cell
diversity.loc[diversity.signif_proportion == 0, ['signif_proportion']] = np.nan
diversity = diversity.groupby(['diversity', 'region', 'id']).agg(np.nanmean)
diversity.reset_index(inplace=True)
display(diversity)
fig, ax = plt.subplots(figsize=(5,5))
# sns.stripplot(x='diversity', y='signif_proportion', hue='region', data=diversity,
# dodge=True, alpha=0.1, ax=ax)
sns.pointplot(x='diversity', y='signif_proportion', hue='region', data=diversity,
dodge=True, s=2, capsize=0.2, ci=68, ax=ax)
ax.legend([],[], frameon=False)
ax.set_xlabel('diversity source')
ax.set_ylabel('proportion significant (%)')
title = 'single cell diversity source'
savefig(fig, 'DAC4', title)
###Output
_____no_output_____ |
algoExpert/pattern_matcher/solution.ipynb | ###Markdown
Pattern Matcher[link](https://www.algoexpert.io/questions/Pattern%20Matcher) My Solution
###Code
def patternMatcher(pattern, string):
# Write your code here.
# O(n^2 + m) time, o(n + m) space
# where n is the length of the string
# and m is the length of the pattern
isStartWithX = True if pattern[0] == 'x' else False
if not isStartWithX:
pattern = swapPatternXY(pattern)
countX = countPatternCharacter(pattern, 'x')
countY = len(pattern) - countX
patterXStart = countPatternCharacterStart(pattern, 'x')
maxXLength = len(string) // countX
for nX in range(1, maxXLength + 1):
x = string[:nX]
start = "".join([x for i in range(patterXStart)])
numStart = nX * patterXStart
if string[:numStart] != start:
continue
numberCharLeft = len(string) - nX * countX
if countY == 0:
yLength = 0
elif numberCharLeft % countY != 0:
continue
else:
yLength = numberCharLeft // countY
y = string[numStart:numStart + yLength]
newString = createStringBaseOnPattern(pattern, x, y)
if string != newString:
continue
else:
return output(x, y, isStartWithX)
return []
def swapPatternXY(pattern):
res = []
for c in pattern:
if c == 'x':
res.append('y')
else:
res.append('x')
return ''.join(res)
def countPatternCharacter(pattern, character):
count = 0
for c in pattern:
if c == character:
count += 1
return count
def countPatternCharacterStart(pattern, character):
count = 0
for c in pattern:
if c == character:
count += 1
else:
break
return count
def createStringBaseOnPattern(pattern, x, y):
l = []
for c in pattern:
if c == 'x':
l.append(x)
else:
l.append(y)
return ''.join(l)
def output(x, y, isNotSwaped):
if not isNotSwaped:
x, y = y, x
return [x, y]
def patternMatcher(pattern, string):
# Write your code here.
isStartWithX = True if pattern[0] == 'x' else False
if not isStartWithX:
pattern = swapPatternXY(pattern)
countX = countPatternCharacter(pattern, 'x')
countY = len(pattern) - countX
patterXStart = countPatternCharacterStart(pattern, 'x')
maxXLength = len(string) // countX
if countY != 0:
for nX in range(1, maxXLength + 1):
x = string[:nX]
start = "".join([x for i in range(patterXStart)])
numStart = nX * patterXStart
if string[:numStart] != start:
continue
numberCharLeft = len(string) - nX * countX
if numberCharLeft % countY != 0:
continue
yLength = int(numberCharLeft / countY)
y = string[numStart:numStart + yLength]
newString = createStringBaseOnPattern(pattern, x, y)
if string != newString:
continue
else:
return output(x, y, isStartWithX)
else:
nX = maxXLength
x = string[:nX]
newString = createStringBaseOnPattern(pattern, x, '')
if string != newString:
return []
else:
return output(x, '', isStartWithX)
return []
def swapPatternXY(pattern):
res = []
for c in pattern:
if c == 'x':
res.append('y')
else:
res.append('x')
return ''.join(res)
def countPatternCharacter(pattern, character):
count = 0
for c in pattern:
if c == character:
count += 1
return count
def countPatternCharacterStart(pattern, character):
count = 0
for c in pattern:
if c == character:
count += 1
else:
break
return count
def createStringBaseOnPattern(pattern, x, y):
l = []
for c in pattern:
if c == 'x':
l.append(x)
else:
l.append(y)
return ''.join(l)
def output(x, y, isNotSwaped):
if not isNotSwaped:
x, y = y, x
return [x, y]
###Output
_____no_output_____
###Markdown
Expert Solution
###Code
# O(n^2 + m) time, o(n + m) space
def patternMatcher(pattern, string):
if len(pattern) > len(string):
return []
newPattern = getNewPattern(pattern)
didSwitch = newPattern[0] != pattern[0]
counts = {"x" : 0, "y": 0}
firstYPos = getCountsAndFirstYPos(newPattern, counts)
if counts["y"] != 0:
for lenOfX in range(1, len(string)):
lenOfY = (len(string) - lenOfX * counts["x"]) / counts["y"]
if lenOfY <= 0 or lenOfY % 1 != 0:
continue
lenOfY = int(lenOfY)
yIdx = firstYPos * lenOfX
x = string[:lenOfX]
y = string[yIdx : yIdx + lenOfY]
potentialMatch = map(lambda char: x if char == "x" else y, newPattern)
if string == "".join(potentialMatch):
return [x, y] if not didSwitch else [y, x]
else:
lenOfX = len(string) / counts["x"]
if lenOfX % 1 == 0:
lenOfX = int(lenOfX)
x = string[:lenOfX]
potentialMatch = map(lambda char: x, newPattern)
if string == "".join(potentialMatch):
return [x, ""] if not didSwitch else ["", x]
return []
def getNewPattern(pattern):
patternLetters = list(pattern)
if pattern[0] == "x":
return patternLetters
else:
return list(map(lambda char: "x" if char == "y" else "y", patternLetters))
def getCountsAndFirstYPos(pattern, counts):
firstYPos = None
for i, char in enumerate(pattern):
counts[char] += 1
if char == "y" and firstYPos is None:
firstYPos = i
return firstYPos
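# A quick hypothetical sanity check (not from the original notebook):
# here "go" plays the role of x and "powerranger" the role of y.
print(patternMatcher("xxyxxy", "gogopowerrangergogopowerranger"))
# expected output: ['go', 'powerranger']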
###Output
_____no_output_____ |
yelp_regression_solution.ipynb | ###Markdown
Project: Yelp Rating Regression PredictorThe restaurant industry is tougher than ever, with restaurant reviews blazing across the Internet from day one of a restaurant's opening. But as a lover of food, you and your friend decide to break into the industry and open up your own restaurant, Danielle's Delicious Delicacies. Since a restaurant's success is highly correlated with its reputation, you want to make sure Danielle's Delicious Delicacies has the best reviews on the most queried restaurant review site: Yelp! While you know your food will be delicious, you think there are other factors that play into a Yelp rating and will ultimately determine your business's success. With a dataset of different restaurant features and their Yelp ratings, you decide to use a Multiple Linear Regression model to investigate what factors most affect a restaurant's Yelp rating and predict the Yelp rating for your restaurant!In this project we'll be working with a real dataset provided by Yelp. We have provided six files, listed below with a brief description:* `yelp_business.json`: establishment data regarding location and attributes for all businesses in the dataset* `yelp_review.json`: Yelp review metadata by business* `yelp_user.json`: user profile metadata by business* `yelp_checkin.json`: online checkin metadata by business* `yelp_tip.json`: tip metadata by business* `yelp_photo.json`: photo metadata by businessFor a more detailed explanation of the features in each `.json` file, see the accompanying [explanatory feature document](https://docs.google.com/document/d/1V6FjJpKspVBOOBs4E7fBfp_yzHn0--XJkC2uUtWuRgM/edit).Let's get started by exploring the data in each of these files to see what we are working with. Load the Data and Take a PeekTo get a better understanding of the dataset we can use Pandas to explore the data in DataFrame form. In the code block below we have imported Pandas for you. The `read_json()` method reads data from a json file into a DataFrame, as shown below:```pythondf = pd.read_json('file_name.json', lines=True)```Load the data from each of the json files with the following naming conventions:* `yelp_business.json` into a DataFrame named `businesses`* `yelp_review.json` into a DataFrame named `reviews`* `yelp_user.json` into a DataFrame named `users`* `yelp_checkin.json` into a DataFrame named `checkins`* `yelp_tip.json` into a DataFrame named `tips`* `yelp_photo.json` into a DataFrame named `photos`Importing that data could take 10 to 20 seconds to run depending on your computer, but don't worry, once it's loaded in you're ready to go!
###Code
import pandas as pd
businesses = pd.read_json('yelp_business.json',lines=True)
reviews = pd.read_json('yelp_review.json',lines=True)
users = pd.read_json('yelp_user.json',lines=True)
checkins = pd.read_json('yelp_checkin.json',lines=True)
tips = pd.read_json('yelp_tip.json',lines=True)
photos = pd.read_json('yelp_photo.json',lines=True)
###Output
_____no_output_____
###Markdown
In order to more clearly see the information in our DataFrame, we can adjust the number of columns shown (`max_columns`) and the number of characters shown in a column (`max_colwidth`) with the below code:```pythonpd.options.display.max_columns = number_of_columns_to_displaypd.options.display.max_colwidth = number_of_characters_to_display```Set `max_columns` to `60` and `max_colwidth` to `500`. We are working with some BIG data here!
###Code
pd.options.display.max_columns = 60
pd.options.display.max_colwidth = 500
###Output
_____no_output_____
###Markdown
Inspect the first five rows of each DataFrame using the `.head()` method to get an overview of the data (make sure to check each DataFrame in a separate cell in order to view it properly).
###Code
businesses.head()
reviews.head()
users.head()
checkins.head()
tips.head()
photos.head()
###Output
_____no_output_____
###Markdown
How many different businesses are in the dataset? What are the different features in the review DataFrame?
###Code
print(len(businesses))
print(reviews.columns)
###Output
188593
Index(['business_id', 'average_review_age', 'average_review_length',
'average_review_sentiment', 'number_funny_votes', 'number_cool_votes',
'number_useful_votes'],
dtype='object')
###Markdown
What is the range of values for the features in the user DataFrame?
###Code
users.describe()
###Output
_____no_output_____
###Markdown
What is the Yelp rating, or `stars`, of the establishment with `business_id` = `5EvUIR4IzCWUOm0PsUZXjA`. Use Pandas boolean indexing to find the Yelp rating, using the syntax below:```pythondf[df['column_we_know'] == 'value_we_know']['column_we_want']```
###Code
businesses[businesses['business_id'] == '5EvUIR4IzCWUOm0PsUZXjA']['stars']
###Output
_____no_output_____
###Markdown
What feature, or column, do the DataFrames have in common? Merge the DataSince we are working with data from several files, we need to combine the data into a single DataFrame that allows us to analyze the different features with respect to our target variable, the Yelp rating. We can do this by merging the multiple DataFrames we have together, joining them on the columns they have in common. In our case, this unique identifying column is the `business_id`. We can merge two DataFrames together with the following syntax:```pythonpd.merge(left, right, how='inner/outer/left/right', on='column(s)_to_merge_on')```* `left` is the DataFrame on the left side of our merge* `right` is the DataFrame on the right side of our merge* `how` describes the style of merge we want to complete (similar to inner/outer/left/right joins in SQL)* `on` is the column or columns to perform the merge on (the column connecting the two tables)Given our six DataFrames, we will need to perform 5 merges to combine all the data into one DataFrame. In the cell below we merged the business table and the review table into a new DataFrame, `df`, for you. After the merge we've added all the rows from `businesses` and `reviews` together, but kept the same total number of rows! Run the cell to perform the merge and confirm the number of rows in `df`.
###Code
df = pd.merge(businesses, reviews, how='left', on='business_id')
print(len(df))
###Output
188593
###Markdown
Merge each of the other 4 DataFrames into our new DataFrame `df` to combine all the data together. Make sure that `df` is the left DataFrame in each merge and `how=left` since not every DataFrame includes every business in the dataset (this way we won't lose any data during the merges). Once combined, print out the columns of `df`. What features are in this new DataFrame?
###Code
df = pd.merge(df, users, how='left', on='business_id')
df = pd.merge(df, checkins, how='left', on='business_id')
df = pd.merge(df, tips, how='left', on='business_id')
df = pd.merge(df, photos, how='left', on='business_id')
print(df.columns)
###Output
Index(['address', 'alcohol?', 'attributes', 'business_id', 'categories',
'city', 'good_for_kids', 'has_bike_parking', 'has_wifi', 'hours',
'is_open', 'latitude', 'longitude', 'name', 'neighborhood',
'postal_code', 'price_range', 'review_count', 'stars', 'state',
'take_reservations', 'takes_credit_cards', 'average_review_age',
'average_review_length', 'average_review_sentiment',
'number_funny_votes', 'number_cool_votes', 'number_useful_votes',
'average_number_friends', 'average_days_on_yelp', 'average_number_fans',
'average_review_count', 'average_number_years_elite', 'time',
'weekday_checkins', 'weekend_checkins', 'average_tip_length',
'number_tips', 'average_caption_length', 'number_pics'],
dtype='object')
###Markdown
Clean the DataWe are getting really close to the fun analysis part! We just have to clean our data a bit so we can focus on the features that might have predictive power for determining an establishment's Yelp rating.In a Linear Regression model, our features will ideally be continuous variables that have an effect on our dependent variable, the Yelp rating. For this project we will also be working with some features that are binary, on the scale [0,1]. With this information, we can remove any columns in the dataset that are not continuous or binary, and that we do not want to make predictions on. The cell below contains a list of these unnecessary features. Drop them from `df` with Pandas' drop syntax, provided below:```pythondf.drop(list_of_features_to_remove, axis=1, inplace=True)```* `list_of_features_to_remove` is, you guessed it, the list of features we want to remove!* `axis=1` lets Pandas know we want to drop columns, not rows, from our DataFrame (axis=0 is used for computations along rows!) * `inplace=True` lets us drop the columns right here in our DataFrame, instead of returning a new DataFrame that we could store in a new variable
###Code
features_to_remove = ['address','attributes','business_id','categories','city','hours','is_open','latitude','longitude','name','neighborhood','postal_code','state','time']
df.drop(labels=features_to_remove, axis=1, inplace=True)
###Output
_____no_output_____
###Markdown
Now we just have to check our data to make sure we don't have any missing values, or `NaN`s, which will prevent the Linear Regression model from running correctly. To do this we can use the statement `df.isna().any()`. This will check all of our columns and return `True` if there are any missing values or `NaN`s, or `False` if there are no missing values. Check if `df` is missing any values.
###Code
df.isna().any()
###Output
_____no_output_____
###Markdown
As you can see, there are a few columns with missing values. Since our dataset has no information recorded for some businesses in these columns, we will assume the Yelp pages did not display these features. For example, if there is a `NaN` value for `number_pics`, it means that the associated business did not have any pictures posted on its Yelp page. Thus we can replace all of our `NaN`s with `0`s. To do this we can use the `.fillna()` method, which takes a dictionary as shown below:```pythondf.fillna({'column_1':val_to_replace_na, 'column_2':val_to_replace_na, 'column_3':val_to_replace_na}, inplace=True)```* `column_1`, `column_2`, and `column_3` are the columns with missing values that we want to fill. We can include as many columns as we like in the dictionary that is passed to `.fillna()`* `val_to_replace_na` is the value that will replace the missing values, or `NaN`s* `inplace=True` since we want to perform our changes in place and not return a new DataFrameFill the missing values in `df` with `0`. Afterwards, confirm the missing values have been filled with `df.isna().any()`.
###Code
df.fillna({'weekday_checkins':0,
'weekend_checkins':0,
'average_tip_length':0,
'number_tips':0,
'average_caption_length':0,
'number_pics':0},
inplace=True)
df.isna().any()
###Output
_____no_output_____
###Markdown
Exploratory AnalysisNow that our data is all together, let's investigate some of the different features to see what might correlate most with our dependent variable, the Yelp rating (called `stars` in our DataFrame). The features with the best correlations could prove to be the most helpful for our Linear Regression model! Pandas DataFrames have a really helpful method, `.corr()`, that allows us to see the correlation coefficients for each pair of our different features. Remember, a correlation of `0` indicates that two features have no linear relationship, a correlation coefficient of `1` indicates two features have a perfect positive linear relationship, and a correlation coefficient of `-1` indicates two features have a perfect negative linear relationship. Call `.corr()` on `df`. You'll see that `number_funny_votes` has a correlation coefficient of `0.001320` with respect to `stars`, our Yelp rating. This is a very weak correlation. What features best correlate, both positively and negatively, with Yelp rating?
###Code
df.corr()
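# An added illustration (assumes df from the merges above): rank each feature's
# correlation with the Yelp rating to spot the strongest relationships quickly.
print(df.corr()['stars'].sort_values(ascending=False))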
###Output
_____no_output_____
###Markdown
To further visualize these relationships, we can plot certain features against our dependent variable, the Yelp rating. In the cell below we have provided the code to import Matplotlib. We can use Matplotlib's `.scatter()` method with the below syntax to plot what these correlations look like:```pythonplt.scatter(x_values_to_plot, y_values_to_plot, alpha=blending_val)```* `x_values_to_plot` are the values to be plotted along the x-axis* `y_values_to_plot` are the values to be plotted along the y-axis* `alpha=blending_val` is the blending value, or how transparent (0) or opaque (1) a plotted point is. This will help us distinguish areas of the plot with high point densities and low point densitiesPlot the three features that correlate most with Yelp rating (`average_review_sentiment`, `average_review_length`, `average_review_age`) against `stars`, our Yelp rating. Then plot a lowly correlating feature, such as `number_funny_votes`, against `stars`.>What is `average_review_sentiment`, you ask? `average_review_sentiment` is the average sentiment score for all reviews on a business' Yelp page. The sentiment score for a review was calculated using the sentiment analysis tool [VADER](https://github.com/cjhutto/vaderSentiment). VADER uses a labeled set of positive and negative words, along with codified rules of grammar, to estimate how positive or negative a statement is. Scores range from `-1`, most negative, to `+1`, most positive, with a score of `0` indicating a neutral statement. While not perfect, VADER does a good job at guessing the sentiment of text data!What kind of relationships do you see from the plots? Do you think these variables are good or bad features for our Yelp rating prediction model?
###Code
from matplotlib import pyplot as plt
# plot average_review_sentiment against stars here
plt.scatter(df['average_review_sentiment'],df['stars'],alpha=0.1)
plt.xlabel('average_review_sentiment')
plt.ylabel('Yelp Rating')
plt.show()
# plot average_review_length against stars here
plt.scatter(df['average_review_length'],df['stars'],alpha=0.1)
plt.xlabel('average_review_length')
plt.ylabel('Yelp Rating')
plt.show()
# plot average_review_age against stars here
plt.scatter(df['average_review_age'],df['stars'],alpha=0.1)
plt.xlabel('average_review_age')
plt.ylabel('Yelp Rating')
plt.show()
# plot number_funny_votes against stars here
plt.scatter(df['number_funny_votes'],df['stars'],alpha=0.1)
plt.xlabel('number_funny_votes')
plt.ylabel('Yelp Rating')
plt.show()
###Output
_____no_output_____
###Markdown
Why do you think `average_review_sentiment` correlates so well with Yelp rating? Data SelectionIn order to put our data into a Linear Regression model, we need to separate out our features to model on and the Yelp ratings. From our correlation analysis we saw that the three features with the strongest correlations to Yelp rating are `average_review_sentiment`, `average_review_length`, and `average_review_age`. Since we want to dig a little deeper than `average_review_sentiment`, which understandably has a very high correlation with Yelp rating, let's choose to create our first model with `average_review_length` and `average_review_age` as features.Pandas lets us select one column of a DataFrame with the following syntax:```pythonsubset_of_data = df['feature_to_select']```Pandas also lets us select multiple columns from a DataFrame with this syntax:```pythonsubset_of_data = df[list_of_features_to_select]```Create a new DataFrame `features` that contains the columns we want to model on: `average_review_length` and `average_review_age`. Then create another DataFrame `ratings` that stores the value we want to predict, Yelp rating, or `stars` in `df`.
###Code
features = df[['average_review_length','average_review_age']]
ratings = df['stars']
###Output
_____no_output_____
###Markdown
Split the Data into Training and Testing SetsWe are just about ready to model! But first, we need to break our data into a training set and a test set so we can evaluate how well our model performs. We'll use scikit-learn's `train_test_split` function to do this split, which is provided in the cell below. This function takes two required parameters: the data, or our features, followed by our dependent variable, in our case the Yelp rating. Set the optional parameter `test_size` to be `0.2`. Finally, set the optional parameter `random_state` to `1`. This will make it so your data is split in the same way as the data in our solution code. Remember, this function returns 4 items in this order:1. The training data (features), which we can assign to `X_train`2. The testing data (features), which we can assign to `X_test`3. The training dependent variable (Yelp rating), which we can assign to `y_train`4. The testing dependent variable (Yelp rating), which we can assign to `y_test`
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, ratings, test_size = 0.2, random_state = 1)
###Output
_____no_output_____
###Markdown
Create and Train the ModelNow that our data is split into training and testing sets, we can finally model! In the cell below we have provided the code to import `LinearRegression` from scikit-learn's `linear_model` module. Create a new `LinearRegression` object named model. The `.fit()` method will fit our Linear Regression model to our training data and calculate the coefficients for our features. Call the `.fit()` method on `model` with `X_train` and `y_train` as parameters. Just like that our model has now been trained on our training data!
###Code
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Evaluate and Understand the ModelNow we can evaluate our model in a variety of ways. The first way will be by using the `.score()` method, which provides the R^2 value for our model. Remember, R^2 is the coefficient of determination, or a measure of how much of the variance in our dependent variable, the predicted Yelp rating, is explained by our independent variables, our feature data. R^2 values range from `0` to `1`, with `0` indicating that the created model does not fit our data at all, and with `1` indicating the model perfectly fits our feature data. Call `.score()` on our model with `X_train` and `y_train` as parameters to calculate our training R^2 score. Then call `.score()` again on model with `X_test` and `y_test` as parameters to calculate R^2 for our testing data. What do these R^2 values say about our model? Do you think these features alone are able to effectively predict Yelp ratings?
###Code
model.score(X_train,y_train)
model.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
After all that hard work, we can finally take a look at the coefficients on our different features! The model has an attribute `.coef_` which is an array of the feature coefficients determined by fitting our model to the training data. To make it easier for you to see which feature corresponds to which coefficient, we have provided some code in the cell that `zip`s together a list of our features with the coefficients and sorts them in descending order from most predictive to least predictive.
###Code
sorted(list(zip(['average_review_length','average_review_age'],model.coef_)),key = lambda x: abs(x[1]),reverse=True)
###Output
_____no_output_____
###Markdown
Lastly we can calculate the predicted Yelp ratings for our testing data and compare them to their actual Yelp ratings! Our model has a `.predict()` method which uses the model's coefficients to calculate the predicted Yelp rating. Call `.predict()` on `X_test` and assign the values to `y_predicted`. Use Matplotlib to plot `y_test` vs `y_predicted`. For a perfect linear regression model we would expect to see the data plotted along the line `y = x`, indicating homoscedasticity. Is this the case? If not, why not? Would you call this model heteroscedastic or homoscedastic?
###Code
y_predicted = model.predict(X_test)
plt.scatter(y_test,y_predicted)
plt.xlabel('Yelp Rating')
plt.ylabel('Predicted Yelp Rating')
plt.ylim(1,5)
plt.show()
###Output
_____no_output_____
###Markdown
Define Different Subsets of DataAfter evaluating the first model, you can see that `average_review_length` and `average_review_age` alone are not the best predictors for Yelp rating. Let's go do some more modeling with different subsets of features and see if we can achieve a more accurate model! In the cells below we have provided different lists of subsets of features that we will model with and evaluate. What other subsets of features would you want to test? Why do you think those feature sets are more predictive of Yelp rating than others? Create at least one more subset of features that you want to predict Yelp ratings from.
###Code
# subset of only average review sentiment
sentiment = ['average_review_sentiment']
# subset of all features that have a response range [0,1]
binary_features = ['alcohol?','has_bike_parking','takes_credit_cards','good_for_kids','take_reservations','has_wifi']
# subset of all features that vary on a greater range than [0,1]
numeric_features = ['review_count','price_range','average_caption_length','number_pics','average_review_age','average_review_length','average_review_sentiment','number_funny_votes','number_cool_votes','number_useful_votes','average_tip_length','number_tips','average_number_friends','average_days_on_yelp','average_number_fans','average_review_count','average_number_years_elite','weekday_checkins','weekend_checkins']
# all features
all_features = binary_features + numeric_features
# add your own feature subset here
feature_subset = ['average_review_sentiment','price_range','review_count','average_number_fans'] # one possible choice
###Output
_____no_output_____
###Markdown
Further Modeling Now that we have lists of different feature subsets, we can create new models from them. In order to more easily compare the performance of these new models, we have created a function for you below called `model_these_features()`. This function replicates the model building process you just completed with our first model! Take some time to review how the function works, analyzing it line by line. Fill in the empty comments with an explanation of the task the code beneath it is performing.
###Code
import numpy as np
# take a list of features to model as a parameter
def model_these_features(feature_list):
# define ratings and features, with the features limited to our chosen subset of data
ratings = df.loc[:,'stars']
features = df.loc[:,feature_list]
# perform train, test, split on the data
X_train, X_test, y_train, y_test = train_test_split(features, ratings, test_size = 0.2, random_state = 1)
# don't worry too much about these lines, just know that they allow the model to work when
# we model on just one feature instead of multiple features. Trust us on this one :)
if len(X_train.shape) < 2:
X_train = np.array(X_train).reshape(-1,1)
X_test = np.array(X_test).reshape(-1,1)
# create and fit the model to the training data
model = LinearRegression()
model.fit(X_train,y_train)
# print the train and test scores
print('Train Score:', model.score(X_train,y_train))
print('Test Score:', model.score(X_test,y_test))
# print the model features and their corresponding coefficients, from most predictive to least predictive
print(sorted(list(zip(feature_list,model.coef_)),key = lambda x: abs(x[1]),reverse=True))
# calculate the predicted Yelp ratings from the test data
y_predicted = model.predict(X_test)
# plot the actual Yelp Ratings vs the predicted Yelp ratings for the test data
plt.scatter(y_test,y_predicted)
plt.xlabel('Yelp Rating')
plt.ylabel('Predicted Yelp Rating')
plt.ylim(1,5)
plt.show()
###Output
_____no_output_____
###Markdown
Once you feel comfortable with the steps of the function, run models on the following subsets of data using `model_these_features()`:* `sentiment`: only `average_review_sentiment`* `binary_features`: all features that have a response range [0,1]* `numeric_features`: all features that vary on a greater range than [0,1]* `all_features`: all features* `feature_subset`: your own feature subsetHow does changing the feature sets affect the model's R^2 value? Which features are most important to predicting Yelp rating in the different models? Which models appear more or less homoscedastic?
###Code
# create a model on sentiment here
model_these_features(sentiment)
# create a model on all binary features here
model_these_features(binary_features)
# create a model on all numeric features here
model_these_features(numeric_features)
# create a model on all features here
model_these_features(all_features)
# create a model on your feature subset here
model_these_features(feature_subset)
###Output
_____no_output_____
###Markdown
Danielle's Delicious Delicacies' DebutYou've loaded the data, cleaned it, modeled it, and evaluated it. You're tired, but glowing with pride after all the hard work. You close your eyes and can clearly see opening day of Danielle's Delicious Delicacies with a line out the door. But what will your Yelp rating be? Let's use our model to make a prediction.Our best model was the model using all features, so we'll work with this model again. In the cell below print `all_features` to get a reminder of what features we are working with.
###Code
print(all_features)
###Output
['alcohol?', 'has_bike_parking', 'takes_credit_cards', 'good_for_kids', 'take_reservations', 'has_wifi', 'review_count', 'price_range', 'average_caption_length', 'number_pics', 'average_review_age', 'average_review_length', 'average_review_sentiment', 'number_funny_votes', 'number_cool_votes', 'number_useful_votes', 'average_tip_length', 'number_tips', 'average_number_friends', 'average_days_on_yelp', 'average_number_fans', 'average_review_count', 'average_number_years_elite', 'weekday_checkins', 'weekend_checkins']
###Markdown
Run the cell below to grab all the features and retrain our model on them.
###Code
features = df.loc[:,all_features]
ratings = df.loc[:,'stars']
X_train, X_test, y_train, y_test = train_test_split(features, ratings, test_size = 0.2, random_state = 1)
model = LinearRegression()
model.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
To give you some perspective on the restaurants already out there, we have provided the mean, minimum, and maximum values for each feature below. Will Danielle's Delicious Delicacies be just another average restaurant, or will it be a 5 star behemoth amongst the masses?
###Code
pd.DataFrame(list(zip(features.columns,features.describe().loc['mean'],features.describe().loc['min'],features.describe().loc['max'])),columns=['Feature','Mean','Min','Max'])
###Output
_____no_output_____
###Markdown
Based on your plans for the restaurant, how you expect your customers to post on your Yelp page, and the values above, fill in the blanks in the NumPy array below with your desired values. The first blank corresponds with the feature at `index=0` in the DataFrame above, `alcohol?`, and the last blank corresponds to the feature at `index=24`, `weekend_checkins`. Make sure to enter either `0` or `1` for all binary features, and if you aren't sure of what value to put for a feature, select the mean from the DataFrame above. After you enter the values, run the prediction cell below to receive your Yelp rating! How is Danielle's Delicious Delicacies' debut going to be?
###Code
# these are example values. You can use any set of values for your restaurant!
danielles_delicious_delicacies = np.array([0,1,1,1,1,1,10,2,3,10,10,1200,0.9,3,6,5,50,3,50,1800,12,123,0.5,0,0]).reshape(1,-1)
model.predict(danielles_delicious_delicacies)
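# An added note: linear-regression predictions are unbounded, so for reporting
# you may want to clip them to the valid 1-5 star range.
print(np.clip(model.predict(danielles_delicious_delicacies), 1, 5))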
###Output
_____no_output_____ |
C3 Machine Learning I/LABS_PROJECT/Tech_Fun_C3_L2_Practice_with_Supervised_Learners.ipynb | ###Markdown
Technology Fundamentals Course 3, Lab 2: Practice with Supervised Learners**Instructor**: Wesley Beckner**Contact**: [email protected]**Teaching Assistants**: Varsha Bang, Harsha Vardhan**Contact**: [email protected], [email protected] this lab we will continue to practice creation of pipelines, feature engineering, and applying learning algorithms.Now that we have covered supervised learning methods and Grid Search, we will use these tools to do a sophisticated search for optimized hyperparameters.--- L3 Q1: Create train and test datasets for wine qualityCreate new train/test datasets that are normalized (but have the same indices as the original train/test sets for comparison)
###Code
# Code Cell for L1 Q1
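# A minimal sketch, assuming X_train / X_test hold the wine-quality features
# from an earlier train/test split (hypothetical names; adapt as needed):
import pandas as pd
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
X_train_norm = pd.DataFrame(scaler.transform(X_train), columns=X_train.columns, index=X_train.index)
X_test_norm = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns, index=X_test.index)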
###Output
_____no_output_____
###Markdown
L3 Q2:Evaluate the performance of a Random Forest on classifying wine quality
###Code
# Code Cell for L1 Q2
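# A minimal sketch, assuming X_train/X_test and quality labels y_train/y_test
# already exist (hypothetical names):
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
print('test accuracy:', rf.score(X_test, y_test))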
###Output
_____no_output_____
###Markdown
L3 Q3:Do a grid search to optimize your Random Forest model, use whatever hyperparameters you would like
###Code
# Code Cell for L1 Q3
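# A minimal sketch: a small grid over two common Random Forest hyperparameters
# (assumes X_train / y_train from the earlier split).
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
param_grid = {'n_estimators': [100, 300], 'max_depth': [None, 5, 10]}
grid = GridSearchCV(RandomForestClassifier(random_state=42), param_grid, cv=5, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)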
###Output
_____no_output_____ |
Par3/Par3.ipynb | ###Markdown
3.2.2 Preparing the Data
###Code
# 1. Import the required modules
import numpy as np
import torch
# Import the MNIST dataset built into torchvision
from torchvision.datasets import mnist
# Import the preprocessing module
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# Import nn and the optimizers
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
from tensorboardX import SummaryWriter
# 2. Define some hyperparameters
train_batch_size = 64
test_batch_size = 128
learning_rate = 0.01
num_epoches = 20
lr = 0.01
momentum = 0.5
# 3. Download the data and preprocess it
# Define the preprocessing transforms; they are chained together with Compose
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
# Download the data and apply the preprocessing
train_dataset = mnist.MNIST('./data', train=True, transform=transform, download=False)
test_dataset = mnist.MNIST('./data', train=False, transform=transform)
test_dataset
type(train_dataset)
train_dataset
# A DataLoader is an iterable object and can be consumed like any other iterator
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False)
train_loader
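# An added check: number of mini-batches per epoch (60000 images / 64 ≈ 938)
print(len(train_loader))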
###Output
_____no_output_____
###Markdown
3.2.3 Visualizing the Source Data
###Code
import matplotlib.pyplot as plt
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
fig = plt.figure()
for i in range(6):
plt.subplot(2, 3, i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
# plt.show()
###Output
_____no_output_____
###Markdown
3.2.4 Building the Model
###Code
# 1) Build the network
class Net(nn.Module):
'''
    Build the network with Sequential; Sequential() combines the network's layers into a single module.
'''
def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
super(Net, self).__init__()
self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1))
self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2))
self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))
def forward(self, x):
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = self.layer3(x)
return x
lr = 0.01
momentum = 0.9
# 2) 实例化网络
# 检查是否有可用的 GPU,有则使用,否则使用 CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs")
# # dim = 0[20, xxx] --> [10, ...], [10, ...] on 2GPUs
# model = nn.DataParallel(model)
# Instantiate the network
model = Net(28 * 28, 300, 100, 10)
model.to(device)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
###Output
_____no_output_____
###Markdown
3.2.5 Training the Model
###Code
# 1. Train the model
losses = []
acces = []
eval_losses = []
eval_acces = []
writer = SummaryWriter(log_dir='logs4', comment='train-loss')
for epoch in range(num_epoches):
train_loss = 0
train_acc = 0
model.train()
    # Dynamically adjust the learning rate
if epoch%5 == 0:
optimizer.param_groups[0]['lr'] *= 0.1
for img, label in train_loader:
img = img.to(device)
label = label.to(device)
img = img.view(img.size(0), -1)
        # Forward pass
out = model(img)
loss = criterion(out, label)
        # Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # Record the loss
train_loss += loss.item()
        # Compute classification accuracy
_, pred = out.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / img.shape[0]
train_acc += acc
losses.append(train_loss / len(train_loader))
acces.append(train_acc / len(train_loader))
    # Evaluate on the test set
eval_loss = 0
eval_acc = 0
    # Switch the model to evaluation mode
model.eval()
for img, label in test_loader:
img = img.to(device)
label = label.to(device)
img = img.view(img.size(0), -1)
out = model(img)
loss = criterion(out, label)
        # Record the loss
eval_loss += loss.item()
        # Record the accuracy
_, pred = out.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / img.shape[0]
eval_acc += acc
eval_losses.append(eval_loss / len(test_loader))
eval_acces.append(eval_acc / len(test_loader))
print('epoch: {}, Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'
.format(epoch, train_loss/len(train_loader), train_acc/len(train_loader),
eval_loss/len(test_loader),eval_acc/len(test_loader)))
# 2. Visualize the training and test loss values
plt.title('trainloss')
plt.plot(np.arange(len(losses)), losses)
plt.legend(['Train Loss'], loc = 'upper right')
###Output
_____no_output_____
###Markdown
3.6 Dynamically Adjusting the Learning Rate
###Code
for epoch in range(num_epoches):
    # Dynamically adjust the learning rate
if epoch%5 == 0:
optimizer.param_groups[0]['lr'] *= 0.1
print(optimizer.param_groups[0]['lr'])
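# Note: an equivalent, more idiomatic approach (a sketch; assumes `optimizer`
# is already defined) is torch.optim.lr_scheduler.StepLR:
#   scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# and then call scheduler.step() once per epoch after the optimizer updates.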
###Output
1.0000000000000005e-09
1.0000000000000006e-10
1.0000000000000006e-11
1.0000000000000006e-12
###Markdown
3.7 Comparing Optimizers
###Code
# 1) Import the required modules
import torch
import torch.utils.data as Data
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Hyperparameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12
# 2) Generate the data
# Generate training data
# torch.unsqueeze() turns a 1-D tensor into a 2-D one; the layers expect 2-D input
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
# 0.1 * torch.normal(...) adds noise to the targets
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))
torch_dataset = Data.TensorDataset(x,y)
# Get a generator that yields mini-batches
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)
# 3) Build the neural network
class Net(torch.nn.Module):
    # Initialization
def __init__(self):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(1, 20)
self.predict = torch.nn.Linear(20, 1)
    # Forward pass
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.predict(x)
return x
# 4) Use several different optimizers
net_SGD = Net()
net_Momentum = Net()
net_RMSProp = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSProp, net_Adam]
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.9)
opt_RMSProp = torch.optim.RMSprop(net_RMSProp.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSProp, opt_Adam]
# 5) Train the model
loss_func = torch.nn.MSELoss()
loss_his = [[], [], [], []]  # record the losses
for epoch in range(EPOCH):
for step, (batch_x, batch_y) in enumerate(loader):
for net, opt, l_his in zip(nets, optimizers, loss_his):
output = net(batch_x) # Get output for every net
loss = loss_func(output, batch_y) # Compute loss for every net
opt.zero_grad() # Clear gradients for next train
loss.backward() # Backpropagation, compute gradients
opt.step() # Apply gradients
            l_his.append(loss.data.numpy()) # Loss recorder
labels = ['SGD', 'Momentum', 'RMSProp', 'Adam']
# 6) Visualize the results
for i, l_his in enumerate(loss_his):
plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0, 0.2))
plt.show()
###Output
_____no_output_____ |
C3_W1_Lab_2_Transfer_Learning_CIFAR_10.ipynb | ###Markdown
Transfer LearningIn this notebook, you will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras. Imports
###Code
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
###Output
_____no_output_____
###Markdown
Parameters - Define the batch size- Define the class (category) names
###Code
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
###Output
_____no_output_____
###Markdown
Define some functions that will help you to create some visualizations. (These will be used later)
###Code
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
n = 10
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
###Output
_____no_output_____
###Markdown
Loading and Preprocessing Data[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
###Code
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
###Output
_____no_output_____
###Markdown
Visualize DatasetUse the `display_image` to view some of the images and their class labels.
###Code
display_images(training_images, training_labels, training_labels, "Training Data" )
display_images(validation_images, validation_labels, validation_labels, "Training Data" )
###Output
_____no_output_____
###Markdown
Preprocess DatasetHere, you'll perform normalization on images in training and validation set. - You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
###Code
def preprocess_image_input(input_images):
input_images = input_images.astype('float32')
output_ims = tf.keras.applications.resnet50.preprocess_input(input_images)
return output_ims
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
###Output
_____no_output_____
###Markdown
Define the NetworkYou will be performing transfer learning on **ResNet50** available in Keras.- You'll load pre-trained **imagenet weights** to the model.- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
###Code
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
###Code
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
###Output
_____no_output_____
###Markdown
Evaluate the ModelCalculate the loss and accuracy metrics using the model's `.evaluate` function.
###Code
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
###Output
_____no_output_____
###Markdown
Plot Loss and Accuracy CurvesPlot the loss (in blue) and validation loss (in green).
###Code
plot_metrics("loss", "Loss")
print(loss,accuracy)
print(model.metrics_names)
###Output
_____no_output_____
###Markdown
Plot the training accuracy (blue) as well as the validation accuracy (green).
###Code
plot_metrics("accuracy", "Accuracy")
###Output
_____no_output_____
###Markdown
Visualize predictionsYou can take a look at the predictions on the validation set.
###Code
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
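# An added illustration (not in the original lab): a confusion matrix summarizes
# the per-class errors more compactly than the image grid above.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(validation_labels.flatten(), probabilities))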
###Output
_____no_output_____
###Markdown
Transfer LearningIn this notebook, you will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras. Imports
###Code
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
###Output
_____no_output_____
###Markdown
Parameters - Define the batch size- Define the class (category) names
###Code
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
###Output
_____no_output_____
###Markdown
Define some functions that will help you to create some visualizations. (These will be used later)
###Code
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
n = 10
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
###Output
_____no_output_____
###Markdown
Loading and Preprocessing Data[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
###Code
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
###Output
_____no_output_____
###Markdown
Visualize DatasetUse the `display_image` to view some of the images and their class labels.
###Code
display_images(training_images, training_labels, training_labels, "Training Data" )
display_images(validation_images, validation_labels, validation_labels, "Training Data" )
###Output
_____no_output_____
###Markdown
Preprocess DatasetHere, you'll perform normalization on images in training and validation set. - You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
###Code
def preprocess_image_input(input_images):
input_images = input_images.astype('float32')
output_ims = tf.keras.applications.resnet50.preprocess_input(input_images)
return output_ims
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
###Output
_____no_output_____
###Markdown
Define the NetworkYou will be performing transfer learning on **ResNet50** available in Keras.- You'll load pre-trained **imagenet weights** to the model.- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
###Code
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
###Code
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
###Output
_____no_output_____
###Markdown
Evaluate the ModelCalculate the loss and accuracy metrics using the model's `.evaluate` function.
###Code
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
###Output
_____no_output_____
###Markdown
Plot Loss and Accuracy CurvesPlot the loss (in blue) and validation loss (in green).
###Code
plot_metrics("loss", "Loss")
###Output
_____no_output_____
###Markdown
Plot the training accuracy (blue) as well as the validation accuracy (green).
###Code
plot_metrics("accuracy", "Accuracy")
###Output
_____no_output_____
###Markdown
Visualize predictionsYou can take a look at the predictions on the validation set.
###Code
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
###Output
_____no_output_____
###Markdown
Transfer LearningIn this notebook, you will perform transfer learning to train CIFAR-10 dataset on ResNet50 model available in Keras. Imports
###Code
import os, re, time, json
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import numpy as np
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from matplotlib import pyplot as plt
import tensorflow_datasets as tfds
print("Tensorflow version " + tf.__version__)
###Output
_____no_output_____
###Markdown
Parameters - Define the batch size- Define the class (category) names
###Code
BATCH_SIZE = 32
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
###Output
_____no_output_____
###Markdown
Define some functions that will help you to create some visualizations. (These will be used later)
###Code
#@title Visualization Utilities[RUN ME]
#Matplotlib config
plt.rc('image', cmap='gray')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# utility to display a row of digits with their predictions
def display_images(digits, predictions, labels, title):
n = 10
indexes = np.random.choice(len(predictions), size=n)
n_digits = digits[indexes]
n_predictions = predictions[indexes]
n_predictions = n_predictions.reshape((n,))
n_labels = labels[indexes]
fig = plt.figure(figsize=(20, 4))
plt.title(title)
plt.yticks([])
plt.xticks([])
for i in range(10):
ax = fig.add_subplot(1, 10, i+1)
class_index = n_predictions[i]
plt.xlabel(classes[class_index])
plt.xticks([])
plt.yticks([])
plt.imshow(n_digits[i])
# utility to display training and validation curves
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
###Output
_____no_output_____
###Markdown
Loading and Preprocessing Data[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset has 32 x 32 RGB images belonging to 10 classes. You will load the dataset from Keras.
###Code
(training_images, training_labels) , (validation_images, validation_labels) = tf.keras.datasets.cifar10.load_data()
###Output
_____no_output_____
###Markdown
Visualize DatasetUse the `display_image` to view some of the images and their class labels.
###Code
display_images(training_images, training_labels, training_labels, "Training Data" )
display_images(validation_images, validation_labels, validation_labels, "Training Data" )
###Output
_____no_output_____
###Markdown
Preprocess DatasetHere, you'll perform normalization on images in training and validation set. - You'll use the function [preprocess_input](https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py) from the ResNet50 model in Keras.
###Code
def preprocess_image_input(input_images):
input_images = input_images.astype('float32')
output_ims = tf.keras.applications.resnet50.preprocess_input(input_images)
return output_ims
train_X = preprocess_image_input(training_images)
valid_X = preprocess_image_input(validation_images)
###Output
_____no_output_____
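###Markdown
Note (added): for ResNet50, `preprocess_input` uses the "caffe" convention: it converts the images from RGB to BGR and subtracts the ImageNet channel means, rather than scaling pixel values to [0, 1].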
###Markdown
Define the NetworkYou will be performing transfer learning on **ResNet50** available in Keras.- You'll load pre-trained **imagenet weights** to the model.- You'll choose to retain all layers of **ResNet50** along with the final classification layers.
###Code
'''
Feature Extraction is performed by ResNet50 pretrained on imagenet weights.
Input size is 224 x 224.
'''
def feature_extractor(inputs):
feature_extractor = tf.keras.applications.resnet.ResNet50(input_shape=(224, 224, 3),
include_top=False,
weights='imagenet')(inputs)
return feature_extractor
'''
Defines final dense layers and subsequent softmax layer for classification.
'''
def classifier(inputs):
x = tf.keras.layers.GlobalAveragePooling2D()(inputs)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dense(512, activation="relu")(x)
x = tf.keras.layers.Dense(10, activation="softmax", name="classification")(x)
return x
'''
Since input image size is (32 x 32), first upsample the image by factor of (7x7) to transform it to (224 x 224)
Connect the feature extraction and "classifier" layers to build the model.
'''
def final_model(inputs):
resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
resnet_feature_extractor = feature_extractor(resize)
classification_output = classifier(resnet_feature_extractor)
return classification_output
'''
Define the model and compile it.
Use Stochastic Gradient Descent as the optimizer.
Use Sparse Categorical CrossEntropy as the loss function.
'''
def define_compile_model():
inputs = tf.keras.layers.Input(shape=(32,32,3))
classification_output = final_model(inputs)
model = tf.keras.Model(inputs=inputs, outputs = classification_output)
model.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
model = define_compile_model()
model.summary()
###Output
_____no_output_____
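###Markdown
Quick dimension check (added note): `UpSampling2D(size=(7,7))` turns the 32 x 32 CIFAR-10 inputs into 32 * 7 = 224 x 224 tensors, exactly the input size the pretrained ResNet50 feature extractor above expects.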
###Markdown
Train the model
###Code
# this will take around 20 minutes to complete
EPOCHS = 4
history = model.fit(train_X, training_labels, epochs=EPOCHS, validation_data = (valid_X, validation_labels), batch_size=64)
###Output
_____no_output_____
###Markdown
Evaluate the ModelCalculate the loss and accuracy metrics using the model's `.evaluate` function.
###Code
loss, accuracy = model.evaluate(valid_X, validation_labels, batch_size=64)
###Output
_____no_output_____
###Markdown
Plot Loss and Accuracy CurvesPlot the loss (in blue) and validation loss (in green).
###Code
plot_metrics("loss", "Loss")
###Output
_____no_output_____
###Markdown
Plot the training accuracy (blue) as well as the validation accuracy (green).
###Code
plot_metrics("accuracy", "Accuracy")
###Output
_____no_output_____
###Markdown
Visualize predictionsYou can take a look at the predictions on the validation set.
###Code
probabilities = model.predict(valid_X, batch_size=64)
probabilities = np.argmax(probabilities, axis = 1)
display_images(validation_images, probabilities, validation_labels, "Bad predictions indicated in red.")
###Output
_____no_output_____ |
arrays_strings/fizz_buzz/fizz_buzz_solution.ipynb | ###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Implement Fizz Buzz.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* What is fizz buzz? * Return the string representation of numbers from 1 to n * Multiples of 3 -> 'Fizz' * Multiples of 5 -> 'Buzz' * Multiples of 3 and 5 -> 'FizzBuzz'* Can we assume the inputs are valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* < 1 -> Exception* 15 ->[ '1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz', '11', 'Fizz', '13', '14', 'FizzBuzz'] AlgorithmThere is no fancy algorithm to solve fizz buzz.* Iterate from 1 through n* Use the mod operator to determine if the current iteration is divisible by: * 3 and 5 -> 'FizzBuzz' * 3 -> 'Fizz' * 5 -> 'Buzz' * else -> string of current iteration* return the resultsComplexity:* Time: O(n)* Space: O(n) Code
###Code
class Solution(object):
def fizz_buzz(self, num):
if num is None:
raise TypeError('num cannot be None')
if num < 1:
raise ValueError('num cannot be less than one')
results = []
for i in range(1, num + 1):
if i % 3 == 0 and i % 5 == 0:
results.append('FizzBuzz')
elif i % 3 == 0:
results.append('Fizz')
elif i % 5 == 0:
results.append('Buzz')
else:
results.append(str(i))
return results
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_fizz_buzz.py
import unittest
class TestFizzBuzz(unittest.TestCase):
def test_fizz_buzz(self):
solution = Solution()
self.assertRaises(TypeError, solution.fizz_buzz, None)
self.assertRaises(ValueError, solution.fizz_buzz, 0)
expected = [
'1',
'2',
'Fizz',
'4',
'Buzz',
'Fizz',
'7',
'8',
'Fizz',
'Buzz',
'11',
'Fizz',
'13',
'14',
'FizzBuzz'
]
self.assertEqual(solution.fizz_buzz(15), expected)
print('Success: test_fizz_buzz')
def main():
test = TestFizzBuzz()
test.test_fizz_buzz()
if __name__ == '__main__':
main()
%run -i test_fizz_buzz.py
###Output
Success: test_fizz_buzz
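###Markdown
Quick usage check (added): calling the solution directly shows the expected output for n = 15.
###Code
print(Solution().fizz_buzz(15))
###Output
_____no_output_____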
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Implement Fizz Buzz.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* What is fizz buzz? * Return the string representation of numbers from 1 to n * Multiples of 3 -> 'Fizz' * Multiples of 5 -> 'Buzz' * Multiples of 3 and 5 -> 'FizzBuzz'* Can we assume the inputs are valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* < 1 -> Exception* 15 ->[ '1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz', '11', 'Fizz', '13', '14', 'FizzBuzz'] AlgorithmThere is no fancy algorithm to solve fizz buzz.* Iterate from 1 through n* Use the mod operator to determine if the current iteration is divisible by: * 3 and 5 -> 'FizzBuzz' * 3 -> 'Fizz' * 5 -> 'Buzz' * else -> string of current iteration* return the resultsComplexity:* Time: O(n)* Space: O(n) Code
###Code
class Solution(object):
def fizz_buzz(self, num):
if num is None:
raise TypeError('num cannot be None')
if num < 1:
raise ValueError('num cannot be less than one')
results = []
for i in range(1, num + 1):
if i % 3 == 0 and i % 5 == 0:
results.append('FizzBuzz')
elif i % 3 == 0:
results.append('Fizz')
elif i % 5 == 0:
results.append('Buzz')
else:
results.append(str(i))
return results
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_fizz_buzz.py
from nose.tools import assert_equal, assert_raises
class TestFizzBuzz(object):
def test_fizz_buzz(self):
solution = Solution()
assert_raises(TypeError, solution.fizz_buzz, None)
assert_raises(ValueError, solution.fizz_buzz, 0)
expected = [
'1',
'2',
'Fizz',
'4',
'Buzz',
'Fizz',
'7',
'8',
'Fizz',
'Buzz',
'11',
'Fizz',
'13',
'14',
'FizzBuzz'
]
assert_equal(solution.fizz_buzz(15), expected)
print('Success: test_fizz_buzz')
def main():
test = TestFizzBuzz()
test.test_fizz_buzz()
if __name__ == '__main__':
main()
%run -i test_fizz_buzz.py
###Output
Success: test_fizz_buzz
|
course_2/edited_material/Part_5_Advanced_Statistical_Methods_(Machine_Learning)/S38_L256/A Simple Example of Clustering - Solution.ipynb | ###Markdown
A Simple Example of Clustering You are given much more country data. Using the same methodology as the one in the lecture, group all the countries in 2 clusters. Try with other numbers of clusters and see if they match your expectations. Maybe 7 is going to be a cool one!Plot the data using the c parameter to separate the data by the clusters we defined. Note: c stands for color Import the relevant libraries
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.cluster import KMeans
###Output
_____no_output_____
###Markdown
Load the data Load data from the csv file: 'Countries-exercise.csv'.
###Code
# Load the data
raw_data = pd.read_csv('Countries-exercise.csv')
# Check the data
raw_data
###Output
_____no_output_____
###Markdown
Remove the duplicate index column from the dataset.
###Code
data = raw_data.copy()
data = data.drop(columns=[c for c in data.columns if c.startswith('Unnamed')])  # drop the duplicate index column if present (hedged: assumes it is named 'Unnamed: ...')
###Output
_____no_output_____
###Markdown
Plot the data Plot the 'Longitude' and 'Latitude' columns.
###Code
plt.scatter(data['Longitude'], data['Latitude'])
plt.xlim(-180,180)
plt.ylim(-90, 90)
plt.show()
###Output
_____no_output_____
###Markdown
Select the features Create a copy of that data and remove all parameters apart from Longitude and Latitude.
###Code
x = data.iloc[:,1:3]
x
###Output
_____no_output_____
###Markdown
Clustering Here's the actual solution: simply change kmeans = KMeans(2) to the desired number of clusters (the cell below uses KMeans(7)). Then run the remaining cells until the end.
###Code
kmeans = KMeans(7)
kmeans.fit(x)
###Output
_____no_output_____
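###Markdown
The exercise suggests trying other numbers of clusters. A quick, hedged way to compare candidate values of k (not part of the original solution) is the elbow method: fit KMeans for a range of k and plot the within-cluster sum of squares (`inertia_`). This sketch assumes the `x` DataFrame defined above.
###Code
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# within-cluster sum of squares (WCSS) for k = 1..9
wcss = [KMeans(k).fit(x).inertia_ for k in range(1, 10)]
plt.plot(range(1, 10), wcss)
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
###Output
_____no_output_____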
###Markdown
Clustering Results
###Code
identified_clusters = kmeans.fit_predict(x)
identified_clusters
data_with_clusters = data.copy()
data_with_clusters['Cluster'] = identified_clusters
data_with_clusters
plt.scatter(data['Longitude'], data['Latitude'],c=data_with_clusters['Cluster'], cmap = 'rainbow')
plt.xlim(-180,180)
plt.ylim(-90, 90)
plt.show()
###Output
_____no_output_____ |
metaspace/python-client/docs/source/content/examples/submit-dataset.ipynb | ###Markdown
Submit dataset example Please note that to submit a dataset you will need AWS S3 Metaspace bucket credentials. If you don't have any, please [contact us over email](mailto:[email protected]). Log in to METASPACETo authenticate with METASPACE, generate an API key from your [account page](https://metaspace2020.eu/user/me) and enter it below
###Code
import json, pprint, getpass
from metaspace import SMInstance
sm = SMInstance()
sm
# This will prompt you to enter your API key.
# Note that API keys should be kept secret like passwords.
# You can alternatively save your API key in a config file - see config.template for more details.
if not sm.logged_in():
# Using getpass here prevents the API key from being accidentally saved with this notebook.
api_key = getpass.getpass(prompt='API key: ', stream=None)
sm.login(api_key=api_key)
###Output
_____no_output_____
###Markdown
Provide local paths to your imzML and Ibd files
###Code
imzml_fn = 'your_path_to_imzMLFile/Name.imzML'
ibd_fn = 'your_path_to_IbdFile/Name.ibd'
###Output
_____no_output_____
###Markdown
Provide metadata for your dataset
###Code
dataset_name = "Sample Name"
metadata = {'Data_Type': 'Imaging MS', #Shouldn't be changed
'MS_Analysis': {'Analyzer': 'E.g. FTICR, Orbitrap',
'Detector_Resolving_Power': {'Resolving_Power': 130000,
'mz': 400},
'Ionisation_Source': 'E.g. MALDI, DESI',
'Polarity': 'Ion polarity mode[Positive/Negative]',
'Pixel_Size': {
'Xaxis': 20,
'Yaxis': 40}
},
'Sample_Information': {
'Organism': 'Species',
'Organism_Part': 'Organ or organism part',
'Sample_Growth_Conditions': 'E.g. intervention, treatment', #This is an extra field
'Condition': 'E.g. wildtype, diseased'}, #This is an extra field
'Sample_Preparation': {'MALDI_Matrix': '2,5-dihydroxybenzoic acid (DHB)',
'MALDI_Matrix_Application': 'ImagePrep',
'Sample_Stabilisation': 'Preservation method',
'Solvent': 'none',
'Tissue_Modification': 'E.g. chemical modification'}, #This is an extra field
'Submitted_By': {'Institution': 'University X',
'Principal_Investigator': {'Email': '[email protected]',
'First_Name': 'PI_Name',
'Surname': 'PI_Surname'},
'Submitter': {'Email': 'your Email', #Email which you used to register on MS
'First_Name': 'Name',
'Surname': 'Surname'}}}
###Output
_____no_output_____
###Markdown
Specify list of databases against which you want to analyze your dataset
###Code
# Available databases:
# Please notice that if you choose more than 3 databases the processing may take a while
# BraChemDB-2018-01
# ChEBI-2018-01
# HMDB-v4
# HMDB-v4-cotton
# HMDB-v4-dev
# HMDB-v4-endogenous
# HMDB-v2.5
# LipidMaps-2017-12-12
# PAMDB-v1.0
# SwissLipids-2018-02-02
databases = ['HMDB-v4', 'ChEBI-2018-01']
###Output
_____no_output_____
###Markdown
Dataset visibility(Private/Public) We are currently allow users to choose if they want their datasets and annotations to be available publicly or not. Set the value below to False if you want it to be private
###Code
is_public = True # or False
###Output
_____no_output_____
###Markdown
Upload and submit the dataset
###Code
# Make sure you put your AWS user credentials into the credentials file according to the
# boto3 documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#shared-credentials-file
# NOTE (added placeholder): `bucket` was undefined in this cell; set it to the METASPACE S3 bucket name you were given.
bucket = 'your-metaspace-bucket'
if metadata['MS_Analysis']['Polarity'] == 'Positive':
adducts = ['+H', '+Na', '+K']
else:
adducts = ['-H', '+Cl']
sm.submit_dataset_v2(
imzml_fn, ibd_fn, dataset_name, json.dumps(metadata), bucket,
is_public=is_public, moldbs=databases, adducts=adducts
)
###Output
_____no_output_____
###Markdown
Submit dataset example Log in to METASPACETo authenticate with METASPACE, generate an API key from your[account page](https://metaspace2020.eu/user/me) and enter it below
###Code
import json
from metaspace import SMInstance
sm = SMInstance()
# This will prompt you to enter your API key if needed and it will save it to a config file.
# Note that API keys should be kept secret like passwords.
sm.save_login()
###Output
_____no_output_____
###Markdown
Provide local paths to your imzML and Ibd files
###Code
imzml_fn = 'your_path_to_imzMLFile/Name.imzML'
ibd_fn = 'your_path_to_IbdFile/Name.ibd'
###Output
_____no_output_____
###Markdown
Provide metadata for your dataset
###Code
dataset_name = 'Sample Name'
metadata = {
'Data_Type': 'Imaging MS', # shouldn't be changed
'Sample_Information': {
'Organism': 'Species',
'Organism_Part': 'Organ or organism part',
'Condition': 'E.g. wildtype, diseased',
'Sample_Growth_Conditions': 'E.g. intervention, treatment' # this is an extra field
},
'Sample_Preparation': {
'Sample_Stabilisation': 'Preservation method',
'Tissue_Modification': 'E.g. chemical modification',
'MALDI_Matrix': '2,5-dihydroxybenzoic acid (DHB)',
'MALDI_Matrix_Application': 'ImagePrep',
'Solvent': 'none' # this is an extra field
},
'MS_Analysis': {
'Polarity': 'Ion polarity mode[Positive/Negative]',
'Ionisation_Source': 'E.g. MALDI, DESI',
'Analyzer': 'E.g. FTICR, Orbitrap',
'Detector_Resolving_Power': {
'mz': 400,
'Resolving_Power': 130000
},
'Pixel_Size': {
'Xaxis': 20,
'Yaxis': 40
}
}
}
###Output
_____no_output_____
###Markdown
Specify list of databases against which you want to analyze your dataset
###Code
# Get list of available databases:
print(sm.databases())
###Output
_____no_output_____
###Markdown
Public databases:```[BraChemDB-2018-01, ChEBI-2018-01, HMDB-v2.5, HMDB-v4, HMDB-v4-cotton, HMDB-v4-dev, HMDB-v4-endogenous, LipidMaps-2017-12-12, PAMDB-v1.0, SwissLipids-2018-02-02]```
###Code
# Databases may be specified either as an integer ID, or in ('name', 'version') form
# Please notice that if you choose more than 3 databases the processing may take a while
databases = [
22, # ID 22 corresponds to HMDB v4
('ChEBI', '2018-01'), # ('name', 'version') style is also accepted
]
###Output
_____no_output_____
###Markdown
Dataset visibility (Public/Private) We currently allow users to choose whether they want their datasets and annotations to be publicly available. Set the value below to `True` if you want the dataset to be public.If `False`, the dataset will only be visible to yourself, other members of your Group, METASPACE administrators, and members of any Projects you add it to.
###Code
is_public = True # or False
###Output
_____no_output_____
###Markdown
Submit the dataset
###Code
sm.submit_dataset(
imzml_fn, ibd_fn, dataset_name,
metadata, is_public, databases
)
###Output
_____no_output_____
###Markdown
Submit dataset example Log in to METASPACETo authenticate with METASPACE, generate an API key from your[account page](https://metaspace2020.eu/user/me) and enter it below
###Code
import json, getpass
from metaspace import SMInstance
sm = SMInstance()
# This will prompt you to enter your API key.
# Note that API keys should be kept secret like passwords.
# You can alternatively save your API key in a config file - see config.template for more details.
if not sm.logged_in():
# Using getpass here prevents the API key from being accidentally saved with this notebook.
api_key = getpass.getpass(prompt='API key: ', stream=None)
sm.login(api_key=api_key)
###Output
_____no_output_____
###Markdown
Provide local paths to your imzML and Ibd files
###Code
imzml_fn = 'your_path_to_imzMLFile/Name.imzML'
ibd_fn = 'your_path_to_IbdFile/Name.ibd'
###Output
_____no_output_____
###Markdown
Provide metadata for your dataset
###Code
dataset_name = 'Sample Name'
metadata = {
'Data_Type': 'Imaging MS', # shouldn't be changed
'Sample_Information': {
'Organism': 'Species',
'Organism_Part': 'Organ or organism part',
'Condition': 'E.g. wildtype, diseased',
'Sample_Growth_Conditions': 'E.g. intervention, treatment' # this is an extra field
},
'Sample_Preparation': {
'Sample_Stabilisation': 'Preservation method',
'Tissue_Modification': 'E.g. chemical modification',
'MALDI_Matrix': '2,5-dihydroxybenzoic acid (DHB)',
'MALDI_Matrix_Application': 'ImagePrep',
'Solvent': 'none' # this is an extra field
},
'MS_Analysis': {
'Polarity': 'Ion polarity mode[Positive/Negative]',
'Ionisation_Source': 'E.g. MALDI, DESI',
'Analyzer': 'E.g. FTICR, Orbitrap',
'Detector_Resolving_Power': {
'mz': 400,
'Resolving_Power': 130000
},
'Pixel_Size': {
'Xaxis': 20,
'Yaxis': 40
}
}
}
###Output
_____no_output_____
###Markdown
Specify list of databases against which you want to analyze your dataset
###Code
# Get list of available databases:
print(sm.databases())
###Output
_____no_output_____
###Markdown
Public databases:```[BraChemDB-2018-01, ChEBI-2018-01, HMDB-v2.5, HMDB-v4, HMDB-v4-cotton, HMDB-v4-dev, HMDB-v4-endogenous, LipidMaps-2017-12-12, PAMDB-v1.0, SwissLipids-2018-02-02]```
###Code
# Databases may be specified either as an integer ID, or in ('name', 'version') form
# Please notice that if you choose more than 3 databases the processing may take a while
databases = [
22, # ID 22 corresponds to HMDB v4
('ChEBI', '2018-01'), # ('name', 'version') style is also accepted
]
###Output
_____no_output_____
###Markdown
Dataset visibility (Public/Private) We currently allow users to choose whether they want their datasets and annotations to be publicly available. Set the value below to `True` if you want the dataset to be public.If `False`, the dataset will only be visible to yourself, other members of your Group, METASPACE administrators, and members of any Projects you add it to.
###Code
is_public = True # or False
###Output
_____no_output_____
###Markdown
Submit the dataset
###Code
sm.submit_dataset(
imzml_fn, ibd_fn, dataset_name,
metadata, is_public, databases
)
###Output
_____no_output_____ |
Regression/Simple_linear1.ipynb | ###Markdown
Also do this in PYMC3
###Code
import pymc3 as mc3
np.array(float_df['weight'])
# NOTE: the linear regression model we're trying to solve for is
# given by:
# y = b0 + b1(x) + error
# where b0 is the intercept term, b1 is the slope, and error is
# the error
# model the intercept/slope terms of our model as
# normal random variables with comically large variances
with mc3.Model() as model:
    b0 = mc3.Normal('b0', mu=0, tau=0.0003)  # tau is a precision, so a small tau means a huge variance;
    b1 = mc3.Normal('b1', mu=0, tau=0.0003)  # passed positionally, 0.0003 would be read as a tiny sd instead
# model our error term as a uniform random variable
err = mc3.Uniform('err', 0, 500)
# "model" the observed x values as a normal random variable
# in reality, because x is observed, it doesn't actually matter
# how we choose to model x -- PyMC isn't going to change x's values
    x_weight = mc3.Normal('weight', mu=0, tau=0.0003, observed=np.array(float_df['weight']))
# this is the heart of our model: given our b0, b1 and our x observations, we want
# to predict y
pred = mc3.Deterministic('pred', b0 + b1*x_weight)
# @mc3.deterministic
# def pred(b0=b0, b1=b1, x=x_weight):
# return b0 + b1*x
# "model" the observed y values: again, I reiterate that PyMC treats y as
# evidence -- as fixed; it's going to use this as evidence in updating our belief
# about the "unobserved" parameters (b0, b1, and err), which are the
# things we're interested in inferring after all
    y = mc3.Normal('y', mu=pred, tau=err, observed=np.array(float_df['mpg']))  # err is modelled as a precision, matching the PyMC2-style original
trace = mc3.sample(10000)
mc3.summary(trace)
mc3.traceplot(trace)
###Output
Applied interval-transform to err and added transformed err_interval_ to model.
Assigned NUTS to b0
Assigned NUTS to b1
Assigned NUTS to err_interval_
[-----------------100%-----------------] 10000 of 10000 complete in 2.2 sec
b0:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.000 0.000 0.000 [0.000, 0.000]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.000 0.000 0.000 0.000 0.000
b1:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.000 0.000 0.000 [0.000, 0.000]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.000 0.000 0.000 0.000 0.000
err:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
250.000 0.000 0.000 [250.000, 250.000]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
250.000 250.000 250.000 250.000 250.000
pred:
Mean SD MC Error 95% HPD interval
-------------------------------------------------------------------
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
0.000 0.000 0.000 [0.000, 0.000]
Posterior quantiles:
2.5 25 50 75 97.5
|--------------|==============|==============|--------------|
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000
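###Markdown
A minimal sketch (not in the original notebook) of visualising the fit, assuming `trace` and `float_df` from above: take the posterior means of b0 and b1 and overlay the implied regression line on the data.
###Code
import numpy as np
import matplotlib.pyplot as plt

b0_hat = trace['b0'].mean()  # posterior mean of the intercept
b1_hat = trace['b1'].mean()  # posterior mean of the slope

xs = np.linspace(float_df['weight'].min(), float_df['weight'].max(), 100)
plt.scatter(float_df['weight'], float_df['mpg'], alpha=0.5)
plt.plot(xs, b0_hat + b1_hat * xs, color='red')  # posterior-mean regression line
plt.xlabel('weight')
plt.ylabel('mpg')
plt.show()
###Output
_____no_output_____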
|
I. Creating an algoirthm from scratch.ipynb | ###Markdown
I. Creating an algorithm from scratchIn this notebook, I create an algorithm from scratch to classify different dog breeds. Initially, we shall start with: 0. Importing libraries 1. Importing datasets 2. Create a CNN to Classify Canine Breeds from scratch 2.1 Evaluate algorithm created. 3. Opportunity for improvement 0.Import libraries
###Code
# We start by importing libraries
import time
import json
import copy
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import PIL
from PIL import Image
from collections import OrderedDict
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.nn.functional as F
import os
import random
import requests
import ast
from glob import glob
import cv2
!pip3 install torch torchvision
import PIL
print(PIL.PILLOW_VERSION)
!apt-get install
###Output
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following package was automatically installed and is no longer required:
libnvidia-common-410
Use 'apt autoremove' to remove it.
0 upgraded, 0 newly installed, 0 to remove and 10 not upgraded.
###Markdown
1.Import datasetsThe following datasets are imported:- Dog images- Humans- Pre-trained face detector
###Code
# Downloading the dog and human dataset
from IPython.display import clear_output
!wget -cq -O dogImages.zip https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip
!wget -cq -O lfw.tgz http://vis-www.cs.umass.edu/lfw/lfw.tgz
clear_output()
print("Downloaded Successfully")
# Extractting the datasets
!unzip -n dogImages.zip
!tar -xvzf lfw.tgz
clear_output()
print("Extracted Successfully")
#Find out the number of images for each category
import numpy as np
from glob import glob
# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))
# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
2.Create Classifier from ScratchBefore doing so, we are going to run some code in order to help us :* Run faster the code using GPU if available* Prepare images for Classification* Specify data directories
###Code
# Use GPU if it's available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Is GPU available: ', 'Yes' if torch.cuda.is_available() else 'No')
# check if CUDA is available
use_cuda = torch.cuda.is_available()
###Output
Is GPU available: Yes
###Markdown
2.1 Prepare images for classification
###Code
#The following code will help us when dealing with images to be used in different classification models#
#from PIL import Image
#import torchvision.transforms as transforms
#from torch.autograd import Variable
def process_image_to_tensor(image):
# define transforms for the training data and testing data
prediction_transforms = transforms.Compose([transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
img_pil = Image.open( image ).convert('RGB')
img_tensor = prediction_transforms( img_pil )[:3,:,:].unsqueeze(0)
return img_tensor
# helper function for un-normalizing an image
# and converting it from a Tensor image to a NumPy image for display
def image_convert(tensor):
""" This is to display a tensor as an image. """
image = tensor.to("cpu").clone().detach()
image = image.numpy().squeeze()
image = image.transpose(1,2,0)
image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
image = image.clip(0, 1)
return image
###Output
_____no_output_____
###Markdown
2.2 Specify the directories
###Code
# Specify directories
data_dir = 'dogImages'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
import os
from torchvision import datasets
### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# Batch size (note: the train loader below passes 32 explicitly rather than using this variable)
batch_size = 20
# For faster computation, setting num_workers
num_workers = 0
# Transforms for the training, validation, and testing sets
data_transforms = {
'train' : transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'valid' : transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'test' : transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
}
# Loading the datasets with ImageFolder
image_datasets = {
'train' : datasets.ImageFolder(train_dir, transform=data_transforms['train']),
'valid' : datasets.ImageFolder(valid_dir, transform=data_transforms['valid']),
'test' : datasets.ImageFolder(test_dir, transform=data_transforms['test'])
}
# Using the image datasets and the trainforms to define dataloaders
loaders = {
'train' : torch.utils.data.DataLoader(image_datasets['train'], batch_size = 32, shuffle=True, num_workers = num_workers),
'valid' : torch.utils.data.DataLoader(image_datasets['valid'], batch_size = 16),
'test' : torch.utils.data.DataLoader(image_datasets['test'], batch_size = 16)}
###Output
_____no_output_____
###Markdown
2.3 Define Data Architecture for scratch model
###Code
# Model Architecture
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# convolutional layer (sees 224*224*3 image tensor)
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
# convolutional layer (sees 112*112*32 tensor)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
# convolutional layer (sees 56*56*64 tensor)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# linear layer (64 * 28 * 28 -> 500)
self.fc1 = nn.Linear(64*28*28, 500)
# linear layer (500 -> 133)
self.fc2 = nn.Linear(500, 133)
# dropout layer (p=0.25)
self.dropout = nn.Dropout(0.25)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(x.size(0), -1)
# add dropout layer
x = self.dropout(x)
# add 1st hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add 2nd hidden layer, with relu activation function
x = self.fc2(x)
return x
#-#-# You do NOT have to modify the code below this line. #-#-#
# instantiate the CNN
model_scratch = Net()
# move tensors to GPU if CUDA is available
if use_cuda:
model_scratch.cuda()
print(model_scratch)
###Output
Net(
(conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(fc1): Linear(in_features=50176, out_features=500, bias=True)
(fc2): Linear(in_features=500, out_features=133, bias=True)
(dropout): Dropout(p=0.25)
)
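###Markdown
Sanity check (added note): each conv layer preserves the spatial size (3x3 kernels with padding=1) and each of the three max-pools halves it, so 224 -> 112 -> 56 -> 28, which is where the 64 * 28 * 28 input size of `fc1` comes from.
###Code
# added sanity check: trace the spatial size through the three pool layers
size = 224
for _ in range(3):
    size //= 2  # conv keeps the size; each 2x2 max-pool halves it
print(size, 64 * size * size)  # 28 and 50176, matching fc1's in_features above
###Output
_____no_output_____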
###Markdown
2.4 Define Loss Function and Optimizer
###Code
# Specifiy Loss Function and Optimizer
import torch.optim as optim
### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()
### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
2.5 Train and validate the model
###Code
# this part helps building a robust training to deal with truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
"""returns trained model"""
print("start training for {} epochs ...".format(n_epochs))
# initialize tracker for minimum validation loss
valid_loss_min = np.Inf
# exist save-file, load save file
if os.path.exists(save_path):
print("load previous saved model ...")
model.load_state_dict(torch.load(save_path))
for epoch in range(1, n_epochs+1):
# initialize variables to monitor training and validation loss
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train()
for batch_idx, (data, target) in enumerate(loaders['train']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
## record the average training loss, using something like
## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
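            ## (this is an incremental running mean: new_avg = old_avg + (x_k - old_avg) / k)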
train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss))
######################
# validate the model #
######################
model.eval()
for batch_idx, (data, target) in enumerate(loaders['valid']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
## update the average validation loss
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# update average validation loss
valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
# print training/validation statistics
print('\n-----------------------------------------------------------------------------\nEpoch: {} \nTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch,
train_loss,
valid_loss
))
## TODO: save the model if validation loss has decreased
if valid_loss <= valid_loss_min:
print('Validation loss has decreased from ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
valid_loss))
torch.save(model.state_dict(), save_path)
valid_loss_min = valid_loss
# return trained model
return model
# train the model
loaders_scratch = loaders
model_scratch = train(5, loaders_scratch, model_scratch, optimizer_scratch,
criterion_scratch, use_cuda, 'model_scratch.pt')
# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
start training for 5 epochs ...
load previous saved model ...
-----------------------------------------------------------------------------
Epoch: 1
Training Loss: 4.014856 Validation Loss: 3.895001
Validation loss has decreased from (inf --> 3.895001). Saving model ...
-----------------------------------------------------------------------------
Epoch: 2
Training Loss: 4.004414 Validation Loss: 3.926379
-----------------------------------------------------------------------------
Epoch: 3
Training Loss: 3.975496 Validation Loss: 3.936420
-----------------------------------------------------------------------------
Epoch: 4
Training Loss: 3.960790 Validation Loss: 3.941092
-----------------------------------------------------------------------------
Epoch: 5
Training Loss: 3.944167 Validation Loss: 3.911684
###Markdown
The model has been trained for 22 epochs through different rounds. 2.6 Test the model
###Code
def test(loaders, model, criterion, use_cuda):
# monitor test loss and accuracy
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update average test loss
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.908582
Test Accuracy: 11% (98/836)
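###Markdown
For context (added note): with 133 breed classes, random guessing would score about 1/133, i.e. under 1%, so 11% test accuracy shows the scratch model has learned something; still, as section 3 suggests, there is plenty of opportunity for improvement.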
|
numba_tutorial_scipy2016-master/notebooks/exercises/04.Direct.Summation.Exercises.ipynb | ###Markdown
Exercise 1Write a function `create_n_random_particles` that takes the arguments `n` (number of particles), `m` (mass of every particle) and a domain within to generate a random number (as in the class above).It should create an array with `n` elements and `dtype=particle_dtype` and then return that array.For each particle, the mass should be initialized to the value of `m` and the potential `phi` initialized to zero.For the `x` component of a given particle `p`, you might do something like```pythonp['x'] = domain * numpy.random.random()```
###Code
@njit
def create_n_random_particles(n, m, domain=1):
'''
Creates `n` particles with mass `m` with random coordinates
between 0 and `domain`
'''
parts = numpy.zeros((n), dtype=particle_dtype)
for p in parts:
p.m = m
p.phi = 0
p.x = domain*numpy.random.random()
p.y = domain*numpy.random.random()
        p.z = domain*numpy.random.random()
return parts
###Output
_____no_output_____
###Markdown
Test it out!
###Code
time_taken = %timeit -o parts = create_n_random_particles(1000, .001, 1)
###Output
10000 loops, best of 3: 63.5 µs per loop
###Markdown
Exercise 2 Write a JITted function `distance` to calculate the distance between two particles of dtype `particle_dtype` Here's the `distance` method from the `Particle` class as a reference:```pythondef distance(self, other): return ((self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2)**.5```
###Code
@njit
def distance(part1, part2):
'''calculate the distance between two particles'''
    return ((part1['x'] - part2['x'])**2 +
            (part1['y'] - part2['y'])**2 +
            (part1['z'] - part2['z'])**2 )**.5
###Output
_____no_output_____
###Markdown
Try it out!
###Code
parts = create_n_random_particles(1000, .001, 1)  # re-create the array: assignments inside %timeit do not persist
distance(parts[0], parts[1])
###Output
_____no_output_____
###Markdown
Exercise 3 Modify the original `direct_sum` function (copied below for reference) to instead work a NumPy array of particles. Loop over each element in the array and calculate its total potential.```pythondef direct_sum(particles): """ Calculate the potential at each particle using direct summation method. Arguments: particles: the list of particles """ for i, target in enumerate(particles): for source in (particles[:i] + particles[i+1:]): r = target.distance(source) target.phi += source.m / r```
###Code
@njit
def direct_sum(particles):
# take it away
for i,target in enumerate(particles):
for j,source in enumerate(particles):
if i!=j:
r = distance(target,source)
target['phi'] += source['m']/r
numba_time = %timeit -o direct_sum(parts)
###Output
The slowest run took 10.02 times longer than the fastest. This could mean that an intermediate result is being cached.
1 loop, best of 3: 33.1 ms per loop
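###Markdown
Note (added): `direct_sum` is a double loop over all particle pairs, so its cost grows as O(N^2); doubling the particle count roughly quadruples the runtime.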
|
diseaseRule.ipynb | ###Markdown
Confidence is a conditional probability, $P(Y|X) = C(X,Y)/C(X)$, which means: if a patient got disease X first, what is the probability that this patient got disease Y next. The Lift measures the probability of X and Y occurring together divided by the probability of X and Y occurring together if they were independent events. That is, $\mathrm{lift}(X \Rightarrow Y) = P(X,Y)/(P(X) \cdot P(Y))$. If X and Y are independent then the Lift == 1. If they occur together more often than if they were independent, then Lift > 1. (A small numerical illustration follows the next code cell.) Not Group Concurrent Disease
###Code
# Not grouping concurrent diseases means that if multiple diseases happen concurrently, we treat them as happening in sequence.
# Grouping concurrent diseases is preferable.
data['PBSDISEASEGROUP'] = data['PBSDISEASEGROUP'].apply(toli)
data = data.groupby(['MASTERPATIENTID'])['PBSDISEASEGROUP'].apply(list).reset_index()
df = prefix_rule(data)
AssoRule,freqTable = create_table(df)
AssoRule = generate_statistics(AssoRule,freqTable)
###Output
_____no_output_____
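###Markdown
A small numerical illustration (invented numbers, not from this dataset): among 1,000 patients, suppose 200 develop X, 100 develop Y, and 50 develop X followed by Y. Then confidence = 50/200 = 0.25, and lift = (50/1000) / ((200/1000) * (100/1000)) = 2.5 > 1, i.e. X and Y co-occur 2.5 times more often than independence would predict.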
###Markdown
Group Concurrent Disease
###Code
# groupby the diseases of the same date
data = data.groupby(['MASTERPATIENTID', 'DISPENSECALENDARDATE'])['PBSDISEASEGROUP'].apply(list).reset_index()
# group all the records for each person in sequence
data = data.groupby(['MASTERPATIENTID'])['PBSDISEASEGROUP'].apply(list).reset_index()
df3 = prefix_rule(data,3)
AssoRule3,freqTable3 = create_table(df3)
AssoRule3 = generate_statistics(AssoRule3,freqTable3)
AssoRule3.sort_values(by="confidence" , ascending=False)[1:10]
df4 = prefix_rule(data,4)
AssoRule4,freqTable4 = create_table(df4)
AssoRule4 = generate_statistics(AssoRule4,freqTable4)
AssoRule4.sort_values(by="confidence" , ascending=False)[1:10]
df5 = prefix_rule(data,5)
AssoRule5,freqTable5 = create_table(df5)
AssoRule5 = generate_statistics(AssoRule5,freqTable5)
AssoRule5.sort_values(by="confidence" , ascending=False)[1:10]
df11 = prefix_rule(data,11)
AssoRule11,freqTable11 = create_table(df11)
AssoRule11 = generate_statistics(AssoRule11,freqTable11)
AssoRule11.sort_values(by="confidence" , ascending=False)[1:10]
top50 = AssoRule11.sort_values(by="confidence" , ascending=False)[1:51]
outputpath="/Users/lina/desktop/CAPSTONE/top50_rules_disease.csv"
top50.to_csv(outputpath,sep=',',index=False,header=True)
###Output
_____no_output_____ |
community_detection/group_segments/notebooks/group_filteration_based_on_Noun_graphs.ipynb | ###Markdown
with updated algorithm
###Code
%load_ext autoreload
%autoreload 2
import sys
sys.path.append("/home/ray__/ssd/BERT/")
sys.path.append("/home/ray__/CS/org/etherlabs/ai-engine/pkg/")
sys.path.append("../")
from gpt_feat_utils import GPT_Inference
gpt_model = GPT_Inference("/home/ray__/ssd/BERT/models/ai/epoch3/", device="cpu")
#gpt_model = GPT_Inference("/home/ray__/ssd/BERT/models/ether/", device="cuda")
# with open('../topic_testing/cullen_test.json','rb') as f:
# request = json.load(f)
# if isinstance(request, str):
# request = json.loads(request)
import text_preprocessing.preprocess as tp
from extra_preprocess import preprocess_text
sys.path.append("../helper_functions/")
from get_groups import call_gs
with open('../topic_testing/sync_eng_2020_01_20.txt','rb') as f:
request = json.load(f)
if isinstance(request, str):
request = json.loads(request)
group = call_gs(request)
# request = request["body"]
# request["segments"] = sorted(request['segments'], key=lambda kv:kv['startTime'])
# for index, seg in enumerate(request["segments"]):
# request["segments"][index]["originalText"] = " ".join(preprocess_text(seg["originalText"]))
# segments_map = {}
# for index, seg in enumerate(request["segments"]):
# if seg["originalText"] != "":
# segments_map[seg['id']] = seg
# # if len(seg["originalText"].split(". "))==1 and len(seg["originalText"].split(" "))<=6 :
# #continue
# segments_map[seg['id']]["order"] = index
# text = list(map(lambda seg: (seg["originalText"], seg["id"]), [segment for segment in request['segments'] if segment["originalText"]!=""]))
# seg_list = [sent for sent, id in text]
# segid_list = [id for sent, id in text]
# sent_list = list(map(lambda seg, segid:([sent + ". " for sent in seg.split(". ")],segid), seg_list, segid_list))
# sent_list = [(sent, segid) for seg, segid in sent_list for sent in seg]
import networkx as nx
import pandas as pd
import numpy as np
from nltk.tokenize import sent_tokenize
import json
import nltk, string,itertools
from nltk.corpus import stopwords
import itertools
import pickle
import re
import random
WEB_URL_REGEX = r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))"""
stop_words = set(stopwords.words('english'))
stop_words_spacy = list("""
a about above across after afterwards again against all almost alone along
already also although always am among amongst amount an and another any anyhow
anyone anything anyway anywhere are around as at
back be became because become becomes becoming been before beforehand behind
being below beside besides between beyond both bottom but by
call can cannot ca could
did do does doing done down due during
each eight either eleven else elsewhere empty enough even ever every
everyone everything everywhere except
few fifteen fifty first five for former formerly forty four from front full
further
get give go
had has have he hence her here hereafter hereby herein hereupon hers herself
him himself his how however hundred
i if in indeed into is it its itself
keep
last latter latterly least less
just
made make many may me meanwhile might mine more moreover most mostly move much
must my myself
name namely neither never nevertheless next nine no nobody none noone nor not
nothing now nowhere n't
of off often on once one only onto or other others otherwise our ours ourselves
out over own
part per perhaps please put
quite
rather re really regarding
same say see seem seemed seeming seems serious several she should show side
since six sixty so some somehow someone something sometime sometimes somewhere
still such
take ten than that the their them themselves then thence there thereafter
thereby therefore therein thereupon these they third this those though three
through throughout thru thus to together too top toward towards twelve twenty
two
under until up unless upon us used using
various very via was we well were what whatever when whence whenever where
whereafter whereas whereby wherein whereupon wherever whether which while
whither who whoever whole whom whose why will with within without would
yet you your yours yourself yourselves yeah okay
""".split()
)
stop_words = set(list(stop_words)+list(stop_words_spacy))
class CandidateKPExtractor(object):
def __init__(self, stop_words, filter_small_sents = True):
self.punct = set(string.punctuation)
self.filter_small_sents = filter_small_sents
self.stop_words = stop_words
def get_candidate_phrases(self, text, pos_search_pattern_list=[r"""base: {(<JJ.*>*<NN.*>+<IN>)?<JJ>*<NN.*>+}"""]):
all_chunks = []
for pattern in pos_search_pattern_list:
all_chunks+=self.getregexChunks(text, pattern)
candidates_tokens = [' '.join(word for word, pos,
chunk in group)
for key, group in itertools.groupby(all_chunks,
self.lambda_unpack(lambda word, pos, chunk: chunk != 'O')) if key]
candidate_phrases = [cand for cand in candidates_tokens if cand not in self.stop_words and not all(char in self.punct for char in cand)]
return candidate_phrases
def getregexChunks(self,text, grammar):
chunker = nltk.chunk.regexp.RegexpParser(grammar)
tagged_sents = nltk.pos_tag_sents(nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(text))
all_chunks = list(itertools.chain.from_iterable(nltk.chunk.tree2conlltags(chunker.parse(tagged_sent))
for tagged_sent in tagged_sents))
return all_chunks
def lambda_unpack(self, f):
return lambda args: f(*args)
def get_kp_nouns(kp,sent_nouns):
return set(kp.lower().split(' '))&set(sent_nouns)
import pickle
#se_graph = pickle.load(open("/home/ray__/ssd/minds/se/se_ether_demo_extended.pkl", "rb"))
#se_graph = pickle.load(open("/home/ray__/ssd/minds/se/se_noun_graph.pkl", "rb"))
se_graph = pickle.load(open("/home/ray__/ssd/minds/ai/noun_graph.pkl", "rb"))
def get_freq_score(text_list):
filtered_kp_ctr = []
text_kps_list = []
kp_e = CandidateKPExtractor(stop_words)
for input_text in text_list:
text_kps = kp_e.get_candidate_phrases(input_text)
text_kps = list(set([ele.lower() for ele in text_kps]))
tagged_sents = nltk.pos_tag_sents(nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(input_text))
text_nouns = []
for tagged_sent in tagged_sents:
text_nouns.extend([ele[0] for ele in list(tagged_sent) if ele[1].startswith('NN')])
text_nouns = [ele.lower() for ele in text_nouns]
intersecting_nouns = list(set(text_nouns)&set(se_graph))
#connected phrases
filtered_kps = []
for kp in text_kps:
if len(kp.split(' '))>1:
kp_nouns = list(set(kp.split(' '))&set(intersecting_nouns))
                for noun in kp_nouns:
                    rem_nouns = list(set(kp_nouns)-set([noun]))
                    if set(rem_nouns)&set(se_graph[noun])==set(rem_nouns):
                        filtered_kps.append(kp)
                        break  # phrase confirmed connected; no need to test the remaining nouns
filtered_kps = list(set(filtered_kps))
filtered_kp_ctr.append(len(filtered_kps))
text_kps_list.append(filtered_kps)
return filtered_kp_ctr
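# Hedged mini-example of the connectivity test used in get_freq_score above
# (toy graph, not the pickled se_graph): a multi-word phrase survives the filter
# when, for some noun in it, every remaining noun is a neighbour of that noun.
_toy_graph = {'gradient': {'descent'}, 'descent': {'gradient'}, 'cat': set()}
_kp_nouns = ['gradient', 'descent']
_rem = set(_kp_nouns) - {'gradient'}
assert _rem & set(_toy_graph['gradient']) == _rem  # 'gradient descent' passes the filter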
from extra_preprocess import preprocess_text
from scipy.spatial.distance import cdist
def get_freq(groupobj):
seg_list = " ".join([seg['originalText'] for seg in groupobj])
freq_score = get_freq_score([seg_list])
return freq_score
import text_preprocessing.preprocess as tp
from extra_preprocess import preprocess_text
sys.path.append("../helper_functions/")
from get_groups import call_gs
with open('../topic_testing/arjun_validation_2.txt','rb') as f:
request = json.load(f)
if isinstance(request, str):
request = json.loads(request)
group = call_gs(request)
group_freq = {}
for groupid, groupobj in group.items():
group_freq[groupid] = get_freq(groupobj)
## ranking
group_rank = {}
user_id_map = {}
user_id_map = {"2c94451217a049129a166a3408da807c":"Sai","3f01f2032f584b178fafde6b437058ae":"Venkat","8fff81b5b2f14aa5ad67405f3e8127f3":"Sai","60d2ea6bed8c48269c8c024202a4148d":"Shubham","70caa6269d8e4869a45f7ea91ade3472":"Ether","3e1a008f734448b0ad9190778449af81":"Cullen","b4a57b25de68446cac990f856d3fe4d5":"Deep","716067a60a1a4034abc49a12ecafb39b":"Cullen","2f506a3d9e814de69d46a1fbf949fdc9":"Cullen","8d6db5f7d9b74c54ba38fe710ffcaf3f":"Krishna Sai", "c66797a92e6d46ad9573926e57f7dac3":"Nisha Yadav","31a3ba4761854ad9a041ddf1c4c6a1dc":"Reagan Rewop","84fbaa66a2474ea29ae053f3a2e519d6":"Mithun","75bdf310110b4b8fab88b16fafce920e":"Trishanth Diwate","b1e8787a9a1f4859ac11cbb6a8124fd9": "Venkata Dikshit", "fb52cb663aec4795aee38ccfd904d315":"Reagan Rewop", "81a3e15469374fceba1cf972faa209b2":"Arjun Kini", "ecfeeb757f0a4d47af1ebd513929264a":"Shubham", "62b6ae1d7f834b0bb2055f7c72bc3368":"Karthik Muralidharan", "1a21542584494fcaba957d768b595b80":"Vamshi Krishna", "7e7ccbba232d411aa95ad3f244a35f40":"Shashank", "65bb83952fb54409a4bb59bb707f1375":"Vani", "0bbbfe84c66145af8d0ffcd5258bba38":"Parshwa Nemi Jain"}
for index, (groupid, rank) in enumerate(sorted(group_freq.items(), key=lambda kv:kv[1], reverse=True)):
print ("Group Rank: ", index ,"\n\n")
#user_list = set([user_id_map[seg['spokenBy']] for seg in group[groupid]])
#print (*user_list, sep=", ", end=" ")
#print ("discussed: ")
print (" ".join([seg['originalText'] for seg in group[groupid]]), "\n\n")
###Output
Group Rank: 0
And welcome back to work Daily and today's video I'm going to give you eight smart questions ask hiring managers and job interviews, but before I do that I just wanna remind you it clicks the link below to subscribe about you get instant speech every time to be create nuclear content that helps you get a ahead. Alright so what do you do when you're in an interview and we get to that end where the hiring manager looks at you and said, give any questions for me all of course you do in fact it is very important you ask questions I wanna tell you the recently and a employer right now reached out to be frustrated and said I hate when I asked that question and the person says otherwise that he's been really nice. I think I'm okay that person told me does mac get hired and the reason for that is that they have real confirmed. If you aren't asking questions if you want doing your do generation and if you mart having enough value in yourself. It ask clarifying mind questions to make sure that this business is the right place for you remember we always tell you you're a working daily that you are business of want selling your services to an employer and you wanna make sure that it's a good partnership so to do that you've got ask questions to make sure that this is the right place for you. It's like kicking the tires are looking under the eyes make sure that you're making a good investment. So that mind let's take a look at the eight questions you should be asking me your next category. So before break down the eight questions been to tell you that they fall into four categories that we call them the four C's now by cutting up on these four C's you're gonna make sure that you're asking all of us is necessary to make a great decision about this employer and those four C's are connect and to culture validate and closely even by out knows that guys hiring that person he'll be working with one action questions to help you bond in the interview as far as culture does you wanna make sure this is the right corporate culture for you does this company believe and have values and go about business in the way that you respect trust that you. Best, but forward with respect challenges you wanna know what's keeping them up at night so that you can make sure these these are the kinds of challenges that you actually Wanna work on and leverage your skills to be successful flag and then lastly close you want to end this interviews conversation on a high. Note you wanna leave very clear and what the next step are so once we know the four C's it gets really easy to create questions. So we can ask them the interviews to make sure that we're getting the information we need and that's what I'm gonna show you next. So we're starting with connect and the first question you should always ask in an interview is how did you come to work here this is an opportunity for you to bond with this as a well by learning how they got hired by the company why through the into the company what made this side. This is a great place for that to work. So gonna give you a lot of insights to their own decisions and what they're like as a worker. The second question is what left knows about working here. So important that you ask a positive question that lets been talk about open seems a they love about the employer. This is gonna be a great chance for you to Bond and share a love for the things that are great about working at this company. 
Whilst you're gonna be able to validate whether or not you sound like things that you'd really care about remember these questions are designed for you it insight into whether or not just company's stupid for you. So while very positive questions is also giving you a chance to evaluate whether or not. It's a fit the second is cultures specifically cold culture and the two questions are going to ask care are around individuals that have been hired at the company. The first question is tell me about the most successful hire use may why is that person been very successful in their role. This is gonna to help you understand who has being recognised with the organization as a high performer you wanna listen carefully. I mean at an example, if they tell you a person you gave in it works eighty hours a week and took everything that they had to do and worked with no budget and real able to knock it out of the time does that sound like a place you wanna work is that's something like of Facebook work like balance it has their dust in row no so if they're recognizing that as a bit hire did you know this. The right place for you and the second place you wanna ask is the polar opposite. Tell me about a hire recently that didn't work out why did they fail them another role once again you want listen closely to the answer because we want to see if the traits are characteristics of that person who failed sound like yourself you don't think you can be successful there at the person it sounds like they really couldn't have a one in their job and you're gonna know that this is the right place for you and the thirteenth is challenge units and specifically the challenges of the company is going to get facing in the coming year. So first question you want ask is tell me now the biggest challenge you think the company will experience this year at how will this job helps and overcome it. What you're doing. There's is showing that that you understand you're a business of one your service provider and that what you're going to do is help them solve a about and alleviate your team. So by asking them with that pain or that problem is and how you can help solve that you're really sending a message to then you understand your role if you get hired the second question you wanna to the very challenges is around of format how do I measure my own performance to ensure that I'm having a positive impact on this challenge. Once again you're taking full ownership of your work of your efforts and by telling them that I understand that I'm supposed to do that you're sending a clear message to them you really will be accountable for your actions and that's a great selling speaker at the same time though if they come back you and say well there's no real way to measure we won't have any idea whether or not you're making an impact that would be a huge red flag to you because you're gonna wanna be able to do that in order to get a promotion or gonna raise what kind of company can't measure impact us a sign of a company that you probably don't want work for so you can see there why these questions are really warn not only do they setting up well with the employer by that you know what you're talking about also is gonna use some key insight into whether or not you wanna work that now the fourth and final is about close and that's how to properly close up this conversation. So the question that you wanna to ask and this you can a little great. 
But I tell you it really works is if there were some skills or experience which I had that would make me a benefit fit for this job. What would may be this is your polite where asking if they feel you have any shortcomings for if there is something sufficient about what you presented to them stage during the interview. This is a chance for them to call out and explain any certain skills or experience or things that might make uncertainty and high on you and so asking this question shows that you understand you're not a perfect candidate but then you do wanna to know what you can done better by the same and token if they do share something with you you now have an opportunity to say oh but I didn't share this experience or maybe I need to tell you about this feels that I ask it's a chance for me to but become that objection maybe be realizing that you can give them everything they needed to to hear space. That's a really important question ask and that last question is what are the next steps in the process and that's you want to understand where you stand and what has to happen next beauty get hired you don't ever wanna leave a job wondering when you'll get a phone call and they'll follow up when they'll make their decision. So ask this question is gonna help you get that information they're likely gonna tell you some things like while everything more people interview they'll hear by this date you need to come back in for another interview, but at least you'll have that information you won't be staring at the fathers or staring at your email has wondering whether or not you got the job you have the great thing about that is that if they give you a data that and it comes and goes you now have a valid reasons follow up with them and say when we spoke to the interview you mentioned and this will be the next steps that base come and gone is there anything else I can refer that my candidacy say so this is a really important question for you to ask for your own piece of mind at the same time it's going give you a sense of how structure this company is really how well got out their own hiring offices so that you can get a clear sense of whether or not they're the kind of company you wanna work there you how then the four C's connect culture challenges and clothes and with two questions of teams creates a super smart questions you should be asking hiring managers in your next job interview. Okay. So now that you have. I can hear I want hear from you if there one particular question you've asked in an interview that got a great resolved or maybe uses a question you apps that didnt get a good result make sure you share it with me below you want your you okay so questions not in feedback regarding this video we just to make sure you post it below we answer each everyone and I also hope that you will share and like this content because you know there's a lot of people out there that need help care questions, you'll be doing that in a favor and I'll make you as good as well lastly don't forget to subscribe each every week we're bringing to fresh new career content that help you get ahead and most of importantly I can't wait to see you in the next video so with in mind remember this if you wanna win you've got work it daily.
Group Rank: 1
Understands human Resource Management will take a look at brief history understand the roles of human resource management to organization and rip renewal model to best organize the functional areas of HR management. Now called human Resource Management is lot of a great deal since its beginnings around the year nineteen hundred. Here's a brief history of human Resource management. It's believed that the first personnel management Department began the National patch register company in the early nineteen hundreds personnel departments, which is merged is clearly defined in the nineteen twenties at least in the United States. We're largely search with technical functions. What began is a primarily clerical collaboration in large companies concerned with enable and employee records begin to face changes with so installation of nineteen sixties hR, developed in response to increase in compet*tion experience by the way seventeenth as a result of deregulation and rapid technological change in the nineteen nineteen globalization and compet*tion required human resource department to become more concerned with cost planning and the implications of various HR strategies for both organizations and their employees. Full of human Resource Management professionals has dramatically evolved over the years. This course provides an overview of the past and future roles of human Resource Management professionals organizations. If an organization has a formal HR group perhaps an HR department. There are typically three different rules that group might play in the market organization which of the roles or whether all fuels are performed depends kindly a blood management want HR to do and what part feature staff is demonstrated the potential mix of roles is shown here the primary role is strategic which helps to find business strategy relative to human capital and this contribution to organizational results. The strategic goal helps link human resource strategy with organizational mission and of the work of people in the your organization the operational and complete advocacy role manages most activities in keeping with organizational strategy and serving as an employee champion to balance the issues of employees and deploy. He has been a straight role, which focuses a clerical administration and record keeping, including essential paperwork compliance and policy implementation human Resource Management is claimed for the role cause a like organization grow bottom history in the past role of human Resource management. The personnel function was highly and Clerical in nature as you to see of this flight side of the model labeled the past role the pastoral rule focus not transactional activities related to processing. If people related in activities such payroll a heavy emphasis compliance led to the personnel function policing policy and procedure which lets little time for the importance strategic role. Role as seen on the left year of human Resource Management has evolved to focus on the strategic contributions of human Resource Management organizations as organization strive to realize their greater results the critical role of people is recognized by the strategic role of human Resource professionals to provide tech with ease champion employees and leads strategically human the organizations human research management is a value added function of your organization influencing key organizational outcomes. 
Let's explore a simple human Resource Management model the pivotal model suggests that the management is human resources is an organization centers on a key areas these functions are a collection of specialized human Resource management work for each functional area human Resource professionals are responsible for key activities. You can see them in the boxes around the model wants to find each functional area and discuss those a*sociated activities. Is doing what is asked or required by Federal State and local government in the management of people the activities of compliance includes people employment opportunity compliance into the federal state the local employment laws and regulations talent management are the integrated processes to attract motivate and retain productive and engaged employees these activities of talent management include recruitment talent acquisition collection an onboarding training and development is about the veteran of people and performance through information. They will use the activities of training the development included training development career planning and so on performance management are the processes to ensure the organization mission with the work of employees the activities of performance management include performance appraisal improvement and intervention total rewards as our financial and non financial tools used to attract automate and retain employees the activities of total rewards include compensation benefits recognition more life effective events employee safety powers is arbitrary the safety out well of people of work the activities is faking and health include employee safety security and workers compensation. In labor Relations focus have the relationship of a employees with the organization and with each other the activities of employee labor relations include policy management doc*mentation workplace investigations labor relations and employee rights. Human Resource Management model along with our freeze history should give you the context to best understand and apply the important role of human and resource management in today's organization.
Group Rank: 2
Image can be individuals family teams and and tries to understand the importance of study see behavior. So first you need a new how would you go about i is fine to see where were you trying for information. How would you set your budget whenever you mind about pay to some decision and consume based on regular basis, interestingly each can see answer these five differently and on background and some go factory for a company that makes sense the answer these questions will help them note and types of those design the price consumers are learning pay and where we should sell it first the very important understand how can we decision because the best way to create a deep customers is to understand what merger rate is consumption decisions and that place consumption only this way campaigns can design products or services are numbers the that to an happy insights things like super talents structured and a**lytics and offering we're all interested bringing joins everyone really other all the different skills areas of expertise pa*sion in one the coolest parts about work with China and product managers continuing engineering is that you'll never operate. Everyone to each other for the like and I think that is sort of part find these person who give an automatically to create a to cover app that they see all that way the face transform we decided and products firefox content kids to spend a lot of time showing that how the tutorial would work how would we have in I think we have need my time they're doing various research projects that figure app how asking period we want any trust and questions that relates either what we're discussing size that capabilities having the issues perspective and see how people actually using our product completely different than not something with can't basis, but think inform that would pick about the choices page and to work into some entire times we decided that for need doc*mentation study that we go first ways to really try to point it to why small yeah have a combination of info interviews as well as ownership shadow customers meeting our backgrounds for Netflix. So much more at a product and source more than the streaming platform that was hugely a that personal internationally. If you want to work out a large problem if you like exploring at the creative that's like unable alright as I had a chance to exercise muslims that I could believe that I had no one is telling you what to do how to do it and I find on how important. Customer engagement is a business communication can be created external stakeholder consumer and an organization. How many are plan those various channels. This connection to reaction introduction is sent on all customer experience like take plenty, you know use consider something has to record on and product service on however combination to numbers states some of this list and consumer action and conscious level online pa*sed to engagement is qualified different from also an engagement as the nature of the customer's interactions with a brand company and other customers different on the internet discussion forms thoughts for example, our statements remains can continue or social and ways that the I finally interact media a role all the engagement is discussions. No not that detectives between with minds to doc*ment of the internet in the late nineteen nineties days, but a*sets the technical developments and part scale connectivity in social media. 
These factors and April customer behavior regularly engaged normal communities evolving in directly or indirectly around product categories and other consumption topics. This possibly turns positive engagement with the by or offer as well as the behavior of tough with different degrees of customer engagement, but marketing practice saying to create in so in fact. Which placed conversion which to most updated contacts as type. So restarting as changes on maximizing functional care in some sort of differences the crazy like the other takes and merchants. Although best you see is almost market the rise on use celebrated content. So level support disabled engagement and targets long term try up to your data it you seems our purchase downward. Although customer case and marketing is consistent over line and offline the internet is the basis for marketing users leveraging customer automation are an important source of compet*tive whether update your user generated product people customer to represent use consumers can socialize with one another or contributes to product development guest or also customer each with these best leverage by so like so positive knowledge and access and support and equipped with a tag of to deliver store and sales training on for those brand basket noises to relationship okay. Innovation just out one and one if you can enter sign fine this information design thinking counsel right through the master I'm covering creative insights and you at the thirteenth Google's business we've developed a mute project base online horse a*signed this page one of us to well try while you guys videos use and several all highly practicable sorry about team like that from reply and Ashley recognize the to design things it deep in that office design thinking coaches are tailored based for helping the selfish. They don't pretend that we start out believing that we have the one true messages as across he will provide specific instructions to help you work our challenge in your organization. We'll our stories of design thinking and action at not the real delivery service for the elderly environment denmark. So designed for IBM and more a real buy lots of requests and recommend others and highlight works of designers have feel throughout the course willing encourage to participate on a form where you can share your experiences so find your brother learners in offer byte to help them throughout it all we little about the community learners working together. Let stop waiting for miracles get freddy.
Group Rank: 3
Story of pre reinforcement learning all the way back to AI tangible psychology and the and the heart it it also enough by a person animal robot would that when it investigate an environment with the code back cart to sparkles org sponsors are a great example of this just think of what our product less agent would have to deal with his own batch the one have to consider actions like it deserves returns and these actions change the state of the game or in other words. The current set the leading player things like that and every action is performed with a reward mind when a point in order to win game set and match our needs to follow a policy or set roles and strategies in order to maximize the final score. What if you were building on autonomous agent how would you actually model this we know that the agents actions will change the status environment. So model would need to see it what a he state and action is input and generate a maximum perspective rewards output a*sumption we gets you to the next day and may taken to account the total expected reward for every action from the current of the end state the way of us works will be different for every application and I'm probably not surprised to know that golden had agent is different for building an atari agent the searchers a line he's a series of Atari screenshots to build a confirmation barrel network with a couple of tweaks the output wasn't in a cla*s but instead it was a target that was have a actual award. Was actually dealing with regression on notification they also didn't use Polish players since unlike image recognition individual possessions of gateway objects like the player are all important and can't be reduced current that could have been used through as long as they ask what layer was tailored for aggression. He had input in each time step included at the action and the environment that's also a deep q network or themes for short the deep also uses the principles of projecting the maximum reward given escape action. He was actually patented by Google and has seen a lot of improvements like the experience free play and the tooling network architecture. Enforcement learning is just a fancy smart side way to see supervised learning supervised learning is all about making sense of the environment based on historical examples. And that isn't always the best way to do things imagine if you're trying to drag a car and heavy each pa*s based the web patterns you observed the week before when the roads were clear that's I'm this effective stride where are all looking in the year reinforcement. My learning on the other hand this all about rewards you get points for your actions like fame in your lane driving under the speed limit segment when you're supposed to things like that you can also lose points for dangerous actions like and speeding. So in fact need to take the back something above points possible given the current state of the traffic on the road value you reinforcement learning exercises seven action results in a change of state, which is something super were model doesn't focus on. Girl twenty Sixteen Amazon's cyber Jeff basis talked about how these companies great place to fail and how most companies are unwilling to suffer through the stream of scaled experiments. You can pick of this as a statement about rewards most organizations operate in the realm of conventional wisdom. What is about explaining what is known to achieve finite rewards odds. 
Some Spencer into the unknown and export territory with the prospect of outside rewards at all odds and many of these applications do fail a some of seed and end up changing the wallet with reinforcement learning and native exports trained off between expiration at location and choose the path to the maximum capacity born. Channels all about deep working so we focused this on as topic of building reinforcement net, but reinforcement learning off under the broader umbrella artificial intelligence. It involves topics like goal setting planning and perception and it can even form our bridge between AI and the engineering discipline. Automate learning simple of powers and get them the recent advances to has the potential become date before the field. If you wanna learn more about deep learning and around after this for or sounds staging web Twitter thanks for watching and we'll see you that time. Okay Let's do this today I'm going to teach you what behavior search is you destroying to the.
Group Rank: 4
I'm friendly and I'm a business administrator call and I hope identical Google. We're gonna take a couple of minutes to walk you through how we hire a Google for all of our jobs and share some helpful details preparing for the experience. The first step is getting application to us far my browser open jobs around a world the clear side find up this screen jobs that match skills try to focus someone roles what be call the minimum modifications and I feel some of them prefer. Now I time to get your resume a right check out the video on the description for a tips on how to make your resume stand out one thing to point out here is that we don't provide debris from certainly our stage or a university at all depending on the goal and G is only part of your application skills to subscribed ask me some of your application you have a team how trained pros who over your resume and connect the thousands to your experience and roles a component. There's no one kind of Google. So we're always looking different for people who bring New perspective and life experiences that help those strongly products and services fall by users. We really care about making this part of the process fair opinion. So these are viewers focus on each tenant instead transportation in first specific jobs once you resumes is that you use you a your lower to like us here we don't hear from us and you you you're gonna a*sume that we're more important on the candidates unless tell what learned but we buy reach out the future. If another rules after creator reached out to you phone calls are usually the next step typically you will chat and learn more about you your interested experience so okay, then has tony in the news google set are more specific with job or achieve technical roles is often being included introduced and for business roles, they'll focus on skills and experience to the job your voice next step is areas here build. It's a really exciting day you'll usually they have four forty five in eight where you need clients google's. Demonstrate set your enemies and have time ask questions about your interviewer role. The team. You may also have one because you learn to learn more about what it swipe to us here. Now let's talk through other one thing to know about angles we use better then high quality questions that tell the job you platform for and challenges, but it's is a good way. So don't worry you won't be asked if bring pieces request our research. So these types of questions find using process all interviewers are trained and each standardized as a consistent and confident their a*sessment regardless of the job platform for there four agile look for for is terrible context building what's the highest smart people to learn and adapt situation. So this is about how you learn so guys not about June days test second is full related knowledge. It's pretty straightforward. We wanna make sure you will experience background and skills that'll set you up for success this thing we have this look at jobs t*tles from step payments. We need this step leadership roles decision look for examples of things like the team player and navigate help on just and last but not least he is you wanna make sure to get right here. So we look for signs of comfortable can deploy any by action and a collaborative feature use your recruit will show you as present references and the before example was submitted put timing to ready frame together pool goods for notify old tests, but have that any to have is owners offer comprehensive unbiased. 
If the community recommend it to hire you that recommendation and all of your info effects the senior who provides another layer opportunity for final reviews. If everything's proved your career will get in touch and make it pop. That's how we high here Google check out the links of subscription all resources and help you put there and head to the clear site and see how open jobs to see application.
Group Rank: 5
Fence that you would want to advertise in maybe holiday or do you wanna have a special advertis***nt or any other important date. Require some kind of marketing or advertising then you wanna to think of how you touch this it gonna be in press very often today know a lot of advertising internet are you going to print brochures for almost any other kind of paid tab. You'll decide what you want to have how this relates to your budget and what you gonna afford now the most important thing about a marketing plan you should ever forget is to evaluate how effective these advertis***nts and marketing and ideas are and somehow either through surveys conducted with your customers or some sort of post event valuation determine whether or not any of this marketing actually produces his customers if they they don't produce customers then they're not effective and for the following year for your next year marketing plan. We probably want to eliminate them or change them some why so it's it will become more effective in the end you'll have an effective marketing plan mean I idea is just new running wisely. So of I mean you spend linkedin. In my last video we went through a process of getting a marketing budget that was also payment to your individual or business needs than the and the all encompa*sing what can get when you ask what should I asking i also promised the video that within testing the house allocate that okay so here it is last time we're all about taking something that was over and not you and incorporating it slightly. So great good this time we're doing a there's a bit of the opposite because if you start doing some research on the internet how you to kind of pay your mark touch it what you're going to find this you get really confused maybe quickly and probably start thinking that this some kind of dark option involved as so the case just the fact that there's million different businesses out there Google had different results from different marketing bunch allocation and budget and as such we've have ended up with millions of different results for how to allocate your budget what I want speak here was painters of those of you who have absolutely no idea where start with page parts but it and gives you an idea of what the average business doing how that give you something to build repository in the stage stuff. So first off. We need to distinguish between the different areas that you can spend your mind and those come down to online absolutely offline media. If that you can showcase that direct marketing that you can do and market research where your this try that puts into learning about how your markets is changing and changing your allocation to that now based. The internet research April checking down on board and then putting on my personal experience spread clients kicks and itself come up with these sort of figures where the average business spends around the option budget online they they send another the sense offline and the rest of their budget goes into twenty percent or events fifteen percent of direct marketing opportunities and five said let's go know research. Now I'm sure some of your shouting at this screen see graphic thinking. 
I absolutely no idea what I'm talking about and all that is is proof that your business stage individual and you're already work events change those from the average to yeah you might end know was something a little bit like this or something to be, but either weighing on hope this helps and given you a foundation that we to build and you leave me with your thoughts and comments in the so get in touch and let me know question I have that you think I'm not go twelve in forthcoming videos, obviously if we think anyone else could benefit from this information they share it with them. Otherwise I look forward to recording more just free me actually. After study one you I had an now full time job software engineer. I make three times more money and the has spent on the courses on the you. Friend tool is working in the that what does he said I see software so individually and you sent me a link to lecture of programming they one like us. I think those about exactly a reason when I knew of and was never even think that I will be here two years ago when I started on new first have you been thinking about definite downward and shopping I would have a go of there. So didn't expect us in eight months. I would have feel much for you welcome the phone list until center and today we're going to be talking about the marching campaign. The Coca Cola company call was first invented with a required based pharmacy by the he may fifteen ninety six and soon sold at local artifacts dancers going to demand and instead the beverage possible that bottle is spot with and it's first here payment source just nine plus of coca Cola a day for five sets across since then the company is expanded is the source slightly currently selling context escalate rate of more than one point nine billion serving today that's equivalent every day twelve most one four people will buying something from Coke color. The Cola company is one of the most epic large companies in the world and certainly the world's biggest straight company they control more than half the global markets and polarizing sockets because well as a substantial chunk of the sole launch non carbon it four of the world's fine. They just selling drinks with the coca Cola being the world's best known and most valuable non technology brand within portfolio volume the company holds more the five hundred ram for sprite and relentless within these they produce three half. Thousand and very products spanning from service to bottle water to ice street coffee the line an estimated ninety four percent of the world's recognizing the President wide self about ten percent revenue forty one billion dollars. We there's on advertising and maximum campaigns last year. This has to article for billion million dollars spend range campaign taxes across most regions as with many things in the world. Technically the companies the most well recognized suggest the highest pushing from advertising and public image luckily respecting bunch of allows code to experiments and get creative with their monitoring providing your opportunity to remove this like there is stressful are.
Group Rank: 6
Tech technical product management. If you're at beyond in our cloud data apps and this whiteboard session is all about giving kubernetes you five minutes and it's going to be a little bit of a challenge. So I'm gonna have the show a lot of detailed concepts and kubernetes, but I want give you a rough idea what is it and let us actually do and what is responsible for so there's a few different architectural components too, which I'm gonna talk about the first of which is the kubernetes cluster services and the fundamental from behind kubernetes is that we can enforce what's called desired state manager. Really what that means is that I'm going to feed the cluster services as specific configuration that will be up up to the cluster services to go out and run that configuration in my infrastructure right one of the main components that I do what to call out is this API that sits in front all these API service. So that is one able walk up the system. The second building block of the system is this thing called day worker and what is a worker? Well, the worker is really just a container host the one thing unique about a worker or the container host in a nineties environment. It does have this equivalent process that runs, which is responsible for communicating with yes, you guessed kubernetes cluster services and so this whole thing the cluster services. The workers themselves that's what makes up is oracle cost. So let's talk about use case. So in this case you what we wanna do is wanna feed this the configuration. So the desired snake service to speak exists here in a deployment yellow file. So here I'm just gonna call an application one dot. Inside this there could be a whole bunch of configuration information that I yelled got a bypa*s quite a bit of the parameter. I talk about two fundamental the first of which in this deployment file. A pod configuration pods is like the Smallest units of deployment and kubernetes in terms of the kubernetes object and what it means is that in a pod I can have running containers and you can have one or more. So in order to run that part. I need to specify some sort of container image. Maybe I want to have to container images and further demo line. There's other things like what people board and we're services right or what that, but another additional thing is that I'm gonna specify how many of these pods you could be running here. So maybe there's three more number that I can also list additional box of your flaws. So pod number two yeah, I basically have container image number three and then replicas as a configuration file equal to two. So what happens. I will take this deployment file and will be it to the API with up you'll be up to the cluster service figure out how to schedule the pods in the environment and make sure that I have the right number of pods. So I see this file up here let's work on pod one so I'm gonna have pod one replica one pod one replica two pod one replica three in addition I've also got pod. So here we are pod to replica one and in this case we have two replica here. So let's just do pod one replica to here and so you can see at any point in time the cluster services a responsibility to make sure that configuration is running across all of my computer code groups.
Group Rank: 7
I know like for and you're watching selling right now some so what's part of business is that think there a logical thing that on that and there's no better example, this to Gorilla a market where you try and make a lot of impact a very content, but we actually have one band the thing J timelines limits today we're gonna look at it open real world its listing nice or see and we'll show you the various not team to see what would best suit your business commerce club was born in nineteen thirty thirty at and he studied psychology, which gave to around world work after you learn the trade in London and then give back the United States upon further their each for you work on some of the most famous campaigns all times more man to these tiger and all of which are now for history, but this is great that human is his book to real marketing what one million copies that was translated to go over time. The core concept is not a pure small business available like the jungle really the technology sorry and you need to find a new or file and your best one and this product limits instead that in this experience that we to on the unexpected way it's never much larger inactive auxiliary build pods. Now it's also important to be very direct the image tearing being sales quickly before the book out giant companies is the messages all the things like interaction where this this to for the gorilla where them. It's a sales so all working in the innovative with original also need a crystal clear which pushes sales. So this question is how do you do this little more at the number of yeah person this is working put advertising the appointment or message highly we use. There's a great also a lot of inspiration people really notice things that play with buyer there. As the name might suggest if there's this year and trying to market it without seeming like the working like you real music that I place used to do this. But most wise well and it's hard to do so a great example of are etcetera one between library, you have track and then their phones set for when guys asked the mouse they hit over the black bridge like guy would take this number and it's experience to you can see that this example, could easily go wrong in black cause the label will sciences or something but they got a way our marketing is really a subset promo marketing too, but we have a full video on our marketing it's created tip great that'll leave in scripts and most marketing is when you try to create connection between your brand and a bigger event. So you been thinking you back on their for proposal you obviously did be very careful about a legal aspect of the camera, you'll see every time there's a maintenance sporting event with the Olympics is a London twenty total leverage the official sponsors got temporary order cords and phrases such as gold and one least one one dollar found way around the rules for creating ads that said we give eventually the event in in the city can't even mention this here at least we can't stop with telling you about this the cobra a seven pounds as far as they manage the themselves when they granted themselves and underdog exactly what many people mom house or there are plenty of other tactics for hopefully I give do an idea of what you could meet with a bigger outside of box is wanna learn more about business theories history you sure the like some subscribe for. 
My next Allan today I'd like to tell you how to develop the heartbeat feeds for a company either small or large you're going to have to market your product before you spend a lot of money bringing your outline advertis***nts. You really want to develop some kind of a marketing plan so that you know what person and why they're spending generally for a marketing plan you start off with a calendar.
Group Rank: 8
The have always entered in December twenty thirteenth a small group of AI research and they tell me you called Dubai least the ground breaking paper called playing target phrase force and just a little goodwill a months later Google announced that they bought might for a really good sum money since then there's the all kinds of talk about reinforcement learning in the field.
|
tutorials/zh/8_node_classification_on_citation_network.ipynb | ###Markdown
Node classification on a citation network. In this tutorial, we show how GraphScope combines the capabilities of graph analytics, graph queries, and graph learning to tackle a node-classification task on a paper citation network. We use the [ogbn-mag](https://ogb.stanford.edu/docs/nodeprop/ogbn-mag) dataset, a heterogeneous graph network composed of a subset of the Microsoft Academic Graph. The graph contains four types of entities (papers, authors, institutions, and fields of study) and four types of directed relation edges connecting pairs of entities. Given the heterogeneous ogbn-mag data, the task is to predict the class of each paper on this graph. This is a node-classification task: papers are assigned to fields, directions, or research groups by classifying them on their attributes and on the structural information of the citation graph. In this dataset, each paper node carries a 128-dimensional word2vec vector extracted from the paper's title and abstract as its representation; this representation is pre-trained and provided in advance, while the structural information is computed on the fly. This tutorial proceeds in the following steps: create a session and load the graph; query the graph interactively with Gremlin; run graph algorithms for analytics; run a machine-learning task over the graph data; and close the session.
###Code
# First, create a new session and load the ogbn_mag dataset.
import os
import graphscope
from graphscope.dataset.ogbn_mag import load_ogbn_mag
k8s_volumes = {
"data": {
"type": "hostPath",
"field": {
"path": "/testingdata",
"type": "Directory"
},
"mounts": {
"mountPath": "/home/jovyan/datasets",
"readOnly": True
}
}
}
graphscope.set_option(show_log=True)
sess = graphscope.session(k8s_volumes=k8s_volumes)
graph = load_ogbn_mag(sess, "/home/jovyan/datasets/ogbn_mag_small/")
###Output
_____no_output_____
###Markdown
Interactive query with gremlin. In this example, we launch an interactive query engine and then use graph traversal to count how many papers two given authors have co-authored. To simplify the query, we assume the two authors are uniquely identified by IDs 2 and 4307, respectively.
###Code
# get the entrypoint for submitting Gremlin queries on graph g.
interactive = sess.gremlin(graph)
# count the number of papers two authors (with id 2 and 4307) have co-authored.
papers = interactive.execute("g.V().has('author', 'id', 2).out('writes').where(__.in('writes').has('id', 4307)).count()").one()
print("result", papers)
###Output
_____no_output_____
###Markdown
Graph analytics with analytical engine. Continuing our example, we next run graph analytics on the graph data to generate structural features for the nodes. We first derive a subgraph by extracting papers within a given time range from the full graph (using Gremlin!), and then run k-core decomposition and triangle counting to generate structural features for each paper node.
###Code
# extract a subgraph of publications within a time range.
sub_graph = interactive.subgraph(
"g.V().has('year', inside(2014, 2020)).outE('cites')"
)
# project the subgraph to simple graph by selecting papers and their citations.
simple_g = sub_graph.project_to_simple(v_label="paper", e_label="cites")
# compute the kcore and triangle-counting.
kc_result = graphscope.k_core(simple_g, k=5)
tc_result = graphscope.triangles(simple_g)
# add the results as new columns to the citation graph.
sub_graph = sub_graph.add_column(kc_result, {"kcore": "r"})
sub_graph = sub_graph.add_column(tc_result, {"tc": "r"})
###Output
_____no_output_____
###Markdown
Graph neural networks (GNNs). Next, we use the generated structural features together with the original features to train a learning model with GraphScope's learning engine. In this example, we train a GCN model to classify the nodes (papers) into 349 categories, each representing a venue (e.g., a preprint server or a conference).
###Code
# define the features for learning,
# we chose original 128-dimension feature and k-core, triangle count result as new features.
paper_features = []
for i in range(128):
paper_features.append("feat_" + str(i))
paper_features.append("kcore")
paper_features.append("tc")
# launch a learning engine. here we split the dataset, 75% as train, 10% as validation and 15% as test.
lg = sess.learning(sub_graph, nodes=[("paper", paper_features)],
edges=[("paper", "cites", "paper")],
gen_labels=[
("train", "paper", 100, (0, 75)),
("val", "paper", 100, (75, 85)),
("test", "paper", 100, (85, 100))
])
# Then we define the training process, use internal GCN model.
from graphscope.learning.examples import GCN
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
def train(config, graph):
def model_fn():
return GCN(graph,
config["class_num"],
config["features_num"],
config["batch_size"],
val_batch_size=config["val_batch_size"],
test_batch_size=config["test_batch_size"],
categorical_attrs_desc=config["categorical_attrs_desc"],
hidden_dim=config["hidden_dim"],
in_drop_rate=config["in_drop_rate"],
neighs_num=config["neighs_num"],
hops_num=config["hops_num"],
node_type=config["node_type"],
edge_type=config["edge_type"],
full_graph_mode=config["full_graph_mode"])
trainer = LocalTFTrainer(model_fn,
epoch=config["epoch"],
optimizer=get_tf_optimizer(
config["learning_algo"],
config["learning_rate"],
config["weight_decay"]))
trainer.train_and_evaluate()
# hyperparameters config.
config = {"class_num": 349, # output dimension
"features_num": 130, # 128 dimension + kcore + triangle count
"batch_size": 500,
"val_batch_size": 100,
"test_batch_size":100,
"categorical_attrs_desc": "",
"hidden_dim": 256,
"in_drop_rate": 0.5,
"hops_num": 2,
"neighs_num": [5, 10],
"full_graph_mode": False,
"agg_type": "gcn", # mean, sum
"learning_algo": "adam",
"learning_rate": 0.01,
"weight_decay": 0.0005,
"epoch": 5,
"node_type": "paper",
"edge_type": "cites"}
# start training and evaluating
train(config, lg)
###Output
_____no_output_____
###Markdown
Finally, after all computation is done, close the current session.
###Code
# close the session.
sess.close()
###Output
_____no_output_____
|
feedbackPython2.ipynb | ###Markdown
Exercise 1Let us consider the sequence $U_n$ given by \begin{equation}\label{fib}\left\lbrace \begin{array}{ll}U_0 &= 1,\\U_1 &= 2,\\U_{n} &=-3U_{n-1} +U_{n-2}, \;\; \forall\; n=2,3,4\cdots\end{array}\right. \end{equation}Write a python function named SeqTerms that takes as input an integer $n,\;\;n\geq 0$ and return an array of the first $n$ terms (i.e. $U_0, \cdots, U_{n-1}$) of the sequence \eqref{fib}. The code should return an array of one or two when (n=0 or n =1), for other inputs the code works properly, you may need to print the results as an array not as a list.
###Code
import numpy as np
def Seq(n):
a=1
b=2
if n==0:
return 1
if n==1:
return 2
for i in range(2,n+1):
c=-3*b+a
a=b
b=c
return c
Seq(2)
def SeqTerms(n):
    # Collect the first n terms U_0, ..., U_{n-1} as a list.
    l=[]
    for i in range(n):
        l+=[Seq(i)]
    return l
SeqTerms(1)
###Output
_____no_output_____
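###Markdown
Feedback follow-up: a minimal sketch of a variant that returns a NumPy array for every $n \geq 0$, including $n=0$ and $n=1$.
###Code
import numpy as np
def seq_terms_array(n):
    # First n terms of U_k = -3*U_{k-1} + U_{k-2} with U_0 = 1, U_1 = 2,
    # returned as a NumPy array (an empty array when n = 0).
    terms = [1, 2][:n]
    for _ in range(2, n):
        terms.append(-3 * terms[-1] + terms[-2])
    return np.array(terms)
seq_terms_array(5)
###Output
_____no_output_____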
###Markdown
Exercise 2. Let $\{ x_k\}$ be a partition of $[a,b]$ such that $a=x_0<x_1<\cdots<x_{N-1}<x_N=b$ and let $H$ be the length of the $k$-th subinterval ($H = x_k - x_{k-1}$); then we have $$\int_a^bf(x)dx \approx \sum_{k=1}^N \frac{f(x_{k-1})+f(x_k)}{2}H = A$$1. Write a function named Trap that takes $a,b,N, f$ as inputs and returns A. Feedback: correct.
###Code
def trap(a,b,N,f):
    # Grid points x_0, ..., x_N and the function values at them.
    C=np.linspace(a,b,N+1)
    g=np.vectorize(f)
    A=g(C)
    # Sum f(x_{k-1}) + f(x_k) over the N subintervals, then scale by H/2.
    S=0
    for i in range(1,len(A)):
        S+=A[i]+A[i-1]
    K=1/2*S*((b-a)/N)
    return K
f= lambda x: x**3+7
trap(0,1,10**6,f)
###Output
_____no_output_____
###Markdown
2. Write Python code to compute and display an approximation $Aquad$ of the integral below using the Python function $quad$$$A = \int_{0}^{2} \dfrac{x^3+5x-20}{x^2+3}dx$$ Feedback: correct.
###Code
from scipy.integrate import quad
a = 0
b = 2
f = lambda x: (x**3+5*x-20)/(x**2+3)
Aquad= quad(f, a, b)[0]
print(Aquad)
###Output
-7.049316535735796
###Markdown
3. Write a Python function ErrorTrap that takes $M$ as input and returns two arrays $ErrorInt$ and $ListN$. Here, $ErrorInt$ contains the absolute errors between $Aquad$ and the approximation of the integral $A$ obtained using the function Trap, for all positive integers $N$ in $ListN$, the set of all multiples of 10 less than or equal to $M$. Feedback: correct.
###Code
def ErrorTrap(M):
    # Absolute error between the quad reference and the trapezoid rule with N subintervals.
    u= lambda x: abs(quad(f,0,2)[0]-trap(0,2,x,f))
    # ListN holds all multiples of 10 that are <= M.
    ListN=[]
    for i in range(1,M+1):
        if i%10==0:
            ListN+=[i]
    g=np.vectorize(u)
    ErrorInt=g(ListN)
    return ErrorInt, ListN
ErrorTrap(100)
###Output
_____no_output_____
###Markdown
4. Plot the output $ErrorInt$ against $ListN$ for $M=200$. Feedback: you were asked to plot, not print, $ListN$ and $ErrorInt$.
###Code
ErrorInt, ListN = ErrorTrap(200)
print(ListN)
print(ErrorInt)
###Output
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200]
[3.07950054e-03 7.70701307e-04 3.42601551e-04 1.92726674e-04
1.23349010e-04 8.56605200e-05 6.29349176e-05 4.81848732e-05
3.80721757e-05 3.08385649e-05 2.54864800e-05 2.14157629e-05
1.82477772e-05 1.57340710e-05 1.37061368e-05 1.20464185e-05
1.06708827e-05 9.51816892e-06 8.54262634e-06 7.70972323e-06]
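###Markdown
A minimal sketch of the requested plot (editorial, reusing the ErrorInt and ListN values just computed above):
###Code
import matplotlib.pyplot as plt

plt.plot(ListN, ErrorInt, 'o-')
plt.xlabel('N')
plt.ylabel('absolute error')
plt.title('Trapezoidal-rule error vs N for M = 200')
plt.show()
###Output
_____no_output_____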
###Markdown
Exercise 31. Write code to solve the following system of ordinary differential equations using the Python function odeint.$$\begin{cases}\dfrac{dx_1}{dt}& = & -\dfrac{1}{2}x_1\\\\\dfrac{dx_2}{dt}& = & \dfrac{1}{2}x_1-\dfrac{1}{4}x_2\\\\\dfrac{dx_3}{dt}& = & \dfrac{1}{4}x_2-\dfrac{1}{6}x_3\end{cases}, \text{ on } [0,4]$$Subject to the initial conditions $x_1(0) = 1, x_2(0) = 1, x_3(0) = 1$.Correct.
###Code
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# function that returns dz/dt
def model(z,t):
x_1,x_2,x_3 = z
dx_1dt = -1/2*x_1
dx_2dt = 1/2*x_1 -1/4*x_2
dx_3dt = 1/4*x_2-1/6*x_3
return dx_1dt,dx_2dt,dx_3dt
# initial condition
z0 = [1,1,1]
# time points
a = 0
b = 4
N = 100
t = np.linspace(a,b,N+1)
# solve ODE
z = odeint(model,z0,t)
x_1 = z[:,0]
x_2 = z[:,1]
x_3=z[:,2]
plt.plot(t,x_1,'b-')
plt.plot(t,x_3,'r--')
plt.plot(t,x_2,'green');
def f(z,t):
x1,x2,x3=z
dx1dt=-1/2*z[0]
dx2dt=1/2*z[0]-1/4*z[1]
dx3dt=1/4*z[1]-1/6*z[2]
return dx1dt, dx2dt,dx3dt
#f(6,7)
###Output
_____no_output_____
###Markdown
2. The exact solution of the above system of ODEs is given by$$\begin{cases}x_1(t)& = & e^{-t/2}\\x_2(t)& = & -2e^{-t/2}+3e^{-t/4}\\x_3(t)& = & \dfrac{3}{2}e^{-t/2} - 9e^{-t/4} + \dfrac{17}{2}e^{-t/6}\end{cases}$$Use subplot to plot, side by side, each exact and approximate solution in the same window, and their absolute error vs the time. Correct.
###Code
import numpy as np
import matplotlib.pyplot as plt
# x_1(t)=np.exp(-t/2)
# x_2(t)=-2*np.exp(-t/2)+3*np.exp(-t/4)
# x_3(t)=3/2*np.exp(-t/2)-9*np.exp(-t/4)+17/2*np.exp(-t/6)
# #plot results
plt.subplot(3,1,1)
plt.plot(t,np.exp(-t/2),'b')
plt.plot(t,x_1,'y--')
plt.xlabel('time')
plt.ylabel('x_1(t)')
plt.show()
#plot results
plt.subplot(3,1,2)
plt.plot(t,-2*np.exp(-t/2)+3*np.exp(-t/4),'y-')
plt.plot(t,x_2,'g--')
plt.xlabel('time')
plt.ylabel('x_2(t)')
plt.show()
plt.subplot(3,1,3)
plt.plot(t,3/2*np.exp(-t/2)-9*np.exp(-t/4)+17/2*np.exp(-t/6),'r-')
plt.plot(t,x_3,'b--')
plt.xlabel('time')
plt.ylabel('x_3(t)')
plt.show()
#plot results
# plt.subplot(3,1,3)
# plt.plot(x,y)
# plt.xlabel('x')
# plt.ylabel('y')
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
# x_1(t)=np.exp(-t/2)
# x_2(t)=-2*np.exp(-t/2)+3*np.exp(-t/4)
# x_3(t)=3/2*np.exp(-t/2)-9*np.exp(-t/4)+17/2*np.exp(-t/6)
# #plot results
plt.subplot(3,1,1)
plt.title("Absolute Error vs Times")
#plt.plot(t,np.exp(-t/2),'b')
plt.plot(t,abs(x_1-np.exp(-t/2)),'b-')
plt.xlabel('time')
plt.ylabel('absolute error of x_1')
plt.show()
#plot results
plt.subplot(3,1,2)
#plt.plot(t,-2*np.exp(-t/2)+3*np.exp(-t/4),'g-')
plt.plot(t,abs(x_2+2*np.exp(-t/2)-3*np.exp(-t/4)),'g-')
plt.xlabel('time')
plt.ylabel('absolute error of x_2')
plt.show()
plt.subplot(3,1,3)
#plt.plot(t,3/2*np.exp(-t/2)-9*np.exp(-t/4)+17/2*np.exp(-t/6),'r-')
plt.plot(t,abs(x_3-3/2*np.exp(-t/2)+9*np.exp(-t/4)-17/2*np.exp(-t/6)),'r-')
plt.xlabel('time')
plt.ylabel('absolute error of x_3')
plt.show()
#plot results
# plt.subplot(3,1,3)
# plt.plot(x,y)
# plt.xlabel('x')
# plt.ylabel('y')
# plt.show()
###Output
_____no_output_____
###Markdown
Exercise 4Let $\{ t_k\}$ be a partition of $[a,b]$ such that $a=t_1<t_2<\cdots<t_{N}=b$ and $H$ be the constant length of the $k$-th subinterval ($H = t_k - t_{k-1}$). Let us consider the initial value problem\begin{equation}\label{eul2} \begin{cases} \dfrac{dz}{dt} = f(z,t), & \quad \text{on } [a, b]\\\\ z(a) = c, \end{cases}\end{equation}where $z,f,c\in R^M$, i.e. $z = [x_1, x_2,\cdots, x_{M}]$, $c = [x_1(a), x_2(a),\cdots, x_{M}(a)]$ and $f = [f_1, f_2,\cdots, f_{M}]$. Note that \eqref{eul2} is the general form of a system of ODEs. Let $t, z_k, Z$ be defined as follows $$t=[t_1,t_2,\cdots,t_{N-1},t_{N}],\quad z_k = [x_1(t_k), x_2(t_k),\cdots, x_{M}(t_k)], \quad Z =\begin{pmatrix}x_1(t_1)& x_2(t_1)&\cdots& x_{M}(t_1)\\x_1(t_2)& x_2(t_2)&\cdots& x_{M}(t_2)\\\vdots& \vdots&\ddots& \vdots\\x_1(t_{N})& x_2(t_{N})&\cdots& x_{M}(t_{N})\end{pmatrix}$$1. Write a python function EulerOdeSys that takes $f,c,t$ and returns the solution $Z$ of the initial value problem \eqref{eul2} using the Euler method, i.e.$$ z_{k+1} = z_k + Hf(z_k,t_k) $$Feedback: the structure is fine.
###Code
def EulerOdeSys(f,c,t):
n=len(t)
Z = np.zeros((len(t),)+ np.shape(c))
Z[0]= c
for i in range(n-1):
h =(t[i+1] - t[i])
Z[i+1]= Z[i]+ h*f(Z[i],t[i])
return Z
def f(x,y):
return x+y
c=[5,3]
t=np.linspace(0,4,10)
EulerOdeSys(f,c,t)
###Output
_____no_output_____
###Markdown
2. Write a python function RK4OdeSys that takes $f,c,t$ and returns the solution $Z$ of the initial value problem (1) using the fourth order Runge-Kutta method, i.e.\begin{equation}\begin{cases}k_1 = f(z_k,t_k),\\\\k_2 = f(z_k+H\dfrac{k_1}{2}, t_k + \dfrac{H}{2}),\\\\k_3 = f(z_k+H\dfrac{k_2}{2}, t_k + \dfrac{H}{2}),\\\\k_4 = f(z_k+Hk_3, t_k + H),\\\\z_{k+1} = z_k + \dfrac{H}{6}(k_1+2k_2+2k_3+k_4)\end{cases}\end{equation}Correct.
###Code
def RK4OdeSys(f,c,t):
n = len (t)
Z = np.zeros((len(t),)+ np.shape(c))
Z[0]= c
for i in range (n-1):
k1 = f(Z[i] ,t[i])
h =(t[i+1] - t[i])/2
k2 = f(Z[i]+ h*k1 , t[i]+h)
k3 = f(Z[i]+ h*k2 , t[i]+h)
k4 = f(Z[i]+2*h*k3 ,t[i]+2*h )
Z[i+1]= Z[i]+ h/3*(k1 +2*k2 +2*k3+k4 )
return Z
def f(x,y):
return x+y**2
c=[5,2]
t=np.linspace(0,4,10)
RK4OdeSys(f,c,t)
#plt.plot(RK4OdeSys1(f,c,t),'b-')
###Output
_____no_output_____
###Markdown
3. Solve the system of ODEs in Exercise 3 using your functions EulerOdeSys and RK4OdeSys. Not done; a minimal sketch is given below. 4. By plotting the absolute error in the approximate and exact solutions, tell us which function gives a more accurate solution of a system of ODEs. Not done.
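###Code
# Editorial sketch for parts 3 and 4 (not part of the submission): solve the
# Exercise 3 system with both integrators defined above and compare their errors.
def f3(z, t):
    x1, x2, x3 = z
    return np.array([-x1/2, x1/2 - x2/4, x2/4 - x3/6])

t = np.linspace(0, 4, 101)
Z_euler = EulerOdeSys(f3, [1, 1, 1], t)
Z_rk4 = RK4OdeSys(f3, [1, 1, 1], t)

# exact solution from part 2, stacked column-wise to match Z
Z_exact = np.column_stack([np.exp(-t/2),
                           -2*np.exp(-t/2) + 3*np.exp(-t/4),
                           3/2*np.exp(-t/2) - 9*np.exp(-t/4) + 17/2*np.exp(-t/6)])

plt.plot(t, np.abs(Z_euler - Z_exact).max(axis=1), label='Euler')
plt.plot(t, np.abs(Z_rk4 - Z_exact).max(axis=1), label='RK4')
plt.xlabel('time')
plt.ylabel('max absolute error')
plt.legend()
plt.show()
# RK4's error is orders of magnitude smaller, so RK4 gives the more accurate solution.
###Output
_____no_output_____
###Markdown
Exercise 5Let us consider the function primes that takes $n$ as input and returns a list of primes less than $n$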
###Code
# This cell is only to import the libraries
import numpy as np
import time
def primes(n):
""" Returns a list of primes < n """
sieve = [True] * (n//2)
for i in range(3,int(n**0.5)+1,2):
if sieve[i//2]:
sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)
return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]
###Output
_____no_output_____
###Markdown
For any integer $n>0$ and a prime number $p$, define $\nu_p(n)$ as the greatest integer $r$ such that $p^r$ divides $n$. Define $$ D(n,m) = \sum_{p\; prime} \Bigl| \nu_p(n) - \nu_p(m)\Bigr| $$ For example $D(14,24)=4$. Furthermore, define $$S(N) = \sum_{n=1}^{N}\sum_{m=1}^{N}D(n,m).$$ You are given $S(10)=210$. 1. Write an efficient python function, Func_S, that takes $N$ as input and returns the value $S(N)$. Correct.
###Code
from math import floor
from math import log as ln
def nu(n,p):
L=[]
for i in range(floor(ln(n)//ln(p))+2):
if n%(p**i)==0:
L+=[i]
return L[-1]
def D(n,m):
list_prime=primes(max(m,n)+1)
SumD=0
for i in list_prime:
SumD+=abs(nu(n,i)-nu(m,i))
return SumD
print(D(14,24))
def Func_S(N):
s=0
for i in range(1,N+1):
for j in range(1,N+1):
#if j!=i:
s=s+D(i,j)
return s
Func_S(10)
nu(7,23)
###Output
4
###Markdown
2. Compute $S(10)$ and display its computational time
###Code
N = 10
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
S(10) = 210
computational Time = 0.0028889940003864467
###Markdown
3. Compute $S(100)$ and display its computational time
###Code
N = 100
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
S(100) = 37018
computational Time = 0.4434022469940828
###Markdown
4. Compute $S(1000)$ and display its computational time
###Code
N = 1000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
S(1000) = 4654406
computational Time = 256.6060328760068
###Markdown
5. Compute $S(10000)$ and display its computational time. Feedback: you were asked to compute $S(10)$ and so on, but there are no outputs for the larger cases; this was meant to test the efficiency of your algorithm. A faster sketch follows the cell below.
###Code
N = 10000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
_____no_output_____
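###Markdown
The brute-force double loop over all pairs is why the larger cases have no outputs: $S(1000)$ already took over four minutes above. An editorial sketch of a faster approach (not the graded submission): for each prime $p \le N$, count how many $n \le N$ have each exponent $\nu_p(n) = r$ via floor divisions ($\nu_p(n) \ge r$ holds for exactly $\lfloor N/p^r \rfloor$ values of $n$), then accumulate $c_r c_s |r-s|$ over pairs of counts.
###Code
def Func_S_fast(N):
    total = 0
    for p in primes(N + 1):
        # counts[r] = number of n in [1, N] with nu_p(n) == r
        counts, pk = [], 1
        while pk <= N:
            counts.append(N // pk - N // (pk * p))
            pk *= p
        for r, cr in enumerate(counts):
            for s, cs in enumerate(counts):
                total += cr * cs * abs(r - s)
    return total

Func_S_fast(10)  # 210, matching Func_S(10) above
###Output
_____no_output_____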
###Markdown
6. Compute $S(100000)$ and display its computational time
###Code
N = 100000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
_____no_output_____
###Markdown
7. Compute $S(1000000)$ and display its computational time
###Code
N = 1000000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N,S))
print('computational Time = ', time_elapsed)
###Output
_____no_output_____
###Markdown
Exercise 6 1. Read the Covid-19 dataset
###Code
import pandas as pd
import numpy as np
a=pd.read_csv('Covid-19.csv')
a
###Output
_____no_output_____
###Markdown
2. Drop the Country code column
###Code
del a['Country_code']
a
###Output
_____no_output_____
###Markdown
3. Randomly choose three different countries
###Code
a.sample(n=3)
b=a['Country']
rand=b.sample(n=3)
rand
# b=a.sample(n=3)
# b
###Output
_____no_output_____
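###Markdown
Note (editorial): sampling rows of the raw Country column can return the same country twice, since each country appears once per reporting date. A minimal sketch that guarantees three different countries:
###Code
# sample from the de-duplicated country names instead of the raw column
rand = a['Country'].drop_duplicates().sample(n=3)
rand
###Output
_____no_output_____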
###Markdown
4. Select and display the records for those three countries
###Code
q=a[a['Country'].isin(rand)]
q
###Output
_____no_output_____
###Markdown
5. Calculate and display the sum and the average of the cumulative cases of each WHO region.
###Code
M=a.groupby('WHO_region').mean()
print("the average of cumulative cases of each WHO region is\n",M['Cumulative_cases'])
S=a.groupby('WHO_region').sum()
print("the Sum of cumulative cases of each WHO region is\n",S['Cumulative_cases'])
###Output
the average of cumulative cases of each WHO region is
WHO_region
AFRO 3.853838e+04
AMRO 5.795662e+05
EMRO 2.228024e+05
EURO 3.936270e+05
Other 6.911569e+02
SEARO 1.181455e+06
WPRO 4.465386e+04
Name: Cumulative_cases, dtype: float64
the Sum of cumulative cases of each WHO region is
WHO_region
AFRO 1215886016
AMRO 20479550865
EMRO 3092942346
EURO 15399475886
Other 436120
SEARO 8200477089
WPRO 986180579
Name: Cumulative_cases, dtype: int64
###Markdown
6. Calculate and display the sum and the average of the cumulative deaths of each WHO region.
###Code
M=a.groupby('WHO_region').mean()
print("the average of cumulative deaths of each WHO region is\n",M['Cumulative_deaths'])
S=a.groupby('WHO_region').sum()
print("the Sum of cumulative case of each WHO region is\n",S['Cumulative_deaths'])
###Output
the average of cumulative deaths of each WHO region is
WHO_region
AFRO 916.600856
AMRO 15814.192891
EMRO 4672.818470
EURO 8890.024871
Other 11.554675
SEARO 17467.325457
WPRO 723.480281
Name: Cumulative_deaths, dtype: float64
the Sum of cumulative deaths of each WHO region is
WHO_region
AFRO 28918757
AMRO 558810320
EMRO 64868066
EURO 347795553
Other 7291
SEARO 121240706
WPRO 15978062
Name: Cumulative_deaths, dtype: int64
###Markdown
7. Produce plots that look like the following three figures. Pay attention to the annotations.7.a.
###Code
import seaborn as sns
sns.boxplot(x="Country", y="New_cases", data=q)
sns.stripplot(x="Country", y="New_cases", data=q);#, jitter=True, edgecolor="gray")
# index with a list of columns to avoid the tuple-indexing FutureWarning
a.groupby('WHO_region')[['Cumulative_cases', "Cumulative_deaths"]].sum().plot.bar(grid=False);
###Output
_____no_output_____
###Markdown
7.b.
###Code
import matplotlib.pyplot as plt
sns.lineplot(x="Date_reported", y="Cumulative_cases", hue="Country",linewidth=5,data=q)
###Output
_____no_output_____ |
examples/Sector_AnalysisSynthesis.ipynb | ###Markdown
Parameters
###Code
N_sph = 2
shtype = 'real'
ENREC = True # amplitude or energy reconstruction
pattern = "maxRE"
# steering of sectors
N_sph_sec = N_sph
sec_vecs = spa.grids.load_t_design(2*N_sph_sec if ENREC else N_sph_sec+1) # N+1 or 2N
sec_azi, sec_colat, _ = spa.utils.cart2sph(*sec_vecs.T)
# number of sectors
J_sec = len(sec_vecs)
spa.plots.hull(spa.decoder.get_hull(*sec_vecs.T), title="Sector Hull")
plt.gcf().set_size_inches(3.3, 3.5)
#plt.savefig('figs/hull.pdf')
###Output
_____no_output_____
###Markdown
Sector analysis matrix A_sec, consisting of the stacked beamformers
###Code
if pattern.lower() == "cardioid":
c_n = spa.sph.cardioid_modal_weights(N_sph_sec)
elif pattern.lower() == "hypercardioid":
c_n = spa.sph.hypercardioid_modal_weights(N_sph_sec)
elif pattern.lower() == "maxre":
c_n = spa.sph.maxre_modal_weights(N_sph_sec, True) # works with amplitude compensation and without!
else:
assert()
A_sec = spa.sph.repeat_per_order(c_n) * \
spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, shtype)
spa.plots.sh_coeffs_subplot([A_sec[0, :], A_sec[1, :],
A_sec[2, :], A_sec[3, :]],
titles=["$s_0$", "$s_1$", "$s_2$", "$s_3$"])
plt.gcf().set_size_inches(7, 3.5)
beta_a = np.sqrt(4*np.pi)/ (A_sec[0, 0] * J_sec)
beta_e = 1/ (A_sec[0, :].conj()@A_sec[0, :]/np.sqrt(4*np.pi) * J_sec/np.sqrt(4*np.pi))
###Output
_____no_output_____
###Markdown
Defining some input signal as a superposition of some patterns
###Code
# INPUT
Omega_in = np.c_[0, 0]
N_sph_in = N_sph
in_nm = spa.sph.sh_matrix(N_sph_in, Omega_in[:, 0], Omega_in[:, 1], shtype).conj()
in_nm = spa.sph.repeat_per_order(spa.sph.cardioid_modal_weights(N_sph_in)) * in_nm # make cardioid
in_nm += spa.sph.sh_matrix(N_sph_in, Omega_in[:, 0]+np.pi/2, Omega_in[:, 1]+\
np.pi/2, shtype).conj() # add another
fig = plt.figure()
spa.plots.sh_coeffs(in_nm, title="Input", fig=fig)
ax = fig.gca()
ax.plot(np.insert(sec_vecs[:, 0], np.arange(J_sec), np.zeros(J_sec)),
np.insert(sec_vecs[:, 1], np.arange(J_sec), np.zeros(J_sec)),
np.insert(sec_vecs[:, 2], np.arange(J_sec), np.zeros(J_sec)),
color='black', linestyle='dashed')
for s, co in enumerate(sec_vecs):
ax.text(co[0], co[1], co[2], s+1, zorder=1, fontsize=12)
plt.savefig('input_sec_nm.pdf')
###Output
_____no_output_____
###Markdown
Carry out sector analysis, producing sector signals
###Code
# Apply sectors
delta_numSH = (N_sph_in+1)**2 - (N_sph_sec+1)**2
pw_secs_out = np.c_[A_sec, np.zeros([J_sec, delta_numSH])] @ in_nm.T
pw_secs_out = np.real_if_close(pw_secs_out)
fig, ax = plt.subplots(figsize=(default_figsize[0], 3/4*default_figsize[1]), constrained_layout=True)
plt.stem(np.arange(J_sec)+1, pw_secs_out)
plt.xlabel("Sec ID")
plt.grid()
plt.title("Sector Output")
plt.savefig('sector_output.pdf')
###Output
_____no_output_____
###Markdown
Defining resynthesis matrix B
###Code
# Resynth
c_n_re = np.ones(N_sph_sec+1)
if ENREC:
c_n_re = (1/(c_n/c_n[0]))
else:
c_n_re = np.ones(N_sph_sec+1)
Y_resynth = spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, shtype)
B_resynth = spa.sph.repeat_per_order(c_n_re)*Y_resynth
resynth_nm = (B_resynth.conj().T @ pw_secs_out).T
resynth_nm = beta_a * resynth_nm
spa.plots.sh_coeffs(np.c_[resynth_nm, np.zeros([1, delta_numSH])],
title=f"Output")
# little plot helper with some syntactic sugar
def plot_mats(*Ms, titles=None):
assert(len(Ms) > 1)
L = len(Ms)
fig, axs = plt.subplots(1, L, constrained_layout=True)
for ax, m_it in zip(axs, Ms):
p = ax.matshow(m_it)
cbar = axs[0].figure.colorbar(p, ax=ax, shrink=1/(L))
if titles is not None:
for it in range(L):
axs[it].set_title(titles[it])
M1 = (spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real')).T @ \
(spa.sph.repeat_per_order(c_n) * spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real'))
M2 = (spa.sph.repeat_per_order(1/(c_n)) * spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real')).T @ \
(spa.sph.repeat_per_order(c_n) * spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real'))
M3 = (spa.sph.repeat_per_order((4*np.pi/J_sec)/(c_n)) * spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real')).T @ \
(spa.sph.repeat_per_order(c_n) * spa.sph.sh_matrix(N_sph_sec, sec_azi, sec_colat, 'real'))
plot_mats(M1, M2, M3, titles=["Encoder", "1/Enc", "Restored"])
# Isotropic noise input
# Evaluation
from numpy.random import default_rng
rng = default_rng()
fs = 48000
t_sig = 3
in_nm_diff = 1 * rng.standard_normal((t_sig*fs, (N_sph+1)**2))
in_nm_pw1 = 1 * rng.standard_normal((t_sig*fs, 1)) * spa.sph.sh_matrix(N_sph, sec_azi[0], sec_colat[0], shtype).conj()
in_nm_pw2 = 1 * rng.standard_normal((t_sig*fs, 1)) * spa.sph.sh_matrix(N_sph, sec_azi[2], sec_colat[2], shtype).conj()
in_nm = in_nm_diff # + in_nm_pw1 + in_nm_pw2
#in_nm = np.ones([1, (N_sph+1)**2])
def rms(X, axis=0):
return np.sqrt(np.mean(np.square(np.abs(X)), axis=axis))
in_sec = (A_sec @ in_nm.T).T # discrete domain
plt.figure()
plt.stem(rms(in_nm))
plt.figure()
plt.stem(rms(in_sec))
def plot_sph_rms(F_nm):
assert(F_nm.ndim == 2)
grid_azi, grid_zen, grid_r = spa.utils.cart2sph(*spa.grids.load_n_design(40).T)
# Looking with hyper-cardioids
s_grid = F_nm @ ((4*np.pi)/((N_sph+1)**2) * spa.sph.sh_matrix(N_sph, grid_azi, grid_zen, shtype)).T
s_rms = rms(s_grid, axis=0) # why is this sqrt(4pi/nSH)?!
print("RMS Mean: ", np.round(s_rms.mean(), 3))
fig = plt.figure()
spa.plots.spherical_function(s_rms, grid_azi, grid_zen, fig=fig)
return s_rms.mean()
rms_in = plot_sph_rms(in_nm)
plt.savefig('input_RMS.pdf')
resynth_nm = (B_resynth.conj().T @ in_sec.T).T
resynth_nm = beta_a * resynth_nm
plot_sph_rms(resynth_nm)
spa.utils.test_diff(in_nm, resynth_nm, "Testing perfect reconstruction: ")
print('RMSE in dB : ', spa.utils.db(rms(resynth_nm.ravel()-in_nm.ravel())))
# Resynth based on sector rms and white noise
in_sec_rms = rms(in_sec)
in_sec_resynth = in_sec_rms * rng.standard_normal(in_sec.shape)
spa.utils.test_diff(in_sec_rms, rms(in_sec, axis=0))
# Correlation matrix
Ys = spa.sph.sh_matrix(N_sph, sec_azi, sec_colat, 'real')
R = Ys @ np.diag(spa.sph.repeat_per_order(c_n))@ Ys.T
fig, ax = plt.subplots(figsize=(0.75*default_figsize[0], 0.75*default_figsize[1]), constrained_layout=True)
p = ax.matshow(R)
cbar = ax.figure.colorbar(p, ax=ax)
plt.title('Spatial Correlation')
plt.savefig('correlation_matrix.pdf')
R = R / np.sum(R, axis=0)
plt.matshow(R)
plt.colorbar()
#resynth_nm = in_sec_resynth @ Y_resynth
resynth_nm = (B_resynth.conj().T @ (R @ in_sec_resynth.T)).T
resynth_nm = np.sqrt(beta_e) * resynth_nm
rms_out_en = plot_sph_rms(resynth_nm)
C_in = in_nm.conj().T @ in_nm / (fs * t_sig)
C_out = resynth_nm.conj().T @ resynth_nm / (fs * t_sig)
U, s, Vt = np.linalg.svd(C_out @ np.linalg.inv(C_in))
spa.utils.test_diff(1, np.mean(s), "Testing energy Reconstruction: ")
print("RMSE: ", rms((C_out @ np.linalg.inv(C_in)).ravel() - np.eye(*C_out.shape).ravel()))
print("RMSE in dB: ", spa.utils.db(rms(C_out.ravel() - C_in.ravel())))
print("Mean RMSE in dB from signals: ", spa.utils.db(rms_in - rms_out_en))
plt.stem(s)
plt.matshow(np.abs(C_out @ np.linalg.inv(C_in)))
plt.colorbar()
B_pinv = np.linalg.pinv(A_sec.conj().T)
plt.plot(np.abs(B_pinv.ravel() - beta_a*B_resynth.ravel()))
###Output
_____no_output_____ |
Term-1/Project4/RoboND-NN-Lab/NN_lab.ipynb | ###Markdown
TensorFlow Neural Network Lab In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, notMNIST, consists of images of a letter from A to J in differents font.The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in! To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
###Code
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
###Output
All modules imported.
###Markdown
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
###Code
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the letter from the filename.  This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with
size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
###Output
100%|██████████| 210001/210001 [00:27<00:00, 7559.13files/s]
100%|██████████| 10001/10001 [00:01<00:00, 7874.70files/s]
###Markdown
Problem 1The first problem involves normalizing the features for your training and test data.Implement Min-Max scaling in the `normalize()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.Min-Max Scaling:$X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}$*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
###Code
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
x_min = 0
x_max = 255
a = 0.1
b = 0.9
scale_data = a + ((image_data - x_min) * (b - a)) / (x_max - x_min)
return scale_data
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
###Output
Saving data to pickle file...
Data cached in pickle file.
###Markdown
CheckpointAll your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
###Code
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
###Output
Data and modules loaded.
###Markdown
Problem 2For the neural network to train on your data, you need the following float32 tensors: - `features` - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`) - `labels` - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`) - `weights` - Variable Tensor with random numbers from a truncated normal distribution. - See `tf.truncated_normal()` documentation for help. - `biases` - Variable Tensor with all zeros. - See `tf.zeros()` documentation for help.*If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
###Code
features_count = 784
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
###Output
Accuracy function created.
###Markdown
Problem 3Below are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.Parameter configurations:Configuration 1* **Epochs:** 1* **Batch Size:** * 2000 * 1000 * 500 * 300 * 50* **Learning Rate:** 0.01Configuration 2* **Epochs:** 1* **Batch Size:** 100* **Learning Rate:** * 0.8 * 0.5 * 0.1 * 0.05 * 0.01Configuration 3* **Epochs:** * 1 * 2 * 3 * 4 * 5* **Batch Size:** 100* **Learning Rate:** 0.2The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.*If you're having trouble solving problem 3, you can view the solution [here](https://github.com/udacity/CarND-TensorFlow-Lab/blob/master/solutions.ipynb).*
###Code
# TODO: Find the best parameters for each configuration
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
###Output
Epoch 1/5: 100%|██████████| 1425/1425 [00:03<00:00, 434.13batches/s]
Epoch 2/5: 100%|██████████| 1425/1425 [00:03<00:00, 437.36batches/s]
Epoch 3/5: 100%|██████████| 1425/1425 [00:03<00:00, 439.04batches/s]
Epoch 4/5: 100%|██████████| 1425/1425 [00:03<00:00, 438.86batches/s]
Epoch 5/5: 100%|██████████| 1425/1425 [00:03<00:00, 427.84batches/s]
###Markdown
TestSet the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
###Code
# TODO: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
###Output
Epoch 1/5: 100%|██████████| 1425/1425 [00:00<00:00, 1950.08batches/s]
Epoch 2/5: 100%|██████████| 1425/1425 [00:00<00:00, 1996.31batches/s]
Epoch 3/5: 100%|██████████| 1425/1425 [00:00<00:00, 1980.80batches/s]
Epoch 4/5: 100%|██████████| 1425/1425 [00:00<00:00, 1991.67batches/s]
Epoch 5/5: 100%|██████████| 1425/1425 [00:00<00:00, 1999.94batches/s] |
code/qss20_groupcode/text_addendums/01_sampledescriptives (2).ipynb | ###Markdown
Load datasets
###Code
# load data
LOAD_LOCAL = False
if LOAD_LOCAL:
addendum = pd.read_csv("../FOIA_2021-F-05932_raw_data_combined_202021Q1.csv")
disclosure = pd.read_csv ("../H-2A_Disclosure_Data_FY_combined_202021Q1.csv")
else:
addendum = pd.read_csv(DROPBOX_DATA_PATH + COMBINED_ADDENDUMS_DATA)
disclosure = pd.read_csv (DROPBOX_DATA_PATH + COMBINED_DISCLOSURE_DATA)
###Output
/Users/rebeccajohnson/opt/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3071: DtypeWarning: Columns (17,18,31,83,84,138,139) have mixed types.Specify dtype option on import or set low_memory=False.
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
###Markdown
Addendum and Disclosure Data Drop Duplicates
###Code
print("Addendum dropping duplicates, %s rows" % len(addendum))
addendum = addendum.drop_duplicates(["CASE_NUMBER", "SECTION_DETAILS"])
print("Addendum After dropping duplicates, %s rows" % len(addendum))
print("Disclosure Before dropping duplicates, %s rows" % len(disclosure))
disclosure = disclosure.drop_duplicates(["CASE_NUMBER"])
print("Disclosure After dropping duplicates, %s rows" % len(disclosure))
###Output
Disclosure Before dropping duplicates, 16783 rows
Disclosure After dropping duplicates, 16774 rows
###Markdown
Pre-Aggregate addendum data overview
###Code
### Table
addendum_mini=addendum.copy()
addendum_mini=addendum_mini.loc[~addendum_mini.SECTION_NUMBER.isnull()&~addendum_mini.SECTION_NAME.isnull()]
#Find the top 5 section
addendum_mini.SECTION_NUMBER.value_counts().nlargest(5)
top5_section=["A.8a","B.6","F.2","F.1","A.11"]
addendum_mini["top5sect"] = np.where(addendum_mini.SECTION_NUMBER.isin(top5_section), True, False)
#Find An Example of Each
addendum_mini=addendum_mini[addendum_mini.top5sect==True]
addendum_mini=addendum_mini.drop_duplicates(subset='SECTION_NUMBER', keep="first")
pd.options.display.max_colwidth = 500
addendum_mini = addendum_mini[['SECTION_NAME','SECTION_NUMBER',"SECTION_DETAILS"]]
addendum_mini
addendum_mini.to_latex()
###Output
_____no_output_____
###Markdown
Aggregate addendums data- Rename Column- Joining free text- Removing ones with Spanish
###Code
# Rename Columns in aggregated addendums
addendum = addendum.rename(columns = {"SECTION_DETAILS":"JOB_DESCRIPTION"})
print("Range of text lengths are:-------------------------")
print(addendum.JOB_DESCRIPTION.astype(str).str.len().value_counts())
#Remove Job Listings that Contain Spanish Words
spanish_words = ['compensación',
'ocupación',
'pago',
'transporte',
'reglas',
'contrato',
'horastrabajadores',
'parte',
'tarifas',
'cuartos',
'trabajo',
'registros',
'adicionales',
'comidas',
'empleo',
'terminación',
'vivienda',
'empleador',
'adicionales',
'frecuencia',
'producción',
'liquidación',
'comida',
'herramientas',
'prueba',
'diaria',
'tarifas',
'parado',
'adicionales',
'cobertura',
'proporcionados',
'parte',
'abandono',
'deducciones',
'causa',
'garantías',
'divulgación',
'comidas',
'política',
'disciplina',
'despidos',
'derechos',
'imposibilidad',
'detalles',
'divulgación',
'bloqueo',
'abandono',
'ganancias',
'cobertura',
'pedidos',
'pago',
'oportunidad',
'trabajo']
addendum['is_contains_spanish'] = np.where(addendum.JOB_DESCRIPTION.str.contains('|'.join(spanish_words)),
True, False)
addendum_eng = addendum[~addendum.is_contains_spanish].copy()
print("Removing addendums with any spanish takes us from {} jobs to {} jobs".format(
len(addendum.CASE_NUMBER.unique()),
len(addendum_eng.CASE_NUMBER.unique())))
#Concatenating English Job Lisitngs from Same Case
pd.set_option('display.max_colwidth', None)
addendum_eng_agg = addendum_eng.groupby(['CASE_NUMBER'])['JOB_DESCRIPTION'].apply(lambda text:
' || '.join(text)).astype(str).str.replace('\\n', '', regex = True).reset_index()
print("After pasting together addendums from same case, %s rows" % len(addendum_eng_agg))
assert len(addendum_eng_agg) == len(addendum_eng.CASE_NUMBER.unique())
addendum_eng_agg.head(2)
print("After aggregation, addendum lengths are:------------")
print(addendum_eng_agg.JOB_DESCRIPTION.astype(str).str.len().value_counts())
addendum_eng_agg['raw_text_length'] = addendum_eng_agg.JOB_DESCRIPTION.astype(str).str.len()
## Above code modified from fifth answer at https://stackoverflow.com/questions/27298178/concatenate-strings-from-several-rows-using-pandas-groupby
## because original groupby was including a bunch of linebreaks
###Output
_____no_output_____
###Markdown
Merge the addendums data to disclosure data- Clean up state in the disclosure data- Create indicate for TRLA catchment area- Create diff merges- Merged data resulted from inner join overview
###Code
# Replace EMPLOYER_STATE NA with EMPLOYER_POC_STATE if there's a value, but leave NA if both of them are NA
disclosure['EMPLOYER_STATE'] =disclosure.EMPLOYER_STATE.astype("string")
print("number of NA in EMPLOYER_STATE column before allowing employer_poc_state to count as state")
disclosure['EMPLOYER_STATE'].isna().sum()
disclosure['EMPLOYER_POC_STATE'] =disclosure.EMPLOYER_POC_STATE.astype("string")
disclosure.EMPLOYER_STATE = disclosure.EMPLOYER_STATE.fillna(disclosure.EMPLOYER_POC_STATE)
print("number of NA in EMPLOYER_STATE column after allowing employer_poc_state to count as state")
disclosure['EMPLOYER_STATE'].isna().sum()
# Label TRLA Catchment State VS Remaining States
TRLA_catchment = ["TX", "MS", "AL", "TN", "KY", "LA"]
disclosure["TRLA"] = np.where(disclosure.EMPLOYER_STATE.isin(TRLA_catchment), True, False)
disclosure.TRLA.value_counts(normalize = True)
###Output
_____no_output_____
###Markdown
Left Join
###Code
print("before merge disclosure")
print(len(disclosure))
print("before addendum")
print(len(addendum_eng_agg))
merged_data_left = pd.merge(disclosure,
addendum_eng_agg,
left_on = 'CASE_NUMBER',
right_on = 'CASE_NUMBER',
how = "left",
indicator = "case_merge_status")
merged_data_left.case_merge_status.value_counts()
print("after left merge ")
print(len(merged_data_left))
## same left merge as above (English-only addendums); show merge status as proportions
merged_data_left = pd.merge(disclosure,
addendum_eng_agg,
left_on = 'CASE_NUMBER',
right_on = 'CASE_NUMBER',
how = "left",
indicator = "case_merge_status")
merged_data_left.case_merge_status.value_counts(normalize = True)
###Output
before merge disclosure
16774
before addendum
13143
###Markdown
Inner Join
###Code
## same join as above but inner join rather than left join
print("before merge disclosure")
print(len(disclosure))
print("before addendum")
print(len(addendum_eng_agg))
merged_data_inner = pd.merge(disclosure,
addendum_eng_agg,
left_on = 'CASE_NUMBER',
right_on = 'CASE_NUMBER',
how = "inner")
print("after left merge ")
print(len(merged_data_inner))
# merged data basic stat
stat=merged_data_inner[["CASE_NUMBER", "JOB_DESCRIPTION","EMPLOYER_STATE", "EMPLOYER_POC_STATE"]].describe()[:2]
stat
stat.to_latex()
###Output
_____no_output_____
###Markdown
Diagnostics of cases with and without English addendums
###Code
merged_data_left[['ANTICIPATED_NUMBER_OF_HOURS', 'WAGE_OFFER', 'TOTAL_WORKERS_NEEDED']].dtypes
if merged_data_left.dtypes['WAGE_OFFER'] != "float64":
merged_data_left['WAGE_OFFER'] = merged_data_left['WAGE_OFFER'].str.replace('$', '')
merged_data_left['WAGE_OFFER'] = merged_data_left['WAGE_OFFER'].str.replace(',', '')
merged_data_left['WAGE_OFFER'] = merged_data_left['WAGE_OFFER'].astype(float)
merged_data_left[['ANTICIPATED_NUMBER_OF_HOURS', 'WAGE_OFFER', 'TOTAL_WORKERS_NEEDED']].dtypes
## add more informative column
merged_data_left['case_merge_status_descriptive'] = np.where(merged_data_left.case_merge_status ==
"left_only",
"No English addendum",
"English addendum")
merged_data_left_stats=merged_data_left.groupby('case_merge_status_descriptive').agg(
{
"WAGE_OFFER": ["median"],
"TOTAL_WORKERS_NEEDED": ["median"],
"ANTICIPATED_NUMBER_OF_HOURS": ["median"],
"PIECE_RATE_OFFER": ["median"],
"TOTAL_OCCUPANCY" : ["median"],
"LIFTING_AMOUNT" : ["median"],
"TRLA": np.mean}
)
merged_data_left_stats
merged_data_left_stats.to_latex()
###Output
_____no_output_____
###Markdown
Plot breakdown of addendums by TRLA or not Pie chart
###Code
merged_data_left.case_merge_status.value_counts()
no_add = [merged_data_left[(merged_data_left.case_merge_status == "left_only") &
(~merged_data_left.TRLA)].shape[0],
merged_data_left[(merged_data_left.case_merge_status == "left_only") &
(merged_data_left.TRLA)].shape[0]]
no_add
add = [merged_data_left[(merged_data_left.case_merge_status == "both") &
(~merged_data_left.TRLA)].shape[0],
merged_data_left[(merged_data_left.case_merge_status == "both") &
(merged_data_left.TRLA)].shape[0]]
add
labels = ['Non-TRLA', 'TRLA']
fig, (plt1,plt2) = plt.subplots(1,2,figsize=(10,10))
plt1.pie(no_add, labels=labels, autopct='%1.1f%%')
plt1.set_title("Jobs without English addendums\n(not in text analysis)")
plt2.set_title("Jobs with English addendums\n(in text analysis)")
plt2.pie(add, labels=labels, autopct='%1.1f%%')
## save to output dir
fig.savefig("../output/addendum_comparison.png")
###Output
_____no_output_____
###Markdown
Write outputs for next script- pkl and csv (csv kept in case of linebreak issues) of the inner join between (1) disclosure and (2) aggregated, English-only addendums
###Code
## final check on length stuff
merged_data_inner[["raw_text_length",
"JOB_DESCRIPTION"]].sort_values(by = "raw_text_length", ascending = False).head(2)
## write pkl and csv form to Dropbox folder
if LOAD_LOCAL:
merged_data_inner.to_csv("../merged_addendums_jobdisclosures.csv",
index = False)
merged_data_inner.to_pickle("../merged_addendums_jobdisclosures.pkl")
else:
merged_data_inner.to_csv(DROPBOX_DATA_PATH + "merged_addendums_jobdisclosures.csv",
index = False)
merged_data_inner.to_pickle(DROPBOX_DATA_PATH + "merged_addendums_jobdisclosures.pkl")
###Output
_____no_output_____ |
play/data_EDA_SQuAD_stats.ipynb | ###Markdown
Do some basic plots of article stats only Words per paragraph
###Code
# Words per paragraph
a = art[0]
pwords = [len(p['context'].split()) for p in a['paragraphs']]
figure(num=None, figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')
plt.bar(range(len(pwords)), pwords, align='center', alpha=0.5)
plt.xlabel('Paragraph #')
plt.ylabel('# Words')
plt.show()
plt.hist(pwords, bins=30) # arguments are passed to np.histogram
plt.title("Hist of words per paragraph")
plt.ylabel('# Paragraphs')
plt.show()
###Output
_____no_output_____
###Markdown
Words per article
###Code
awords = []
for a in art:
pwords = [len(p['context'].split()) for p in a['paragraphs']]
awords.append(sum(pwords))
myvar = awords
varname = '# words'
# Plot bargraph
plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #')
# Plot histogram
plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')
###Output
_____no_output_____
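###Markdown
plotbar_train_dev and plothist_train_dev are defined earlier in the notebook (not shown here); an editorial sketch of what they might look like, assuming the first Ntrain articles are train and the following Ndev articles are dev:
###Code
import matplotlib.pyplot as plt

def plotbar_train_dev(myvar, Ntrain, Ndev, varname, xlabel=''):
    # hypothetical reconstruction: bar plot of the per-article statistic,
    # train articles first, dev articles after
    plt.figure(figsize=(8, 4), dpi=80)
    plt.bar(range(Ntrain), myvar[:Ntrain], alpha=0.5, label='train')
    plt.bar(range(Ntrain, Ntrain + Ndev), myvar[Ntrain:Ntrain + Ndev], alpha=0.5, label='dev')
    plt.xlabel(xlabel)
    plt.ylabel(varname)
    plt.legend()
    plt.show()

def plothist_train_dev(myvar, Ntrain, Ndev, varname, ylabel=''):
    # hypothetical reconstruction: overlaid histograms for train and dev
    plt.hist(myvar[:Ntrain], bins=30, alpha=0.5, label='train')
    plt.hist(myvar[Ntrain:Ntrain + Ndev], bins=30, alpha=0.5, label='dev')
    plt.xlabel(varname)
    plt.ylabel(ylabel)
    plt.legend()
    plt.show()
###Output
_____no_output_____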
###Markdown
Print words per article
###Code
awords = []
for a in art:
pwords = [len(p['context'].split()) for p in a['paragraphs']]
awords.append(sum(pwords))
for i, w in enumerate(awords):  # use a fresh loop variable instead of shadowing the awords list
    print("Article # " + str(i) + ": " + art[i]['title'] + ', ' + str(w) + " words.")
###Output
Article # 0: Beyoncé, 9099 words.
Article # 1: Frédéric_Chopin, 9050 words.
Article # 2: Sino-Tibetan_relations_during_the_Ming_dynasty, 9591 words.
Article # 3: IPod, 5236 words.
Article # 4: The_Legend_of_Zelda:_Twilight_Princess, 3966 words.
Article # 5: Spectre_(2015_film), 5113 words.
Article # 6: 2008_Sichuan_earthquake, 7037 words.
Article # 7: New_York_City, 14934 words.
Article # 8: To_Kill_a_Mockingbird, 8908 words.
Article # 9: Solar_energy, 4938 words.
Article # 10: Kanye_West, 10883 words.
Article # 11: Buddhism, 11834 words.
Article # 12: American_Idol, 11605 words.
Article # 13: Dog, 6557 words.
Article # 14: 2008_Summer_Olympics_torch_relay, 9754 words.
Article # 15: Genome, 1771 words.
Article # 16: Comprehensive_school, 2247 words.
Article # 17: Republic_of_the_Congo, 2879 words.
Article # 18: Prime_minister, 2578 words.
Article # 19: Institute_of_technology, 5388 words.
Article # 20: Wayback_Machine, 1908 words.
Article # 21: Dutch_Republic, 1565 words.
Article # 22: Symbiosis, 2066 words.
Article # 23: Canadian_Armed_Forces, 4180 words.
Article # 24: Cardinal_(Catholicism), 4241 words.
Article # 25: Iranian_languages, 1830 words.
Article # 26: Lighting, 4682 words.
Article # 27: Separation_of_powers_under_the_United_States_Constitution, 3048 words.
Article # 28: Architecture, 2762 words.
Article # 29: Human_Development_Index, 2159 words.
Article # 30: Southern_Europe, 1663 words.
Article # 31: BBC_Television, 2344 words.
Article # 32: Arnold_Schwarzenegger, 8837 words.
Article # 33: Plymouth, 7591 words.
Article # 34: Heresy, 2530 words.
Article # 35: Warsaw_Pact, 2126 words.
Article # 36: Materialism, 2369 words.
Article # 37: Christian, 1970 words.
Article # 38: Sony_Music_Entertainment, 2232 words.
Article # 39: Oklahoma_City, 6709 words.
Article # 40: Hunter-gatherer, 2544 words.
Article # 41: United_Nations_Population_Fund, 1413 words.
Article # 42: Russian_Soviet_Federative_Socialist_Republic, 2299 words.
Article # 43: Alexander_Graham_Bell, 8675 words.
Article # 44: Pub, 7950 words.
Article # 45: Internet_service_provider, 1324 words.
Article # 46: Comics, 3538 words.
Article # 47: Saint_Helena, 5901 words.
Article # 48: Aspirated_consonant, 1643 words.
Article # 49: Hydrogen, 5007 words.
Article # 50: Space_Race, 8179 words.
Article # 51: Web_browser, 1601 words.
Article # 52: BeiDou_Navigation_Satellite_System, 2206 words.
Article # 53: Canon_law, 1677 words.
Article # 54: Communications_in_Somalia, 2236 words.
Article # 55: Catalan_language, 4363 words.
Article # 56: Boston, 8404 words.
Article # 57: Universal_Studios, 4323 words.
Article # 58: Estonian_language, 2059 words.
Article # 59: Paper, 2304 words.
Article # 60: Adult_contemporary_music, 4190 words.
Article # 61: Daylight_saving_time, 5943 words.
Article # 62: Royal_Institute_of_British_Architects, 2002 words.
Article # 63: National_Archives_and_Records_Administration, 1736 words.
Article # 64: Tristan_da_Cunha, 2588 words.
Article # 65: University_of_Kansas, 2640 words.
Article # 66: Nanjing, 6694 words.
Article # 67: Arena_Football_League, 5424 words.
Article # 68: Dialect, 4137 words.
Article # 69: Bern, 3565 words.
Article # 70: Westminster_Abbey, 3717 words.
Article # 71: Political_corruption, 4698 words.
Article # 72: Classical_music, 6698 words.
Article # 73: Slavs, 4231 words.
Article # 74: Southampton, 7579 words.
Article # 75: Treaty, 4266 words.
Article # 76: Josip_Broz_Tito, 7671 words.
Article # 77: Marshall_Islands, 4027 words.
Article # 78: Szlachta, 5458 words.
Article # 79: Virgil, 3309 words.
Article # 80: Alps, 8455 words.
Article # 81: Gene, 5413 words.
Article # 82: Guinea-Bissau, 3008 words.
Article # 83: List_of_numbered_streets_in_Manhattan, 3009 words.
Article # 84: Brain, 7318 words.
Article # 85: Near_East, 5751 words.
Article # 86: Zhejiang, 4002 words.
Article # 87: Ministry_of_Defence_(United_Kingdom), 2074 words.
Article # 88: High-definition_television, 3522 words.
Article # 89: Wood, 5455 words.
Article # 90: Somalis, 6107 words.
Article # 91: Middle_Ages, 14326 words.
Article # 92: Phonology, 1920 words.
Article # 93: Computer, 6718 words.
Article # 94: Black_people, 5914 words.
Article # 95: The_Times, 3632 words.
Article # 96: New_Delhi, 4415 words.
Article # 97: Bird_migration, 4445 words.
Article # 98: Atlantic_City,_New_Jersey, 4574 words.
Article # 99: Immunology, 1650 words.
Article # 100: MP3, 5054 words.
Article # 101: House_music, 4724 words.
Article # 102: Letter_case, 1398 words.
Article # 103: Chihuahua_(state), 10247 words.
Article # 104: Imamah_(Shia_doctrine), 1755 words.
Article # 105: Pitch_(music), 1282 words.
Article # 106: England_national_football_team, 1285 words.
Article # 107: Houston, 5909 words.
Article # 108: Copper, 3645 words.
Article # 109: Identity_(social_science), 3104 words.
Article # 110: Himachal_Pradesh, 2840 words.
Article # 111: Communication, 1596 words.
Article # 112: Grape, 1165 words.
Article # 113: Computer_security, 2737 words.
Article # 114: Orthodox_Judaism, 3198 words.
Article # 115: Animal, 1916 words.
Article # 116: Beer, 4247 words.
Article # 117: Race_and_ethnicity_in_the_United_States_Census, 1200 words.
Article # 118: United_States_dollar, 4568 words.
Article # 119: Imperial_College_London, 2787 words.
Article # 120: Hanover, 2693 words.
Article # 121: Emotion, 5829 words.
Article # 122: Everton_F.C., 2690 words.
Article # 123: Old_English, 2724 words.
Article # 124: Aircraft_carrier, 4401 words.
Article # 125: Federal_Aviation_Administration, 1490 words.
Article # 126: Lancashire, 2454 words.
Article # 127: Mesozoic, 1849 words.
Article # 128: Videoconferencing, 2153 words.
Article # 129: Gregorian_calendar, 2718 words.
Article # 130: Xbox_360, 3711 words.
Article # 131: Military_history_of_the_United_States, 5772 words.
Article # 132: Hard_rock, 5206 words.
Article # 133: Great_Plains, 1507 words.
Article # 134: Infrared, 2158 words.
Article # 135: Biodiversity, 3767 words.
Article # 136: ASCII, 3274 words.
Article # 137: Digestion, 2102 words.
Article # 138: Gymnastics, 3002 words.
Article # 139: FC_Barcelona, 6248 words.
Article # 140: Federal_Bureau_of_Investigation, 5926 words.
Article # 141: Mary_(mother_of_Jesus), 5025 words.
Article # 142: Melbourne, 8157 words.
Article # 143: John,_King_of_England, 12612 words.
Article # 144: Macintosh, 7588 words.
Article # 145: Anti-aircraft_warfare, 8170 words.
Article # 146: Sanskrit, 2349 words.
Article # 147: Valencia, 7233 words.
Article # 148: General_Electric, 1716 words.
Article # 149: United_States_Army, 4139 words.
Article # 150: Franco-Prussian_War, 8010 words.
Article # 151: Adolescence, 11952 words.
Article # 152: Antarctica, 5505 words.
Article # 153: Eritrea, 4395 words.
Article # 154: Uranium, 5067 words.
Article # 155: Order_of_the_British_Empire, 1408 words.
Article # 156: Circadian_rhythm, 2505 words.
Article # 157: Elizabeth_II, 5724 words.
Article # 158: Sexual_orientation, 6692 words.
Article # 159: Dell, 6122 words.
Article # 160: Capital_punishment_in_the_United_States, 5957 words.
Article # 161: Age_of_Enlightenment, 11174 words.
Article # 162: Nintendo_Entertainment_System, 6088 words.
Article # 163: Athanasius_of_Alexandria, 5126 words.
Article # 164: Seattle, 7125 words.
Article # 165: Memory, 4552 words.
Article # 166: Multiracial_American, 5284 words.
Article # 167: Ashkenazi_Jews, 6554 words.
Article # 168: Pharmaceutical_industry, 5510 words.
Article # 169: Umayyad_Caliphate, 5108 words.
Article # 170: Asphalt, 4704 words.
Article # 171: Queen_Victoria, 5891 words.
Article # 172: Freemasonry, 4284 words.
Article # 173: Israel, 13293 words.
Article # 174: Hellenistic_period, 13458 words.
Article # 175: Bill_%26_Melinda_Gates_Foundation, 2269 words.
Article # 176: Montevideo, 8331 words.
Article # 177: Poultry, 4244 words.
Article # 178: Dutch_language, 5805 words.
Article # 179: Buckingham_Palace, 3752 words.
Article # 180: Incandescent_light_bulb, 5283 words.
Article # 181: Arsenal_F.C., 4375 words.
Article # 182: Clothing, 2043 words.
Article # 183: Chicago_Cubs, 10084 words.
Article # 184: Korean_War, 9524 words.
Article # 185: Copyright_infringement, 4187 words.
Article # 186: Greece, 10796 words.
Article # 187: Royal_Dutch_Shell, 2786 words.
Article # 188: Mammal, 3092 words.
Article # 189: East_India_Company, 3285 words.
Article # 190: Hokkien, 2435 words.
Article # 191: Professional_wrestling, 7811 words.
Article # 192: Film_speed, 2843 words.
Article # 193: Mexico_City, 10126 words.
Article # 194: Napoleon, 12440 words.
Article # 195: Germans, 3783 words.
Article # 196: Southeast_Asia, 3096 words.
Article # 197: Brigham_Young_University, 5667 words.
Article # 198: Department_store, 5260 words.
Article # 199: Intellectual_property, 2555 words.
Article # 200: Florida, 3893 words.
Article # 201: Queen_(band), 8673 words.
Article # 202: Presbyterianism, 4879 words.
Article # 203: Thuringia, 3972 words.
Article # 204: Predation, 3722 words.
Article # 205: Marvel_Comics, 2703 words.
Article # 206: British_Empire, 8923 words.
Article # 207: Botany, 6768 words.
Article # 208: Madonna_(entertainer), 11754 words.
Article # 209: Law_of_the_United_States, 3260 words.
Article # 210: Myanmar, 7764 words.
Article # 211: Jews, 5125 words.
Article # 212: Cotton, 4155 words.
Article # 213: Data_compression, 2779 words.
Article # 214: The_Sun_(United_Kingdom), 6112 words.
Article # 215: Pesticide, 2899 words.
Article # 216: Somerset, 5419 words.
Article # 217: Yale_University, 6629 words.
Article # 218: Late_Middle_Ages, 4283 words.
Article # 219: Ann_Arbor,_Michigan, 4557 words.
Article # 220: Gothic_architecture, 4633 words.
Article # 221: Cubism, 5415 words.
Article # 222: Political_philosophy, 3029 words.
Article # 223: Alloy, 3383 words.
Article # 224: Norfolk_Island, 3486 words.
Article # 225: Edmund_Burke, 6063 words.
Article # 226: Samoa, 2514 words.
Article # 227: Pope_Paul_VI, 7248 words.
Article # 228: Electric_motor, 5380 words.
Article # 229: Switzerland, 8888 words.
Article # 230: Mali, 2218 words.
Article # 231: Raleigh,_North_Carolina, 3693 words.
Article # 232: Nutrition, 7478 words.
Article # 233: Crimean_War, 8037 words.
Article # 234: Nonprofit_organization, 2926 words.
Article # 235: Literature, 2792 words.
Article # 236: Avicenna, 4641 words.
Article # 237: Chinese_characters, 6799 words.
Article # 238: Bermuda, 5800 words.
Article # 239: Nigeria, 6126 words.
Article # 240: Utrecht, 3375 words.
Article # 241: Molotov%E2%80%93Ribbentrop_Pact, 5742 words.
Article # 242: Capacitor, 4452 words.
Article # 243: History_of_science, 8702 words.
Article # 244: Digimon, 3981 words.
Article # 245: Glacier, 2461 words.
Article # 246: Comcast, 3435 words.
Article # 247: Tuberculosis, 3617 words.
Article # 248: Affirmative_action_in_the_United_States, 7387 words.
Article # 249: FA_Cup, 4256 words.
Article # 250: New_Haven,_Connecticut, 8003 words.
Article # 251: Alsace, 4064 words.
Article # 252: Carnival, 6382 words.
Article # 253: Baptists, 2536 words.
Article # 254: Child_labour, 7488 words.
Article # 255: North_Carolina, 7415 words.
Article # 256: Heian_period, 3088 words.
Article # 257: On_the_Origin_of_Species, 8595 words.
Article # 258: Dissolution_of_the_Soviet_Union, 8093 words.
Article # 259: Crucifixion_of_Jesus, 3950 words.
Article # 260: Supreme_court, 2976 words.
Article # 261: Textual_criticism, 3804 words.
Article # 262: Gramophone_record, 11653 words.
Article # 263: Turner_Classic_Movies, 2583 words.
Article # 264: Hindu_philosophy, 2721 words.
Article # 265: Political_party, 3740 words.
Article # 266: A_cappella, 3306 words.
Article # 267: Dominican_Order, 5301 words.
Article # 268: Eton_College, 5357 words.
Article # 269: Cork_(city), 3526 words.
Article # 270: Galicia_(Spain), 6521 words.
Article # 271: USB, 6794 words.
Article # 272: Sichuan, 2864 words.
Article # 273: Unicode, 3773 words.
Article # 274: Detroit, 8821 words.
Article # 275: London, 8738 words.
Article # 276: Culture, 3269 words.
Article # 277: Sahara, 3812 words.
Article # 278: Rule_of_law, 2696 words.
Article # 279: Tibet, 4274 words.
Article # 280: Exhibition_game, 2623 words.
Article # 281: Northwestern_University, 6297 words.
Article # 282: Strasbourg, 4772 words.
Article # 283: Oklahoma, 6200 words.
Article # 284: History_of_India, 11033 words.
Article # 285: Gamal_Abdel_Nasser, 11234 words.
Article # 286: Pope_John_XXIII, 3183 words.
Article # 287: Time, 3769 words.
Article # 288: European_Central_Bank, 3150 words.
Article # 289: St._John%27s,_Newfoundland_and_Labrador, 3734 words.
Article # 290: John_von_Neumann, 7633 words.
Article # 291: PlayStation_3, 6096 words.
Article # 292: Royal_assent, 5370 words.
Article # 293: Group_(mathematics), 3674 words.
Article # 294: Central_African_Republic, 2395 words.
Article # 295: Asthma, 2441 words.
Article # 296: LaserDisc, 6726 words.
Article # 297: George_VI, 3249 words.
Article # 298: Federalism, 4510 words.
Article # 299: Annelid, 4890 words.
Article # 300: God, 2945 words.
Article # 301: War_on_Terror, 3892 words.
Article # 302: Labour_Party_(UK), 4766 words.
Article # 303: Estonia, 7568 words.
Article # 304: Alaska, 4975 words.
Article # 305: Karl_Popper, 5919 words.
Article # 306: Mandolin, 5911 words.
Article # 307: Insect, 8059 words.
Article # 308: Race_(human_categorization), 8778 words.
Article # 309: Paris, 11495 words.
Article # 310: Apollo, 6369 words.
Article # 311: United_States_presidential_election,_2004, 3422 words.
Article # 312: Liberal_Party_of_Australia, 3486 words.
Article # 313: Samurai, 6496 words.
Article # 314: Software_testing, 3228 words.
Article # 315: States_of_Germany, 2948 words.
Article # 316: Glass, 3555 words.
Article # 317: Planck_constant, 2827 words.
Article # 318: Renewable_energy_commercialization, 4371 words.
Article # 319: Palermo, 3142 words.
Article # 320: Green, 3457 words.
Article # 321: Zinc, 4206 words.
Article # 322: Neoclassical_architecture, 2478 words.
Article # 323: Serbo-Croatian, 3117 words.
Article # 324: CBC_Television, 3136 words.
Article # 325: Appalachian_Mountains, 4616 words.
Article # 326: IBM, 2880 words.
Article # 327: Energy, 4200 words.
Article # 328: East_Prussia, 3168 words.
Article # 329: Ottoman_Empire, 8729 words.
Article # 330: Philosophy_of_space_and_time, 2921 words.
Article # 331: Neolithic, 2757 words.
Article # 332: Friedrich_Hayek, 5881 words.
Article # 333: Diarrhea, 2485 words.
Article # 334: Madrasa, 5610 words.
Article # 335: Miami, 5495 words.
Article # 336: Philadelphia, 7326 words.
Article # 337: John_Kerry, 6760 words.
Article # 338: Rajasthan, 3222 words.
Article # 339: Guam, 4043 words.
Article # 340: Empiricism, 3749 words.
Article # 341: Idealism, 3691 words.
Article # 342: Czech_language, 3047 words.
Article # 343: Education, 3901 words.
Article # 344: Tennessee, 5944 words.
Article # 345: Post-punk, 2756 words.
Article # 346: Canadian_football, 2929 words.
Article # 347: Seven_Years%27_War, 8022 words.
Article # 348: Richard_Feynman, 4027 words.
Article # 349: Muammar_Gaddafi, 10957 words.
Article # 350: Cyprus, 6882 words.
Article # 351: Steven_Spielberg, 6576 words.
Article # 352: Elevator, 6616 words.
Article # 353: Neptune, 4369 words.
Article # 354: Railway_electrification_system, 4927 words.
Article # 355: Spanish_language_in_the_United_States, 2336 words.
Article # 356: Charleston,_South_Carolina, 5352 words.
Article # 357: The_Blitz, 12329 words.
Article # 358: Endangered_Species_Act, 3316 words.
Article # 359: Vacuum, 3899 words.
Article # 360: Han_dynasty, 8082 words.
Article # 361: Quran, 6258 words.
Article # 362: Geography_of_the_United_States, 2375 words.
Article # 363: Compact_disc, 2966 words.
Article # 364: Transistor, 2489 words.
Article # 365: Modern_history, 10676 words.
Article # 366: 51st_state, 3989 words.
Article # 367: Antenna_(radio), 8980 words.
Article # 368: Flowering_plant, 3003 words.
Article # 369: Hyderabad, 7382 words.
Article # 370: Santa_Monica,_California, 2688 words.
Article # 371: Washington_University_in_St._Louis, 4187 words.
Article # 372: Central_Intelligence_Agency, 1786 words.
Article # 373: Pain, 4072 words.
Article # 374: Database, 3696 words.
Article # 375: Tucson,_Arizona, 6364 words.
Article # 376: Armenia, 6001 words.
Article # 377: Bacteria, 6347 words.
Article # 378: Printed_circuit_board, 3985 words.
Article # 379: Greeks, 5953 words.
Article # 380: Premier_League, 4958 words.
Article # 381: Roman_Republic, 10587 words.
Article # 382: Pacific_War, 10010 words.
Article # 383: San_Diego, 6360 words.
Article # 384: Muslim_world, 3092 words.
Article # 385: Iran, 6733 words.
Article # 386: British_Isles, 5288 words.
Article # 387: Association_football, 3652 words.
Article # 388: Georgian_architecture, 2559 words.
Article # 389: Liberia, 2325 words.
Article # 390: Alfred_North_Whitehead, 5578 words.
Article # 391: Antibiotics, 3385 words.
Article # 392: Windows_8, 6301 words.
Article # 393: Swaziland, 2591 words.
Article # 394: Translation, 2809 words.
Article # 395: Airport, 3113 words.
Article # 396: Kievan_Rus%27, 5717 words.
Article # 397: Super_Nintendo_Entertainment_System, 2768 words.
Article # 398: Sumer, 3867 words.
Article # 399: Tuvalu, 7052 words.
Article # 400: Immaculate_Conception, 3654 words.
Article # 401: Namibia, 4410 words.
Article # 402: Russian_language, 3500 words.
Article # 403: United_States_Air_Force, 6678 words.
Article # 404: Light-emitting_diode, 5301 words.
Article # 405: Great_power, 2593 words.
Article # 406: Bird, 8514 words.
Article # 407: Qing_dynasty, 12591 words.
Article # 408: Indigenous_peoples_of_the_Americas, 6336 words.
Article # 409: Red, 5927 words.
Article # 410: Egypt, 6665 words.
Article # 411: Mosaic, 7985 words.
Article # 412: University, 3729 words.
Article # 413: Religion_in_ancient_Rome, 11227 words.
Article # 414: YouTube, 3987 words.
Article # 415: Separation_of_church_and_state_in_the_United_States, 5035 words.
Article # 416: Protestantism, 8787 words.
Article # 417: Bras%C3%ADlia, 2606 words.
Article # 418: Economy_of_Greece, 3044 words.
Article # 419: Party_leaders_of_the_United_States_House_of_Representatives, 3988 words.
Article # 420: Armenians, 3126 words.
Article # 421: Jehovah%27s_Witnesses, 6207 words.
Article # 422: Dwight_D._Eisenhower, 10615 words.
Article # 423: The_Bronx, 6503 words.
Article # 424: Financial_crisis_of_2007%E2%80%9308, 9743 words.
Article # 425: Portugal, 11005 words.
Article # 426: Humanism, 4431 words.
Article # 427: Geological_history_of_Earth, 3430 words.
Article # 428: Police, 3897 words.
Article # 429: Genocide, 3396 words.
Article # 430: Saint_Barth%C3%A9lemy, 2679 words.
Article # 431: Tajikistan, 4112 words.
Article # 432: University_of_Notre_Dame, 8080 words.
Article # 433: Anthropology, 4705 words.
Article # 434: Montana, 5831 words.
Article # 435: Punjab,_Pakistan, 3160 words.
Article # 436: Richmond,_Virginia, 7542 words.
Article # 437: Infection, 4168 words.
Article # 438: Hunting, 4173 words.
Article # 439: Kathmandu, 7054 words.
Article # 440: Myocardial_infarction, 2831 words.
Article # 441: Matter, 3537 words.
Article # 442: Normans, 4024 words.
Article # 443: Computational_complexity_theory, 4493 words.
Article # 444: Southern_California, 2938 words.
Article # 445: Sky_(United_Kingdom), 2477 words.
Article # 446: Victoria_(Australia), 2599 words.
Article # 447: Huguenot, 5435 words.
Article # 448: Steam_engine, 5436 words.
Article # 449: Oxygen, 5001 words.
Article # 450: 1973_oil_crisis, 2795 words.
Article # 451: European_Union_law, 10642 words.
Article # 452: Amazon_rainforest, 2322 words.
Article # 453: Ctenophora, 4549 words.
Article # 454: Fresno,_California, 3551 words.
Article # 455: Packet_switching, 2964 words.
Article # 456: Black_Death, 3213 words.
Article # 457: Geology, 3101 words.
Article # 458: Pharmacy, 2995 words.
Article # 459: Civil_disobedience, 3656 words.
Article # 460: Construction, 2379 words.
Article # 461: Private_school, 3022 words.
Article # 462: Harvard_University, 3834 words.
Article # 463: Jacksonville,_Florida, 2770 words.
Article # 464: Economic_inequality, 5753 words.
Article # 465: University_of_Chicago, 4756 words.
Article # 466: Yuan_dynasty, 7228 words.
Article # 467: Immune_system, 6155 words.
Article # 468: Intergovernmental_Panel_on_Climate_Change, 2963 words.
Article # 469: Prime_number, 3902 words.
Article # 470: Rhine, 5073 words.
Article # 471: Scottish_Parliament, 4799 words.
Article # 472: Islamism, 4220 words.
Article # 473: Imperialism, 5473 words.
Article # 474: Warsaw, 6172 words.
Article # 475: French_and_Indian_War, 5780 words.
Article # 476: Force, 5924 words.
###Markdown
Sentences per article
###Code
# Number of sentences per article
from nltk.tokenize import sent_tokenize
asentences = []
for a in art:
psentences = [len(sent_tokenize(p['context'])) for p in a['paragraphs']]
asentences.append(sum(psentences))
myvar = asentences
varname = '# sentences'
# Plot bargraph
plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #')
# Plot histogram
plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')
# Total number of sentences in dataset
Nsent_train = sum(myvar[:Ntrain])
Nsent_dev = sum(myvar[Ntrain:])
print("Nsent_train={}, Nsent_dev={},Nsent_tot={}".format(Nsent_train,Nsent_dev,Nsent_train+Nsent_dev))
###Output
Nsent_train=94291.32879818595, Nsent_dev=6392.0,Nsent_tot=100683.32879818595
###Markdown
Questions per article
###Code
arts[0]['paragraphs'][0]['qas'][0]['answers'][0]
aquestions = []
for a in art:
pquestions = [len(p['qas']) for p in a['paragraphs']]
aquestions.append(sum(pquestions))
myvar = aquestions
varname = '# questions'
# Plot bargraph
plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #')
# Plot histogram
plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')
###Output
_____no_output_____
###Markdown
Answers per article
###Code
# import mpld3
# mpld3.enable_notebook()
aanswers = []
for a in art:
qanswers = [len(q['answers']) for p in a['paragraphs'] for q in p['qas']]
aanswers.append(sum(qanswers))
myvar = aanswers
varname = '# answers'
# Plot bargraph
plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #')
# Plot histogram
plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')
###Output
_____no_output_____
###Markdown
Words per answer
###Code
# import mpld3
# mpld3.enable_notebook()
art[0]['paragraphs'][0]['qas'][0]['answers'][0]
art[0]['paragraphs'][0]['qas'][0]['is_impossible']
from utils_NLP import extract_no_stopwords
a_wordsperanswer = []
for a in art:
awords = [len(extract_no_stopwords(ans['text'].strip().split())) for p in a['paragraphs'] for q in p['qas'] for ans in q['answers'] if not q['is_impossible']]
if len(awords) > 0:
a_wordsperanswer.append(sum(awords)/len(awords))
else:
a_wordsperanswer.append(0)
myvar = a_wordsperanswer
varname = '# words per answer'
# Plot bargraph
plotbar_train_dev(myvar,Ntrain,Ndev,varname,xlabel='Article #')
# Plot histogram
plothist_train_dev(myvar,Ntrain,Ndev,varname,ylabel='N Articles')
###Output
_____no_output_____
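###Markdown
A quick aggregate over the per-article averages (illustrative; articles whose average is zero are excluded):
###Code
nonzero = [w for w in a_wordsperanswer if w > 0]
print('Mean words per answer over %d articles: %.2f' % (len(nonzero), sum(nonzero) / len(nonzero)))
###Output
_____no_output_____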
###Markdown
Investigate articles with zero answers. From the figure above, it looks like some articles near the end have zero associated answers. Let's see what's going on there.
###Code
# a = art[Ntrain-4]
# pquestions = [q['question'] for p in a['paragraphs'] for q in p['qas']]
# qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']]
# qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
# qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']]
# print(qisimpossible)
# a = art[Ntrain-3]
# pquestions = [q['question'] for p in a['paragraphs'] for q in p['qas']]
# qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']]
# qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
# qanswers = [a['text'] for p in a['paragraphs'] for q in p['qas'] for a in q['answers']]
# print(qisimpossible)
# a = art[Ntrain-2]
# qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
# print(qisimpossible)
# a = art[Ntrain-1]
# qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
# print(qisimpossible)
# a = art[Ntrain-0]
# qisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
# print(qisimpossible)
arts[0]['paragraphs'][0]['qas'][0]['is_impossible']
###Output
_____no_output_____
###Markdown
Fraction of questions that are impossible
###Code
# [q['is_impossible'] for a in art for p in a['paragraphs']]
# arts[0]['paragraphs'][0]['qas'][0]['is_impossible']
aquestions = []
aisimpossible = []
aimpquestionsratio = []
for a in art:
pquestions = [len(p['qas']) for p in a['paragraphs']]
pisimpossible = [q['is_impossible'] for p in a['paragraphs'] for q in p['qas']]
aquestions.append(sum(pquestions))
aisimpossible.append(sum(pisimpossible))
aimpquestionsratio.append(sum(pisimpossible)/sum(pquestions)*100)
figure(num=None, figsize=(15, 4),facecolor='w', edgecolor='k')
barlist = plt.bar(range(len(aimpquestionsratio)), aimpquestionsratio, align='center', alpha=0.5)
plt.xlabel('Article #')
plt.ylabel('% impossible questions')
for i in range(Ntrain,Ntrain+Ndev):
barlist[i].set_color('r')
plt.show()
# figure(num=None, figsize=(15, 4), dpi=80, facecolor='w', edgecolor='k')
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False,figsize=(15, 4));
ax1.hist(aimpquestionsratio[:Ntrain], bins=30); # arguments are passed to np.histogram
ax1.set_title("Train data: Narticles=" + str(Ntrain));
ax1.set_ylabel('N Articles');
ax1.set_xlabel('% impossible questions');
ax2.hist(aimpquestionsratio[Ntrain:], bins=30); # arguments are passed to np.histogram
ax2.set_title("Dev data: Narticles=" + str(Ndev));
ax2.set_xlabel('% impossible questions');
###Output
_____no_output_____
###Markdown
Seems like a few articles near the end contain 100% unanswerable questions. Also, a large number of articles contain 100% answerable questions. The rest are a mix of about 50/50. Answers verbatim in text: run the analysis below, and first test that blanks are properly indicated.
###Code
p = art2[0]['paragraphs'][0]
c = p['context']
cs = c.split()
bc = p['blank_classification']
for i in range(len(bc)):
if bc[i]:
print('Blank at word #' + str(i) + ' ' + cs[i])
print( p['context'])
print( p['context_blanked'])
print(cs) # the split context
temp = 'ASDFASfdsf'
temp.lower()
a = ['lets','walk','the','dog']
b = ['dog']
b in a
a = 'lets walk the dog'
b = 'dog'
b in a
# Gather all question/answer pairs together (one entry per answer)
all_questions = [qa['question'] for a in art for p in a['paragraphs'] for qa in p['qas'] for ans in qa['answers']]
all_answers = [ans['text'] for a in art for p in a['paragraphs'] for qa in p['qas'] for ans in qa['answers']]
answer_is_verbatim_in_context = [ans['text'] in p['context'] for a in art for p in a['paragraphs'] for qa in p['qas'] for ans in qa['answers']]
# Answers per article
answers_per_article = [sum(len(qa['answers']) for p in a['paragraphs'] for qa in p['qas']) for a in art]
print('Num answers: ' + str(len(all_answers)))
print('Num true bools: ' + str(sum(answer_is_verbatim_in_context)))
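# Share of answers found verbatim in their context (illustrative addition)
print('Percent verbatim: ' + str(sum(answer_is_verbatim_in_context) / len(all_answers) * 100))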
# Find all answers at most 3 words long. These are ideal candidates for fill-in-the-blank questions
Nmax = 3
answer_is_short = [len(a.split()) <= Nmax for a in all_answers]
# Count number of trues
print('Num answers: ' + str(len(all_answers)))
print('Num true bools: ' + str(sum(answer_is_short)))
print('Percent: ' + str(sum(answer_is_short)/len(all_answers)*100))
# Display all answers and their validity
for q,a, context_bool in zip(all_questions,all_answers,answer_is_short):
print(f"{a}\t\t{context_bool}")
# Display all question-answer pairs and their validity
for q,a, context_bool in zip(all_questions,all_answers,answer_is_short):
print(f"{q}\t{a}\t{context_bool}")
unique_answers = set(all_answers)
print('All answers: ' + str(len(all_answers)))
print('Unique answers: ' + str(len(unique_answers)))
unique_answers
text = [p['context'] for a in art for p in a['paragraphs']]
text = ' '.join(text[:])
text
###Output
_____no_output_____
###Markdown
Analyze poorly performing NER
###Code
# On this article about music, NER performs poorly; it fails to capture key concepts
art = arts[105:107] # A few short articles
# Might need to use word2vec or other embedding as additional features
# List all answers associated with a specific question
ind_art = 0  # index into the two-article slice above (the original Ntrain-0 would overrun it)
Nq = len(art[ind_art]['paragraphs'][0]['qas'])
for i in range(Nq):
print(art[ind_art]['paragraphs'][0]['qas'][i]['answers'])
i=0
art[ind_art]['paragraphs'][0]['qas'][i]['answers'][0]
###Output
_____no_output_____ |
1-uIDS-quiz/ps1-TitanicSuvivorData.ipynb | ###Markdown
Assignment description Quiz 1: A Simple Heuristic. In this exercise, we will perform some *rudimentary* practices similar to those of an actual data scientist. Part of a data scientist's job is to use her or his `intuition` and `insight` to write `algorithms` and `heuristics`. A data scientist also creates `mathematical models` to make *predictions* based on some `attributes` from the data that they are examining. We would like for you to take your knowledge and intuition about the Titanic and its passengers' attributes to **predict** whether or not the passengers survived or perished. You can read more about the Titanic and specifics about this dataset at: http://en.wikipedia.org/wiki/RMS_Titanic and http://www.kaggle.com/c/titanic-gettingStarted In this exercise and the following ones, you are given a `list of Titanic passengers` and their associated information. More information about the data can be seen at the link below: http://www.kaggle.com/c/titanic-gettingStarted/data. For this exercise, you need to write a simple `heuristic` that will use the `passengers' gender` to predict if that person survived the Titanic disaster. Your prediction should be **78% accurate or higher**. Here's a simple heuristic to start off: - 1) If the passenger is female, your heuristic should assume that the passenger survived. - 2) If the passenger is male, your heuristic should assume that the passenger did not survive. You can access the gender of a passenger via `passenger['Sex']`. - If the passenger is *male*, `passenger['Sex']` will return a string **"male"**. - If the passenger is *female*, `passenger['Sex']` will return a string **"female"**. Write your prediction back into the "predictions" dictionary. - The key of the dictionary should be the passenger's id (which can be accessed via passenger["PassengerId"]) and the associated value should be "1" if the passenger survived or "0" otherwise. - For example, - if a passenger is predicted to have survived: - passenger_id = passenger['PassengerId'] - predictions[passenger_id] = 1 - And if a passenger is predicted to have perished in the disaster: - passenger_id = passenger['PassengerId'] - predictions[passenger_id] = 0 You can also look at the Titanic data that you will be working with at the link below: https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/titanic_data.csv
###Code
import numpy
import pandas
#from pandas.core import datetools # no longer exists in modern pandas and was unused here
#import statsmodels.api as sm
def simple_heuristic(file_path):
'''Read the assignment description above.
'''
# Create a dictionary to store all the value
predictions = {}
# Read from csv file
df = pandas.read_csv(file_path)
# Using for loop to read through the list:
# read each passenger's row using pandas's iterrows() - Iterate over DataFrame rows as (index, Series) pairs.
for passenger_index, passenger in df.iterrows():
passenger_id = passenger['PassengerId']
# Your code here:
# For example, let's assume that if the passenger
# is a male, then the passenger survived.
# if passenger['Sex'] == 'male':
# predictions[passenger_id] = 1
if passenger['Sex'] == 'male':
predictions[passenger_id] = 0
elif passenger['Sex'] == 'female':
predictions[passenger_id] = 1
return predictions
simple_heuristic('l2-ps1-data-titanic.csv')
###Output
_____no_output_____
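###Markdown
A quick way to score the heuristic against the ground truth (a sketch only, using a hypothetical `check_accuracy` helper; it assumes the CSV carries the standard Kaggle `Survived` column):
###Code
def check_accuracy(predictions, file_path):
    # compare each prediction with the recorded outcome
    df = pandas.read_csv(file_path)
    correct = sum(predictions[p['PassengerId']] == p['Survived'] for _, p in df.iterrows())
    return correct / len(df)

print(check_accuracy(simple_heuristic('l2-ps1-data-titanic.csv'), 'l2-ps1-data-titanic.csv'))
###Output
_____no_output_____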
###Markdown
Quiz 2 - A More Complex Heuristic. You are given a list of Titanic passengers and their associated information. More information about the data can be seen at the link below: http://www.kaggle.com/c/titanic-gettingStarted/data For this exercise, you need to write a *more* sophisticated algorithm that will use the passengers' gender and their socioeconomic class and age to predict if they survived the Titanic disaster. Your prediction should be 79% accurate or higher. Here's the algorithm: > predict the passenger **survived** if: > - 1) If the passenger is `female` or > - 2) if his/her socioeconomic status is `high` AND if the passenger is `under 18` > Otherwise, your algorithm should predict that the passenger perished in the disaster. Or more specifically in terms of coding: > female or (high status and under 18) You can access the gender of a passenger via passenger['Sex']. - If the passenger is male, **passenger['Sex']** will return a string **"male"**. - If the passenger is female, **passenger['Sex']** will return a string **"female"**. You can access the **socioeconomic status** of a passenger via **passenger['Pclass']**: - High socioeconomic status -- passenger['Pclass'] is `1` - Medium socioeconomic status -- passenger['Pclass'] is `2` - Low socioeconomic status -- passenger['Pclass'] is `3` You can access the **age** of a passenger via **passenger['Age']**. Write your prediction back into the "predictions" dictionary. The key of the dictionary should be the passenger's id (which can be accessed via passenger["PassengerId"]) and the associated value should be 1 if the passenger survived or 0 otherwise. - For example, - if a passenger is predicted to have survived: - `passenger_id = passenger['PassengerId']` - `predictions[passenger_id] = 1` - And if a passenger is predicted to have perished in the disaster: - `passenger_id = passenger['PassengerId']` - `predictions[passenger_id] = 0` You can also look at the Titanic data that you will be working with at the link below: https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/titanic_data.csv
###Code
import numpy
import pandas
import statsmodels.api as sm
def complex_heuristic(file_path):
'''
Read the quiz 2 description above
'''
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
passenger_id = passenger['PassengerId']
#
# your code here
# for example, assuming that passengers who are male
# and older than 18 surived:
# if passenger['Sex'] == 'male' or passenger['Age'] < 18:
# predictions[passenger_id] = 1
#
predictions[passenger_id] = 0
        # female, or (high socioeconomic status AND under 18); parentheses make the grouping explicit
        if passenger['Sex'] == 'female' or (passenger['Pclass'] == 1 and passenger['Age'] < 18):
predictions[passenger_id] = 1
return predictions
###Output
_____no_output_____
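###Markdown
Note on operator precedence: in Python `and` binds tighter than `or`, so even without parentheses the condition parses as `female or (Pclass == 1 and Age < 18)`, which matches the spec; the parentheses above just make that explicit. A minimal demonstration:
###Code
# `and` is evaluated before `or`
print(True or False and False)   # True: parsed as True or (False and False)
print((True or False) and False) # False: forcing the other grouping changes the result
###Output
_____no_output_____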
###Markdown
Quiz 3 - Your Custom Heuristic. You are given a list of Titanic passengers and their associated information. More information about the data can be seen at the link below: http://www.kaggle.com/c/titanic-gettingStarted/data For this exercise, you need to write a *custom* heuristic that will take in ***some combination*** of the passenger's **attributes** and predict if the passenger survived the Titanic disaster. > Can your custom heuristic beat 80% accuracy? The available **attributes** are: - `Pclass` Passenger Class - (1 = 1st; 2 = 2nd; 3 = 3rd) - `Name` Name - `Sex` Sex - `Age` Age - `SibSp` Number of Siblings/Spouses Aboard - `Parch` Number of Parents/Children Aboard - `Ticket` Ticket Number - `Fare` Passenger Fare - `Cabin` Cabin - `Embarked` Port of Embarkation - (`C` = Cherbourg; `Q` = Queenstown; `S` = Southampton) **SPECIAL NOTES**: - Pclass is a proxy for socioeconomic status (SES) - 1st ~ Upper; - 2nd ~ Middle; - 3rd ~ Lower - Age is in years; fractional if age less than one - If the age is estimated, it is in the form xx.5 - With respect to the family relation variables (i.e. SibSp and Parch) some relations were ignored. The following are the definitions used for SibSp and Parch. - Sibling: brother, sister, stepbrother, or stepsister of passenger aboard Titanic - Spouse: husband or wife of passenger aboard Titanic (mistresses and fiancees ignored) - Parent: mother or father of passenger aboard Titanic - Child: son, daughter, stepson, or stepdaughter of passenger aboard Titanic - Write your prediction back into the `"predictions"` dictionary. - The key of the dictionary should be the passenger's id (which can be accessed via passenger["PassengerId"]) and - the associated value should be `1` if the passenger survived or `0` otherwise. - For example, - if a passenger is predicted to have survived: - passenger_id = passenger['PassengerId'] - predictions[passenger_id] = 1 - And if a passenger is predicted to have perished in the disaster: - passenger_id = passenger['PassengerId'] - predictions[passenger_id] = 0 You can also look at the Titanic data that you will be working with at the link below: https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/titanic_data.csv
###Code
import numpy
import pandas
import statsmodels.api as sm
def custom_heuristic(file_path):
'''
Read the quiz 3 description above.
'''
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
#
# your code here
#
        passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
# assume that all women and children not in 3rd class survived
if (passenger['Sex']=='female' or passenger['Age'] < 15) and passenger['Pclass'] != 3:
predictions[passenger_id] = 1
return predictions
###Output
_____no_output_____ |
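###Markdown
Before guessing a rule, it helps to look at empirical survival rates by attribute (an illustrative sketch; it assumes the same CSV used in Quiz 1, with the standard Kaggle `Survived` column):
###Code
df = pandas.read_csv('l2-ps1-data-titanic.csv')
# survival rate broken down by gender and passenger class
print(df.groupby(['Sex', 'Pclass'])['Survived'].mean())
###Output
_____no_output_____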
Examples/Text-Explanations.ipynb | ###Markdown
Setup
###Code
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pickle
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import to_categorical
import sys
sys.path.append('../')
from exmatchina import *
num_classes = 2
class_names = ['negative', 'positive']
class_dict = {
'negative': 0,
'positive': 1,
}
inv_class_dict = {v: k for k, v in class_dict.items()}
## These are the randomly generated indices
all_idx = np.array([ 9528, 11977, 17734, 18431, 19988])
raw_images_train = pd.read_pickle('data/text/X_train_df')
raw_images_test = pd.read_pickle('data/text/X_test_df')
with open('data/text/tokenizer.pickle', 'rb') as f:
tk = pickle.load(f)
word_index = tk.word_index
print('[INFO] Number of unique tokens found (in train data):', len(word_index))
id_to_word = {value:key for key,value in word_index.items()}
VOCAB_SIZE = 20000
MAX_SEQ_LEN = 40
EMB_DIM = 100
# Returns the raw text of the review
def get_review(x):
return ' '.join(id_to_word[id] for id in x if id != 0)
# Return the raw tweet text, undoing the HTML entity escaping in the stored data
def get_train_review(idx):
    return raw_images_train['Texts'][idx].replace("&amp;", "&").replace('&quot;', '"').replace('&lt;', '<').replace('_', '@')
def get_test_review(idx):
    return raw_images_test['Texts'][idx].replace("&amp;", "&").replace('&quot;', '"').replace('&lt;', '<').replace('_', '@')
X_train = np.load('data/text/X_train.npy')
X_test = np.load('data/text/X_test.npy')
y_train = np.load('data/text/y_train.npy')
y_test = np.load('data/text/y_test.npy')
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
model = load_model('trained_models/text.hdf5')
model.summary()
###Output
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
Embedding (Embedding) (None, 40, 100) 2000000
_________________________________________________________________
Drop_1 (Dropout) (None, 40, 100) 0
_________________________________________________________________
Conv_1 (Conv1D) (None, 40, 512) 154112
_________________________________________________________________
Max_1 (MaxPooling1D) (None, 20, 512) 0
_________________________________________________________________
Drop_2 (Dropout) (None, 20, 512) 0
_________________________________________________________________
Conv_2 (Conv1D) (None, 20, 256) 393472
_________________________________________________________________
Drop_3 (Dropout) (None, 20, 256) 0
_________________________________________________________________
Conv_3 (Conv1D) (None, 20, 15) 11535
_________________________________________________________________
Flatten_1 (Flatten) (None, 300) 0
_________________________________________________________________
Dense_2 (Dense) (None, 20) 6020
_________________________________________________________________
Output (Dense) (None, 1) 21
=================================================================
Total params: 2,565,160
Trainable params: 2,565,160
Non-trainable params: 0
_________________________________________________________________
###Markdown
ExMatchina
###Code
selected_layer = 'Flatten_1'
exm = ExMatchina(model=model, layer=selected_layer, examples=X_train)
for test_idx in all_idx:
test_input = X_test[test_idx]
to_explain = np.expand_dims(test_input, axis=0)
class_pred = np.rint(model.predict(to_explain)[0])
print(inv_class_dict[class_pred[0]])
(examples, indices) = exm.return_nearest_examples(test_input, 3)
review = get_test_review(test_idx)
# print("REVIEW RAW", review)
review = get_test_review(test_idx)
review_1 = get_train_review(indices[0])
review_2 = get_train_review(indices[1])
review_3 = get_train_review(indices[2])
print(test_idx, "REVIEW:", review)
print(test_idx, "Example 1:", review_1)
print(test_idx, "Example 2:", review_2)
print(test_idx, "Example 3:", review_3)
print("\n=====\n")
# draw_txt(review, "text-" + str(test_idx))
# draw_txt( "SIMILAR TWEET #1 (" + sentiment + "):\n" +
# review_1 + "\n\nSIMILAR TWEET #2 (" + sentiment + "):\n" +
# review_2 + "\n\nSIMILAR TWEET #3 (" + sentiment + "):\n" +
# review_3 , "text-"+ str(test_idx) +"-example")
###Output
negative
9528 REVIEW: I dont know.. they said it will arrived 1-2 days,, in fact malah suka lewat, even 4 days,,
9528 Example 1: Just missing each other by a week or so. Have fun while you're in Cancun!
9528 Example 2: Oh no Hope everything goes Ok. Will have lots of positive thoughts for you guys.
9528 Example 3: its not fair mourning for south east asian blockheads..thx 4 share it with us gal
=====
negative
11977 REVIEW: Those of you that know me, say a prayer for my Dad, his heart is broken...
11977 Example 1: so i went today to pick up that lap top... my friend talked me out of it i came home dissapointed
11977 Example 2: Yeah its totally crazy. I have friends that are firefighters for Grand Blanc and Mundy. I hear lots of stuff. Real sad.
11977 Example 3: Ok, so I got up on my one day off and my sister is apparently sick.... .....
=====
positive
17734 REVIEW: I love being accused of using an aimbot, it's just so flattering.
17734 Example 1: well good feedback means so much more,in my opinion. Congratulations
17734 Example 2: its fine i will tomorrow shall be a better day, thankyou.
17734 Example 3: Thats ironic cuz I was just watching that. It's really good
=====
negative
18431 REVIEW: you keep disappearing and it makes me a sad panda
18431 Example 1: the end of him and me. very sad ending.
18431 Example 2: Of to work, going to be a very sad day
18431 Example 3: yeah so its been half an hour and still no reply
=====
positive
19988 REVIEW: The road to success is dotted with so many parking places. Agree? Good Tuesday morning Twitter Universe!
19988 Example 1: Barack Obama's speech rocked! I've never seen him smile so much. He loves ASU.
19988 Example 2: Its so smooth i just wanna pet it - allison. Haha gotta love science class
19988 Example 3: She sounds strong-willed and determined. Can't she just become an engineer? Without the navy
=====
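###Markdown
For intuition, this kind of example-based explanation can be approximated by hand: extract the activations at the chosen layer and rank training examples by cosine similarity to the test input. The sketch below illustrates that general idea only; it is not ExMatchina's actual internals:
###Code
# Illustrative sketch (assumption: similarity over `Flatten_1` activations)
feature_extractor = Model(inputs=model.input, outputs=model.get_layer(selected_layer).output)
train_feats = feature_extractor.predict(X_train)
test_feat = feature_extractor.predict(np.expand_dims(X_test[all_idx[0]], axis=0))
# cosine similarity between the test activation and every training activation
cosine = (train_feats @ test_feat.T).ravel() / (
    np.linalg.norm(train_feats, axis=1) * np.linalg.norm(test_feat) + 1e-12)
print('Closest training indices:', np.argsort(-cosine)[:3])
###Output
_____no_output_____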
|
scratch/fields/likelihood-scratch.ipynb | ###Markdown
simulator
###Code
n_summaries = 2
n_s = 10000
n_d = 5000
λ = 100.0
ϵ = 0.1
θ_fid = np.array([1.0, 0.5], dtype=np.float32)
δθ = np.array([0.1, 0.1], dtype=np.float32)
n_params = 2
N = 128
dim = 2
L = 128
field_shape = (N,N)
dx = L / N
fourier_b = 2*np.pi
input_shape = (1,1, N,N)
simulator_args = {"N": N, "L": L, "dim": dim, "shape": field_shape, 'vol_norm': False, "N_scale": True, "squeeze": False}
# define noise
rng,fg_key = jax.random.split(rng)
foregrounds = jax.random.normal(fg_key, (1000, 1,) + simulator_args['shape'])*0
def simulator(rng, θ, simulator_args=simulator_args):
def P(k, A=1, B=1):
def fnk(k):
return jax.lax.cond(np.equal(k, 0.), lambda _: 0., lambda k: (A * k ** -B), operand=k)
if len(k.shape) == 1:
return jax.vmap(fnk)(k)
else:
return jax.vmap(partial(P, A=A, B=B))(k)
def fn(key, A, B):
shape = simulator_args["shape"]
k = np.sqrt(np.sum(np.array(np.meshgrid(*(
(np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) * 2*np.pi / _shape)**2
for _shape in shape))), axis=0))
new_shape = ()
for _shape in shape:
if _shape % 2 == 0:
new_shape += (_shape+1,)
else:
new_shape += (_shape,)
key1,key2 = jax.random.split(key)
foreground = foregrounds[jax.random.randint(key2,
minval=0, maxval=1000, shape=())]
# L is in length units, like Gpc
L = simulator_args['L']
dim = simulator_args['dim']
if np.isscalar(L):
L = [L]*int(dim)
else:
L = np.array(L)
V = np.prod(np.array(L))
scale = V**(1./dim)
Lk = ()
_N = 1
for i,_shape in enumerate(shape):
_N *= _shape
Lk += (_shape / L[i],) # 1 / dx
fft_norm = np.prod(np.array(Lk))
_dims = len(shape)
tpl = ()
for _d in range(_dims):
tpl += (_d,)
# POWERBOX IMPLEMENTATION
mag = jax.random.normal(key1, shape=tuple(N for N in new_shape))
# random phases
pha = 2 * np.pi * jax.random.uniform(key1, shape=tuple(N for N in new_shape))
# now make hermitian field (reality condition)
revidx = (slice(None, None, -1),) * len(mag.shape)
mag = (mag + mag[revidx]) / np.sqrt(2)
pha = (pha - pha[revidx]) / 2 + np.pi
dk = mag * (np.cos(pha) + 1j * np.sin(pha)) # output is complex
cutidx = (slice(None, -1),) * len(new_shape)
dk = dk[cutidx]
powers = np.concatenate((np.zeros(1),
np.sqrt(P(k.flatten()[1:], A=A, B=B)))).reshape(k.shape)
# normalize power by volume
if simulator_args['vol_norm']:
powers = powers/V
fourier_field = powers * dk
fourier_field = jax.ops.index_update(
fourier_field,
np.zeros(len(shape), dtype=int),
np.zeros((1,)))
field = np.expand_dims(np.fft.ifftn(fourier_field) * fft_norm * V, (0,))
if simulator_args["N_scale"]:
field *= scale
if not simulator_args["squeeze"]:
field = np.expand_dims(field, (0,))
return np.array(np.real((field)), dtype='float32')
shape = simulator_args["shape"]
A, B = θ
#k = np.sqrt(np.sum(np.array(np.meshgrid(*(((2. * np.pi) / N) * np.arange(N // 2) for N in shape)))**2., 0))
#return fn(key, A, B)
if A.shape == B.shape:
if len(A.shape) == 0:
return fn(rng, A, B)
else:
keys = jax.random.split(rng, num=A.shape[0] + 1)
rng = keys[0]
keys = keys[1:]
return jax.vmap(
lambda key, A, B: simulator(key, (A, B), simulator_args=simulator_args)
)(keys, A, B)
else:
if len(A.shape) > 0:
keys = jax.random.split(rng, num=A.shape[0] + 1)
rng = keys[0]
keys = keys[1:]
return jax.vmap(
lambda key, A: simulator(key, (A, B), simulator_args=simulator_args)
)(keys, A)
elif len(B.shape) > 0:
keys = jax.random.split(rng, num=B.shape[0])
return jax.vmap(
lambda key, B: simulator(key, (A, B), simulator_args=simulator_args)
)(keys, B)
###Output
_____no_output_____
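###Markdown
The simulator above is dense, but the core recipe is easy to see in one dimension: draw complex white noise in Fourier space, scale each mode by sqrt(P(k)), and inverse-FFT back to real space. A minimal 1D sketch with a toy `gaussian_field_1d` helper (illustrative only; it skips the Hermitian-symmetry and volume bookkeeping the full simulator handles):
###Code
def gaussian_field_1d(key, n=128, A=1.0, B=0.5):
    # angular frequency of each FFT mode
    k = np.abs(np.fft.fftfreq(n) * 2. * np.pi)
    # sqrt of the power spectrum, with the k=0 (mean) mode zeroed out
    amp = np.where(k > 0., np.sqrt(A * np.where(k > 0., k, 1.) ** -B), 0.)
    key1, key2 = jax.random.split(key)
    noise = jax.random.normal(key1, (n,)) + 1j * jax.random.normal(key2, (n,))
    return np.real(np.fft.ifft(amp * noise)) * n

field_1d = gaussian_field_1d(jax.random.PRNGKey(0))
print(field_1d.shape, field_1d.std())
###Output
_____no_output_____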
###Markdown
sim and gradient
###Code
def simulator_gradient(rng, θ, simulator_args=simulator_args):
return value_and_jacrev(simulator, argnums=1, allow_int=True, holomorphic=True)(rng, θ, simulator_args=simulator_args)
# plot example simulation and derivative
deriv_args = {"N": N, "L": 128, "dim": dim, "shape": field_shape, "vol_norm": True, "N_scale": True, "squeeze": False}
simulation, simulation_gradient = value_and_jacfwd(simulator, argnums=1)(rng, θ_fid, simulator_args=deriv_args)
plt.imshow(np.squeeze(simulation[0]), extent=(0,1,0,1))
plt.colorbar()
plt.title('example simulation')
plt.show()
plt.imshow(np.squeeze(simulation_gradient[0].T[0].T), extent=(0,1,0,1))
plt.title('gradient of simulation')
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
analytic Fisher
###Code
import powerbox as pbox
A,B = θ_fid
#sim = np.squeeze(simulator(rng, θ_fid, simulator_args={"N": N, "squeeze":True, "L": L, "dim": 2}))
shape = simulator_args["shape"]
kmax = 0.5
kmin = 0.5 / N
#kbin = np.linspace(kmin, kmax, num=N**2)
kbin = np.sqrt(np.sum(np.array(np.meshgrid(*(((2. * np.pi) / N) * np.arange(N // 2) for N in shape)))**2., 0))
def fisher_approx(θ, kmin, kmax):
A,B = θ
Faa = (1/A)**2 * (np.log(kmax) - np.log(kmin))
Fab = (1/(2*A)) * ((np.log(kmax))**2 - (np.log(kmin))**2)
Fbb = ((np.log(kmax))**3 - (np.log(kmin))**3) / 3
return np.array([[Faa, Fab], [Fab, Fbb]]) * 2*np.pi
def fisher(θ, kvec, N=32, dim=2, L=None):
A,B = θ
if L is not None:
V = L**dim
else:
V = N**2 # physical box volume
#dk = kvec[1] - kvec[0]
Nk = N**dim # number of k modes
pk = lambda k : A*(k**-B) # P(k) = Ak^(-B)
p_a = lambda k : k**-B # deriv w.r.t. A
p_b = lambda k : -A*(k**-B)*np.log(k) # deriv w.r.t. B
powers = np.concatenate((np.ones(1),
(pk(kvec.flatten()[1:]))))
powera = np.concatenate((np.zeros(1),
(p_a(kvec.flatten()[1:]))))
powerb = np.concatenate((np.zeros(1),
(p_b(kvec.flatten()[1:]))))
Cinv = np.diag(2. / (powers)) # diagonal inv. covariance
Ca = np.diag(powera / 2.) # C_{,A}
Cb = np.diag(powerb / 2.) # C_{,B}
Faa = 0.5 * np.trace((Ca @ Cinv @ Ca @ Cinv))
Fab = 0.5 * np.trace((Ca @ Cinv @ Cb @ Cinv))
Fba = 0.5 * np.trace((Cb @ Cinv @ Ca @ Cinv))
Fbb = 0.5 * np.trace((Cb @ Cinv @ Cb @ Cinv))
return np.array([[Faa, Fab], [Fba, Fbb]]) #* 0.5 #* 1 / np.sqrt(2)
N = simulator_args["N"]
shape = simulator_args["shape"]
# kbin = np.sqrt(np.sum(
# np.array(np.meshgrid(*(np.arange(N // 2) * ((2. * np.pi) / (N))
# for N in shape)))**2., 0)).flatten()
# kbin = np.sqrt(np.sum(
# np.array(np.meshgrid(*(np.arange(N//2) * ((2. * np.pi) / (N))
# for N in shape)))**2., 0)).flatten()
kbin = np.sqrt(np.sum(np.array(np.meshgrid(*(
np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) *2* np.pi / _shape
for _shape in shape)))**2, axis=0))
print('k array extents: ', '[%f, %f]'%(np.min(kbin), np.max(kbin)))
print('k vector length: ', len(kbin))
f_expected = fisher(θ_fid, kbin[:N//2, :N//2])
print("analytic F(θ_fid): ", f_expected)
detf_expected = np.linalg.det(f_expected)
print("analytic det(F(θ_fid)): ", detf_expected)
# MAKE SIMULATION
N = simulator_args["N"]
shape = (N,N)
θ_sim = np.array([0.7, 0.8])
simulator_args = {"N": N, "L": 1, "dim": dim, "shape": shape, "vol_norm": True, "N_scale": False, "squeeze": True}
simulator_args["shape"] = (N,N)
simkey,rng = jax.random.split(rng)
#sim = np.squeeze(target_data)#
sim = np.squeeze(simulator(simkey, θ_sim, simulator_args=simulator_args))
sim_fft = (np.fft.fft2(sim)) / (N**2)
plt.imshow(np.real(sim_fft))
plt.colorbar()
shape = simulator_args["shape"]
kbin = np.sqrt(np.sum(np.array(np.meshgrid(*(
np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) * 2*np.pi / _shape
for _shape in shape)))**2, axis=0))
def Pk(k, A=1, B=0.5, N=32):
    eps = 1./N  # small offset keeps the k=0 mode from blowing up the power law
return (A * (k+eps) ** -B) #/ N**2
def Cov(k, A, B):
pk = Pk(k, A, B)
return np.diag(pk)
def get_likelihood(k, A, B, Δ):
Δ = Δ.flatten()
k = k
dlength = len(k.flatten())
def fn(_A, _B):
nrm = np.pad(np.ones(dlength-2)*2, (1,1), constant_values=1.)
nrm = jax.ops.index_update(
nrm, np.array([[0],[(dlength-2)]]), np.array([[1],[1]]))
#nrm = 1
powers = np.concatenate((np.ones(1),
(Pk(k.flatten()[1:], A=_A, B=_B))))
C = powers * nrm
invC = np.concatenate((np.zeros(1),
(1./Pk(k.flatten()[1:], A=_A, B=_B))))#*np.eye(len(k.flatten()))
logdetC = np.sum(np.log(C))
pi2 = np.pi * 2.
m_half_size = -0.5 * len(Δ) #- 0.5* Δ.shape[0] #// 2
exponent = - 0.5 * np.sum(np.conj(Δ) * invC * Δ) #np.einsum("i,ij,j->", np.conj(Δ), (invC), (Δ))
norm = -0.5 * logdetC + m_half_size*np.log(pi2)
return (exponent + norm)
return jax.vmap(fn)(A, B)
size = 20 # for likelihood gridding
Δ = sim_fft[:N//2, :N//2] #/ N**2
k = kbin[:N//2, :N//2]
A_start = 0.1
A_end = 2.0
B_start = 0.1
B_end = 2.0
# go by quadrant
qsize = 10
# 3 4
# 1 2
# bottom quadrant 1
A_range = np.linspace(0.1, 1, qsize)
B_range = np.linspace(0.1, 1, qsize)
domain_size = 1. / np.log(1e240) # ad-hoc temperature that rescales the log-likelihood so exp() stays finite
print('domain_size', domain_size)
A, B = np.meshgrid(A_range, B_range)
likelihood_1 = np.exp(get_likelihood(k,
A.ravel(), B.ravel(), Δ).reshape(qsize,qsize) * domain_size)
# quadrant 2
A_range = np.linspace(1, 2, qsize)
#domain_size = ((A_range[1]-A_range[0]) * (A_range[1]-B_range[0]))
A, B = np.meshgrid(A_range, B_range)
likelihood_2 = np.exp(get_likelihood(k,
A.ravel(), B.ravel(), Δ).reshape(qsize,qsize) * domain_size)
# quadrant 3
A_range = np.linspace(0.1, 1, qsize)
B_range = np.linspace(1, 2, qsize)
#domain_size = ((A_range[1]-A_range[0]) * (A_range[1]-B_range[0]))
A, B = np.meshgrid(A_range, B_range)
likelihood_3 = np.exp(get_likelihood(k,
A.ravel(), B.ravel(), Δ).reshape(qsize,qsize) * domain_size)
# quadrant 4
A_range = np.linspace(1, 2, qsize)
#domain_size = ((A_range[1]-A_range[0]) * (A_range[1]-B_range[0]))
A, B = np.meshgrid(A_range, B_range)
likelihood_4 = np.exp(get_likelihood(k,
A.ravel(), B.ravel(), Δ).reshape(qsize,qsize) * domain_size)
# concatenate results
likelihood = np.concatenate(
[np.concatenate([likelihood_1, likelihood_2], axis=1),
np.concatenate([likelihood_3, likelihood_4], axis=1)],
axis=0)
A_range = np.linspace(0.1, 2, 20)
B_range = np.linspace(0.1, 2, 20)
A, B = np.meshgrid(A_range, B_range)
print(likelihood.shape)
plt.figure(figsize=(10,10))
plt.contourf(A_range, B_range, likelihood)
#plt.contourf(A_range2, B_range2, L2.reshape((size, size)))
plt.colorbar()
plt.scatter(θ_sim[0], θ_sim[1], zorder=10, marker='+', s=100, color='k')
plt.xlabel('A')
plt.ylabel('B')
# computing likelihood function
shape = simulator_args["shape"]
kbin = np.sqrt(np.sum(np.array(np.meshgrid(*(
np.hstack((np.arange(0, _shape//2 + 1),
np.arange(-_shape//2 + 1, 0))) * 2*np.pi / _shape
for _shape in shape)))**2, axis=0))
class analytic_likelihood():
def __init__(self, field_shape,
k,
Δ,
prior,
gridsize=20,
tiling=2,
):
"""code for computing a gaussian field's likelihood for power spectrum parameters
tiling : list or int. tiling=2 means likelihood will be computed as 2x2 grid
"""
self.field_shape = field_shape
self.gridsize = gridsize
if np.isscalar(tiling):
self.tiling = [tiling]*2
else:
self.tiling = tiling
#self.tilesize = gridsize // tiling
self.N = np.sqrt(np.prod(np.array(field_shape))) # should just be N for NxN grid
self.prior = prior
self.k = k
self.Δ = Δ
def Pk(self, k, A=1, B=0.5, N=32):
eps = 1./self.N # for num. stability
return (A * (k+eps) ** -B) #/ N**2
def Cov(self, k, A, B):
pk = self.Pk(k, A, B)
return np.diag(pk)
def log_likelihood(self, k, A, B, Δ):
Δ = Δ.flatten()
k = k
dlength = len(k.flatten())
def fn(_A, _B):
nrm = np.pad(np.ones(dlength-2)*2, (1,1), constant_values=1.)
nrm = jax.ops.index_update(
nrm, np.array([[0],[(dlength-2)]]), np.array([[1],[1]]))
#nrm = 1
powers = np.concatenate((np.ones(1),
(self.Pk(k.flatten()[1:], A=_A, B=_B))))
C = powers * nrm
invC = np.concatenate((np.zeros(1),
(1./self.Pk(k.flatten()[1:], A=_A, B=_B))))
logdetC = np.sum(np.log(C))
pi2 = np.pi * 2.
m_half_size = -0.5 * len(Δ) #- 0.5* Δ.shape[0] #// 2
exponent = - 0.5 * np.sum(np.conj(Δ) * invC * Δ) #np.einsum("i,ij,j->", np.conj(Δ), (invC), (Δ))
norm = -0.5 * logdetC + m_half_size*np.log(pi2)
            return (exponent + norm) + np.log(3e200)  # constant offset keeps exp() within floating-point range
return jax.vmap(fn)(A, B)
def get_likelihood(self, norm=1./np.log(1e240), return_grid=False):
A_start = self.prior[0][0]
A_end = self.prior[1][0]
B_start = self.prior[0][1]
B_end = self.prior[1][1]
region_size = [self.gridsize // self.tiling[i] for i in range(len(self.tiling))]
print("computing likelihood on a %dx%d grid \n \
in tiles of size %dx%d"%(self.gridsize, self.gridsize, region_size[0], region_size[1]))
def get_like_region(A0, A1, B0, B1, qsize, norm):
A_range = np.linspace(A0, A1, qsize)
B_range = np.linspace(B0, B1, qsize)
A, B = np.meshgrid(A_range, B_range)
return np.exp(self.log_likelihood(k,
A.ravel(), B.ravel(), Δ).reshape(qsize,qsize) * norm)
A_incr = (A_end - A_start) / self.tiling[0]
B_incr = (B_end - B_start) / self.tiling[1]
# marks the ends of linspace
A_starts = [A_start + (i)*A_incr for i in range(self.tiling[0])]
A_ends = [A_start + (i+1)*A_incr for i in range(self.tiling[0])]
B_starts = [B_start + (i)*B_incr for i in range(self.tiling[1])]
B_ends = [B_start + (i+1)*B_incr for i in range(self.tiling[1])]
_like_cols = []
for _col in range(self.tiling[0]):
# slide horizontally in A
_like_row = []
for _row in range(self.tiling[1]):
# slide vertically in B
_like = get_like_region(A_starts[_row],
A_ends[_row],
B_starts[_col],
B_ends[_col],
region_size[0],
norm
)
_like_row.append(_like)
_like_cols.append(np.concatenate(_like_row, axis=1))
_likelihood = np.concatenate(_like_cols, axis=0)
if return_grid:
_A_range = np.linspace(self.prior[0,0], self.prior[1,0], self.gridsize)
            _B_range = np.linspace(self.prior[0,1], self.prior[1,1], self.gridsize)
return _likelihood, _A_range, _B_range
return _likelihood
def plot_contours(self, ax=None, θ_ref=None, norm=1./np.log(1e240)):
_like, _A, _B = self.get_likelihood(norm=norm, return_grid=True)
_A, _B = np.meshgrid(_A, _B)
if ax is None:
fig,ax = plt.subplots(figsize=(10,10))
mesh = ax.contourf(_A, _B, _like)
plt.colorbar(mesh, ax=ax)
if θ_ref is not None:
ax.scatter(θ_sim[0], θ_sim[1], zorder=10, marker='+', s=100, color='r')
ax.set_xlabel('A')
ax.set_ylabel('B')
return ax
def plot_corner(self, ax=None, label="Analytic likelihood"):
_like, _A_range, _B_range = self.get_likelihood(return_grid=True)
likelihoodA = _like.sum(0)
likelihoodA /= likelihoodA.sum() * (_A_range[1] - _A_range[0])
likelihoodB = _like.sum(1)
likelihoodB /= likelihoodB.sum() * (_B_range[1] - _B_range[0])
sorted_marginal = np.sort(_like.flatten())[::-1]
cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
value = []
for level in [0.95, 0.68]:
this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
if len(value) == 0:
value.append(this_value)
elif this_value <= value[-1]:
break
else:
value.append(this_value)
# add in the likelihood estimate
ax[0, 0].plot(_A_range, likelihoodA, color="C2", label='Analytic likelihood')
ax[0, 1].axis("off")
ax[1, 0].contour(_A_range, _B_range, _like, levels=value, colors="C2")
ax[1, 1].plot(likelihoodB, _B_range, color="C2", label='loglike')
return ax
gridsize = 50 # for likelihood gridding
Δ = sim_fft[:N//2, :N//2] #/ N**2
k = kbin[:N//2, :N//2]
prior = np.array([[0.01, 0.01], [5., 3.5]])
AL = analytic_likelihood((128,128), k, Δ, prior, gridsize=gridsize, tiling=[2,2])
like = AL.get_likelihood(norm=1./np.log(1e2))
AL.plot_contours(θ_ref=θ_sim, norm=1./np.log(1e240))
like
likelihood = like # computed above
#A_range = np.linspace(0.1, 3.0, 25)
#B_range = np.linspace(0.1, 2.5, 25)
likelihoodA = likelihood.sum(0)
likelihoodA /= likelihoodA.sum() * (A_range[1] - A_range[0])
likelihoodB = likelihood.sum(1)
likelihoodB /= likelihoodB.sum() * (B_range[1] - B_range[0])
sorted_marginal = np.sort(likelihood.flatten())[::-1]
cdf = np.cumsum(sorted_marginal / sorted_marginal.sum())
value = []
for level in [0.95, 0.68]:
this_value = sorted_marginal[np.argmin(np.abs(cdf - level))]
if len(value) == 0:
value.append(this_value)
elif this_value <= value[-1]:
break
else:
value.append(this_value)
# add in the likelihood estimate (assumes `ax` holds the 2x2 corner-plot axes created in an earlier cell)
ax[0, 0].plot(A_range, likelihoodA, color="C2", label='Analytic likelihood')
ax[0, 1].axis("off")
ax[1, 0].contour(A_range, B_range, likelihood, levels=value, colors="C2")
ax[1, 1].plot(likelihoodB, B_range, color="C2", label='loglike')
like = AL.get_likelihood()
A_range = np.linspace(prior[0,0], prior[1,0], gridsize)
B_range = np.linspace(prior[0,1], prior[1,1], gridsize)
A, B = np.meshgrid(A_range, B_range)
fig,ax = plt.subplots(figsize=(10,10))
mesh = ax.contourf(A_range, B_range, like)
plt.colorbar(mesh, ax=ax)
ax.scatter(θ_sim[0], θ_sim[1], zorder=10, marker='+', s=100, color='k')
ax.set_xlabel('A')
ax.set_ylabel('B')
###Output
computing likelihood on a 50x50 grid
in tiles of size 25x25
|
Activation Functions/Sigmoid Function.ipynb | ###Markdown
A sigmoid function is a mathematical function having a characteristic "S"-shaped curve, or sigmoid curve. A common example of a sigmoid function is the logistic function.  A sigmoid function is a bounded, differentiable, real function that is defined for all real input values, has a non-negative derivative at each point, and has exactly one inflection point. A sigmoid "function" and a sigmoid "curve" refer to the same object. Properties: in general, a sigmoid function is monotonic and has a first derivative which is bell-shaped. Conversely, the integral of any continuous, non-negative, bell-shaped function (with one local maximum and no local minimum, unless degenerate) will be sigmoidal. Thus the cumulative distribution functions of many common probability distributions are sigmoidal. One such example is the error function, which is related to the cumulative distribution function of a normal distribution.
###Code
import numpy as np

def sigmoid(x):
    # logistic sigmoid: maps any real input into the open interval (0, 1)
    return 1 / (1 + np.exp(-x))
###Output
_____no_output_____ |
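###Markdown
A quick numeric check of the definition and of the derivative identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) (a small illustrative example):
###Code
x = np.linspace(-6, 6, 5)
print(sigmoid(x))                     # values are bounded in (0, 1)
print(sigmoid(x) * (1 - sigmoid(x)))  # bell-shaped derivative, peaking at x = 0
###Output
_____no_output_____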
MIO/TEST-INICIAL-FINAL-nb-forms.ipynb | ###Markdown
Initial test for the course "Introduction to Programming with Python"
###Code
#@markdown ### What experience or training did you have prior to Format:ea?
respuesta1 = "(do not fill in)" #@param {type:"string"}
#@markdown ### What are your goals and expectations for the Format:ea program?
respuesta2 = "" #@param {type:"string"}
#@markdown ### What is a virtual machine?
respuesta3 = "" #@param {type:"string"}
#@markdown ### What is a database?
respuesta4 = "" #@param {type:"string"}
#@markdown ### Which programming languages can you name (maximum 5)?
respuesta5 = "" #@param {type:"string"}
#@markdown ### What is HTML? Is it a programming language?
respuesta6 = "" #@param {type:"string"}
#@markdown ### What is a variable?
respuesta7 = "" #@param {type:"string"}
#@markdown ### What is a function?
respuesta8 = "" #@param {type:"string"}
#@markdown ### Give an example of a function call
respuesta9 = "" #@param {type:"string"}
#@markdown ### Give an example of an arithmetic expression
respuesta10 = "" #@param {type:"string"}
#@markdown ### What is an operating system, or what is it for?
respuesta11 = "" #@param {type:"string"}
#@markdown ### What is a floating-point type?
respuesta12 = "" #@param {type:"string"}
#@markdown ### What is a loop?
respuesta13 = "" #@param {type:"string"}
#@markdown ### What is an algorithm?
respuesta14 = "" #@param {type:"string"}
#@markdown ### What is a gigabyte?
respuesta15 = "" #@param {type:"string"}
#@markdown ### What is a comment?
respuesta16 = "" #@param {type:"string"}
#@markdown ### What do you think is the result of this expression? 4 * 7 % 3 ** 2
respuesta17 = "" #@param {type:"string"}
#@markdown ### What does "divide and conquer" mean (in programming)?
respuesta18 = "" #@param {type:"string"}
#@markdown ### What is debugging?
respuesta19 = "" #@param {type:"string"}
#@markdown ### What are attributes, or what are they used for? And methods?
respuesta20 = "" #@param {type:"string"}
#@markdown ### What are exceptions, or what are they used for?
respuesta21 = "" #@param {type:"string"}
#@markdown ### What is recursion?
respuesta22 = "" #@param {type:"string"}
#@markdown ### What is a keyword?
respuesta23 = "" #@param {type:"string"}
#@markdown ### What is a data structure?
respuesta24 = "" #@param {type:"string"}
#@markdown ### Who is Guido van Rossum?
respuesta25 = "" #@param {type:"string"}
#@markdown ### What is a dictionary, or what is it used for (or give an example)?
respuesta26 = "" #@param {type:"string"}
#@markdown ### What is efficiency, or why is it important?
respuesta27 = "" #@param {type:"string"}
#@markdown ### What is concurrency?
respuesta28 = "" #@param {type:"string"}
###Output
_____no_output_____ |
electricity_report_investigation.ipynb | ###Markdown
Test cell
###Code
O_coord = np.array([0.8974, -1.285111, 1.375674])
H_coord = np.array([0.93366, -1.620249, 0.461291])
dist(O_coord,H_coord)
import math
def fibonacci_sphere(samples=1000):
x_p = []
y_p = []
z_p = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(samples):
y = 1 - (i / float(samples - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
x_p.append(x)
y_p.append(y)
z_p.append(z)
return x_p,y_p,z_p
def translate(value, leftMin, leftMax, rightMin, rightMax):
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / float(leftSpan)
# Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightSpan)
def surf_sph(radius):
x_p,y_p,z_p = fibonacci_sphere(samples=6)
if radius <= 0.5:
mappedStart=0.5-radius
mappedEnd=radius+0.5
x_p_mapped = [0.5]
for i in x_p:
x_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
y_p_mapped = [0.5]
for i in y_p:
y_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
z_p_mapped = [0.5]
for i in z_p:
z_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
    else:
        raise ValueError('Radius is too large: it must be <= 0.5 to fit inside the unit box')
    return x_p_mapped, y_p_mapped, z_p_mapped
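# Quick sanity check (illustrative): the mapped points should all stay inside the unit box,
# and the returned lists include the prepended centre point as well as the sphere samples
xs, ys, zs = surf_sph(0.25)
print('x range: [%.3f, %.3f], n points: %d' % (min(xs), max(xs), len(xs)))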
#test cell, can be run for infinite time
xa,ya,za = surf_sph(0.23)
thetar=np.random.rand(7)*np.pi*2
phir=np.random.rand(7)*np.pi*2
moltypes =[2,1,1,1,1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
#print(constrain01(rall,0.02))
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
###Output
_____no_output_____
###Markdown
Test interactive
###Code
#Debugging Part 2/3
rb = ra/2*3**0.5  # ra comes from an earlier cell
c = 0.5
xa,ya,za = surf_sph(i)  # i is left over from a previous loop; it must be a radius <= 0.5
thetar = [0,0,0,0,0,0]
phir = np.linspace(-np.pi,+np.pi,6)
moltypes =[1,1,1,1,1,1]  # note: surf_sph returns 7 anchors (centre + 6 sphere points), one more than listed here
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
print('total E:', potl_energy_sum(rall,qall))
#print(rall,qall)
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
#Debugging Part 2/3
xa=[0.5,0.933,0.933,0.5,0.067,0.067] #x coordinates of molecule anchors
ya=[0,0.25,0.75,1,0.75,0.25] #y coordinates of molecule anchors
za=[0,0,0,0,0,0] #z coordinates of molecule anchors
thetar=[0,0,0,0,0,0] #no theta rotation for any of the molecules
#print(thetar)
phir=[3.141592653589793,4.1887902047863905,5.235987755982989,0.0,1.0471975511965976,2.0943951023931953]
moltypes =[1,1,1,1,1,1]
xyza = np.stack((np.array(xa), np.array(ya),np.array(za)), axis=-1)
#calculate the total E for this generated sys.
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
print('total E:', potl_energy_sum(rall,qall))
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
###Output
total E: -5.281986939532095e-18
###Markdown
Test energy only for two molecules. Investigation 1D: total potential energy against distance between two charges
###Code
distance_bw_charges = np.linspace(0,1,100) # NB: the first point (0) makes the two molecules coincide, so the 1/r terms diverge there
total_E_two_charges = []
for i in distance_bw_charges:
    xa = [0,i]
    ya = [0.3,0.3]
    za = [0.3,0.3]
    thetar = [0,0]
    phir = [0,np.pi]
    moltypes = [1,1]
    rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
    rall = np.array(rall)*1e-9
    potl_E_sum = potl_energy_sum(rall,qall)
    total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against distance',fontsize=14)
plt.plot(distance_bw_charges, total_E_two_charges, marker='*')
plt.xlabel('distance (nm)')
plt.ylabel('total E (J)')
plt.show();
np.array([total_E_two_charges[:20]])*1e17
np.array([distance_bw_charges[:20]])
0.13131313*1e-17
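# Sanity check (added): a single point-charge pair via Coulomb's law, as an
# order-of-magnitude cross-check on the curve above. It uses the O/H partial
# charges printed later in this notebook (about -0.66 e and +0.33 e) and is
# NOT the full multi-site sum that potl_energy_sum() evaluates.
k_C = 8.9875517923e9   # Coulomb constant, N m^2 C^-2
e_C = 1.602176634e-19  # elementary charge, C
r_m = 0.6e-9           # 0.6 nm separation, in metres
U_pair = k_C * (-0.6569 * e_C) * (0.3285 * e_C) / r_m
print('single O-H pair at 0.6 nm: %.2e J' % U_pair)  # roughly -8e-20 J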
###Output
_____no_output_____
###Markdown
Investigation 1D: total potential energy against theta for one molecule
###Code
theta_for_one_mo = np.linspace(-np.pi,np.pi,100)
total_E_two_charges = []
for i in theta_for_one_mo:
xa = [0,0.6]
ya = [0.3,0.3]
za = [0.3,0.3]
thetar = [0,i]
phir = [0,0]
moltypes =[1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against theta',fontsize=14)
plt.plot(theta_for_one_mo, total_E_two_charges, marker='*')
plt.xlabel('theta(rad)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
Investigation 1D: total potential energy against phi for one molecule
###Code
phi_for_one_mo = np.linspace(-np.pi,np.pi,100)
total_E_two_charges = []
for i in phi_for_one_mo:
xa = [0,0.6]
ya = [0.3,0.3]
za = [0.3,0.3]
thetar = [0,0]
phir = [0,i]
moltypes =[1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against phi',fontsize=14)
plt.plot(phi_for_one_mo, total_E_two_charges, marker='*')
plt.xlabel('phi (rad)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
For a three-molecule system. Investigation 1D: total potential energy against distance between two charges
###Code
distance_bw_charges = np.linspace(0,1,100) # i=0 and i=0.6 make the middle molecule coincide with a fixed one, hence the divide-by-zero warning below
total_E_two_charges = []
for i in distance_bw_charges:
    xa = [0,i,0.6]
    ya = [0.3,0.3,0.3]
    za = [0.3,0.3,0.3]
    thetar = [0,0,0]
    phir = [0,0,0]
    moltypes = [1,1,1]
    rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
    rall = np.array(rall)*1e-9
    potl_E_sum = potl_energy_sum(rall,qall)
    total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against distance',fontsize=14)
plt.plot(distance_bw_charges, total_E_two_charges, marker='*')
plt.xlabel('distance (nm)')
plt.ylabel('total E (J)')
plt.show();
###Output
C:\Users\AlbertCielstian\Documents\file_Chemistry_BSc\yr2_MPC2_phy_proj\MPC2_phy_proj.py:38: RuntimeWarning: divide by zero encountered in double_scalars
V=K * (q1*e/dist(r0,r1))
###Markdown
Investigation 1D: total potential energy against theta for one molecule
###Code
''' rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E energy against theta',fontsize=14)
plt.plot(theta_for_one_mo, total_E_two_charges, marker='*')
plt.xlabel('theta(rad)')
plt.ylabel('total E (J)')
plt.show();'''
###Output
_____no_output_____
###Markdown
Investigation 1D: total potential energy against phi for one molecule
###Code
phi_for_one_mo = np.linspace(-np.pi,np.pi,100)
total_E_two_charges = []
for i in phi_for_one_mo:
xa = [0,0.3,0.6]
ya = [0.3,0.3,0.3]
za = [0.3,0.6,0.3]
thetar = [0,0,0]
phir = [0,i,0]
moltypes =[1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against phi',fontsize=14)
plt.plot(phi_for_one_mo, total_E_two_charges, marker='*')
plt.xlabel('phi(rad)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
Three degrees of freedom: the phi angles of the three molecules
###Code
phi_list = np.linspace(-np.pi,np.pi,4)
phi01, phi02, phi03 = np.meshgrid(phi_list, phi_list,phi_list)
total_E_two_charges = []
for i in range(len(phi01)):
pinlistV1=[]
for j in range(len(phi01[i])):
pinlistV2=[]
for k in range(len(phi01[i][j])):
            #calculate the total potential energy at each point of the 3D phi grid
xa = [0,0.3,0.6]
ya = [0.3,0.3,0.3]
za = [0.3,0.3,0.3]
thetar = [0,0,0]
phir = [phi01[i][j][k],phi02[i][j][k],phi03[i][j][k]]
moltypes =[1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
pinlistV2.append(potl_E_sum)
pinlistV1.append(pinlistV2)
total_E_two_charges.append(pinlistV1)
np.max(total_E_two_charges)
###Output
_____no_output_____
###Markdown
For six molecules: electrostatic energy against total distance
###Code
def total_dist(ri):
'''
calculate the total distance between each pair of atoms
'''
tot_value=0 #initialise the total potential energy
chargeadded=[] #list of indices of charges already added
for loop in range(len(ri)): #loop over each charge in turn
for j in chargeadded: #loop over charges already added (bringing charge loop towards charge j)
tot_value_ij=dist(ri[j],ri[loop])
#print('Adding PE %.2e [units] of bringing charge %d towards charge %d' % (Uij,loop,j))
tot_value=tot_value+tot_value_ij
chargeadded.append(loop) #add the index of the added charge to the list
return tot_value
def constrain01(xyz_coord,threshold):
    '''
    Return True if any pair of anchor points is closer than threshold,
    otherwise False.
    '''
    chargeadded=[] #list of indices of points already checked
    for loop in range(len(xyz_coord)): #loop over each point in turn
        for j in chargeadded: #compare against every point already checked
            if dist(xyz_coord[j],xyz_coord[loop]) < threshold:
                return True
        chargeadded.append(loop)
    return False #no pair violates the threshold
xa=np.random.rand(2)
ya=np.random.rand(2)
za=np.random.rand(2)
xyza = np.stack((np.array(xa), np.array(ya),np.array(za)), axis=-1)
print(type(xyza))
testcoord = np.array([[0.08974, -0.1285111, 0.1375674],
[-0.1559218, -0.0241778, 0.14234739999999999],
[0.2090789, 0.09844520000000001, -0.0045693]])
print(constrain01(testcoord,0.266))
import math
def fibonacci_sphere(samples=1000):
x_p = []
y_p = []
z_p = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(samples):
y = 1 - (i / float(samples - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
x_p.append(x)
y_p.append(y)
z_p.append(z)
return x_p,y_p,z_p
def translate(value, leftMin, leftMax, rightMin, rightMax):
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (float)
valueScaled = float(value - leftMin) / float(leftSpan)
# Convert the 0-1 range into a value in the right range.
return rightMin + (valueScaled * rightSpan)
'''mappedStart=0.25
mappedEnd=0.75
x_p_mapped = []
for i in x_p:
x_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
y_p_mapped = []
for i in y_p:
y_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
z_p_mapped = []
for i in z_p:
z_p_mapped.append(translate(i,-1,1,mappedStart,mappedEnd))
x_p,y_p,z_p = fibonacci_sphere(samples=6)
x_p_mapped,y_p_mapped,z_p_mapped'''
distance_bw_all = []
total_E_all = []
datasets = []
moltypes = [1,1,1,1,1,1] # six water molecules (type 1 = water); previously this relied on a stale global
for i in range(50):
xa=[0.5,
0.20505244876867207,
0.5428296831899158,
0.7980729498156808,
0.10611460587382854,
np.random.rand(1)[0]]
ya=[1.0, 0.8, 0.6, 0.4, 0.19999999999999996, np.random.rand(1)[0]]
za=[0.5,
0.7701961177046095,
0.011977850668792478,
0.8887833800308066,
0.4303272198482754,
np.random.rand(1)[0]]
thetar=np.random.rand(6)*np.pi*2
phir=np.random.rand(6)*np.pi*2
    rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
while constrain01(rall,0.05):
xa=[0.5,
0.20505244876867207,
0.5428296831899158,
0.7980729498156808,
0.10611460587382854,
np.random.rand(1)[0]]
ya=[1.0, 0.8, 0.6, 0.4, 0.19999999999999996, np.random.rand(1)[0]]
za=[0.5,
0.7701961177046095,
0.011977850668792478,
0.8887833800308066,
0.4303272198482754,
np.random.rand(1)[0]]
thetar=np.random.rand(6)*np.pi*2
phir=np.random.rand(6)*np.pi*2
# thetar=np.random.rand(6)
# phir=np.random.rand(6)
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
    # record the accepted configuration only after the constraint loop,
    # so the stored parameters line up with the energies computed below
    datasets.append([moltypes,xa,ya,za,phir,thetar])
    distance_bw_all.append(total_dist(rall))
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_all.append(potl_E_sum)
total_E_all = np.array(total_E_all)
datasets = np.array(datasets)
lowest_index = np.argmin(total_E_all)
rall,qall = allatomposns(datasets[lowest_index][0],datasets[lowest_index][1],datasets[lowest_index][2],\
datasets[lowest_index][3],datasets[lowest_index][4],datasets[lowest_index][5])
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
print('potl_E_sum:',potl_E_sum, 'J')
plt.figure(figsize=(8,5.5))
plt.title('total E against total distance between all pairs',fontsize=14)
plt.plot(distance_bw_all, total_E_all, marker='.',linestyle = '')
plt.xlabel('total distance(nm)')
plt.ylabel('total E (J)')
plt.show();
#plot the lowest e atoms posi.
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1 = mol_plot(moltypes,datasets[lowest_index][1],datasets[lowest_index][2],\
datasets[lowest_index][3],datasets[lowest_index][4],datasets[lowest_index][5])
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
'''
# one of the arrangement Hexagon
rand_p=np.random.rand(1)
a=1/6
b=a*3**0.5
xa=[0,0,0,0,0,0]
ya=[a,2*a,4*a,5*a,4*a,np.random.rand(1)[0]]
za=[a+b,a+2*b,a+2*b,a+b,a,np.random.rand(1)[0]]
#thetar=np.random.rand(6)*np.pi*2
#phir=np.random.rand(6)*np.pi*2
thetar=[0,0,0,0,0,0]
phir=[0,0,0,0,0,0]
moltypes=[1,1,1,1,1,1] '''
'''#second arrangement: sphere surface
xa=[0.5,
0.20505244876867207,
0.5428296831899158,
0.7980729498156808,
0.10611460587382854,
np.random.rand(1)[0]]
ya=[1.0, 0.8, 0.6, 0.4, 0.19999999999999996, np.random.rand(1)[0]]
za=[0.5,
0.7701961177046095,
0.011977850668792478,
0.8887833800308066,
0.4303272198482754,
np.random.rand(1)[0]]
thetar=np.random.rand(6)*np.pi*2
phir=np.random.rand(6)*np.pi*2 '''
np.random.rand(6)*np.pi*2
raList = np.linspace(0.2,0.5,80)
total_E_two_charges = []
for ra in raList:
avg_e_L = []
for i in range(800):
rb = ra/2*3**0.5
c = 0.5
xa = [c,c+rb,c+rb,c,c-rb,c-rb]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [0,0,0,0,0,0]
thetar = np.random.rand(6)*np.pi*2
phir = np.linspace(-np.pi,+np.pi,6)
moltypes =[1,1,1,1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
avg_e_L.append(potl_E_sum)
total_E_two_charges.append(np.mean(avg_e_L))
plt.figure(figsize=(8,5.5))
plt.title('total E against hexagon radius',fontsize=14)
plt.plot(raList, total_E_two_charges, marker='*')
plt.xlabel('radius(nm)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
electrostatic energy against hexagon radius
###Code
raList = np.linspace(0.2,0.5,50)
total_E_two_charges = []
for ra in raList:
rb = ra/2*3**0.5
c = 0.5
xa = [c,c+rb,c+rb,c,c-rb,c-rb]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [0,0,0,0,0,0]
thetar = [0,0,0,0,0,0]
phir = np.linspace(-np.pi,+np.pi,6)
moltypes =[1,1,1,1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against hexagon radius',fontsize=14)
plt.plot(raList, total_E_two_charges, marker='*')
plt.xlabel('radius(nm)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
electrostatic energy and theta rotation
###Code
tota = np.linspace(-np.pi,+np.pi,6)
tota
raList = np.linspace(0.2,0.5,50)
total_E_two_charges = []
for ra in raList:
rb = ra/2*3**0.5
c = 0.5
xa = [0,0,0,0,0,0]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [c,c+rb,c+rb,c,c-rb,c-rb]
thetar = np.linspace(-np.pi,+np.pi,6)
phir = [0,0,0,0,0,0]
moltypes =[1,1,1,1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_two_charges.append(potl_E_sum)
plt.figure(figsize=(8,5.5))
plt.title('total E against hexagon radius',fontsize=14)
plt.plot(raList, total_E_two_charges, marker='*')
plt.xlabel('radius(nm)')
plt.ylabel('total E (J)')
plt.show();
###Output
_____no_output_____
###Markdown
real ice structure
###Code
H = 0.2
l = 0.276 #O-O length in nm
lV = l*np.cos(75.7/180*np.pi)
lH = l*np.sin(75.7/180*np.pi)
H = 0.2
l = 0.276 #O-O length in nm
lV = l*np.cos(75.7/180*np.pi)
lH = l*np.sin(75.7/180*np.pi)
xa = [H,H+lV,H,H+lV,H,H+lV]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [c,c+rb,c+rb,c,c-rb,c-rb]
thetar = [0,0,0,0,0,0]
phir = [np.pi/2,np.pi/2,np.pi/2,np.pi/2,np.pi/2,np.pi/2]
moltypes =[1,1,1,1,1,1]
#Debugging Part 2/3
H = 0.2
l = 0.276 #O-O length in nm
lV = l*np.cos(75.7/180*np.pi)
lH = l*np.sin(75.7/180*np.pi)
ra=lH
rb = ra/2*3**0.5
c = 0.5
xa = [H,H+lV,H,H+lV,H,H+lV]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [c,c+rb,c+rb,c,c-rb,c-rb]
thetar = [0,-52.25/180*np.pi,0,-52.25/180*np.pi,0,-52.25/180*np.pi]
phir = [-71/180*np.pi,-60/180*np.pi,-71/180*np.pi,-60/180*np.pi,-71/180*np.pi,-60/180*np.pi]
moltypes =[1,1,1,1,1,1]
rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
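# NB (added): rall is still in nm here (no *1e-9 conversion to metres), so the
# printed energy is a factor of 1e9 smaller than the SI values elsewhere in
# this notebook (~1e-27 J below instead of ~1e-18 J).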
print('total E:', potl_energy_sum(rall,qall))
print(rall,qall)
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
###Output
total E: -5.259811038390616e-27
[array([0.2 , 0.23255166, 0.5 ]), array([0.21909072, 0.17710818, 0.57573225]), array([0.21909072, 0.17710818, 0.42426775]), array([0.26817173, 0.36627583, 0.73161706]), array([0.31606173, 0.28332792, 0.73161706]), array([0.25618103, 0.38704433, 0.63888788]), array([0.2 , 0.63372417, 0.73161706]), array([0.21909072, 0.57828069, 0.80734931]), array([0.21909072, 0.57828069, 0.65588481]), array([0.26817173, 0.76744834, 0.5 ]), array([0.31606173, 0.68450043, 0.5 ]), array([0.25618103, 0.78821684, 0.40727082]), array([0.2 , 0.63372417, 0.26838294]), array([0.21909072, 0.57828069, 0.34411519]), array([0.21909072, 0.57828069, 0.19265069]), array([0.26817173, 0.36627583, 0.26838294]), array([0.31606173, 0.28332792, 0.26838294]), array([0.25618103, 0.38704433, 0.17565376])] [-0.6569052996153647, 0.32845264980768235, 0.32845264980768235, -0.6569052996153647, 0.32845264980768235, 0.32845264980768235, -0.6569052996153647, 0.32845264980768235, 0.32845264980768235, -0.6569052996153647, 0.32845264980768235, 0.32845264980768235, -0.6569052996153647, 0.32845264980768235, 0.32845264980768235, -0.6569052996153647, 0.32845264980768235, 0.32845264980768235]
###Markdown
random six
###Code
distance_bw_all = []
total_E_all = []
datasets = []
moltypes = [1,1,1,1,1,1] # six water molecules (type 1 = water)
for i in range(1000):
# generate a set of coord.
xa=np.random.rand(6)
ya=np.random.rand(6)
za=np.random.rand(6)
thetar=np.random.rand(6)*np.pi*2
phir=np.random.rand(6)*np.pi*2
xyza = np.stack((np.array(xa), np.array(ya),np.array(za)), axis=-1)
    while constrain01(xyza,0.24): # regenerate while any O-O pair is closer than the threshold
#inside the loop, generate another set
xa=np.random.rand(6)
ya=np.random.rand(6)
za=np.random.rand(6)
thetar=np.random.rand(6)*np.pi*2
phir=np.random.rand(6)*np.pi*2
xyza = np.stack((np.array(xa), np.array(ya),np.array(za)), axis=-1)
        if constrain01(xyza,0.266): # still some O-O pair closer than 0.266 nm
            continue # go back and regenerate
        else: # all O-O distances >= 0.266 nm
            break # this coordinate set is accepted
    # use the accepted coordinates and save the data
    rall,qall = allatomposns(moltypes,xa,ya,za,phir,thetar)
    datasets.append([moltypes,xa,ya,za,phir,thetar]) # every accepted coordinate set is saved here
#calculate the total distance
    distance_bw_all.append(total_dist(rall))
#calculate the total E_energy
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
total_E_all.append(potl_E_sum)# all the total E_energy of the succeed coord. set are saved here
#------the coord. set finish generating------
#for all the successfully generated coord. sets, find the lowest one
total_E_all = np.array(total_E_all)
datasets = np.array(datasets)
lowest_index = np.argmin(total_E_all)
# calculate the lowest energy
rall,qall = allatomposns(datasets[lowest_index][0],datasets[lowest_index][1],datasets[lowest_index][2],\
datasets[lowest_index][3],datasets[lowest_index][4],datasets[lowest_index][5])
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
print('potl_E_sum:',potl_E_sum, 'J')
#-------plot------
#plot all the possible coord.
plt.figure(figsize=(8,5.5))
plt.title('total E against total distance between all pairs',fontsize=14)
plt.plot(distance_bw_all, total_E_all, marker='.',linestyle = '')
plt.xlabel('total distance(nm)')
plt.ylabel('total E (J)')
plt.show();
#-------plot------
#plot the lowest E atoms posi.(interactive)
@interact
def make_plot_2(elevpara=(-27,90,3),azimpara=(-117,0,3)):
f1,ax1 = mol_plot(moltypes,datasets[lowest_index][1],datasets[lowest_index][2],\
datasets[lowest_index][3],datasets[lowest_index][4],datasets[lowest_index][5])
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
###Output
potl_E_sum: -5.328069086616345e-18 J
###Markdown
Water Hexamer Prime isomer
###Code
rall = [[0.08974, -0.1285111, 0.1375674], [0.093366, -0.16202490000000003, 0.046129100000000006], [0.15384240000000002, -0.05632700000000001, 0.13259810000000002], [-0.1559218, -0.0241778, 0.14234739999999999], [-0.20641020000000002, -0.05019660000000001, 0.2197464], [-0.06776560000000001, -0.0678646, 0.1525237], [0.2090789, 0.09844520000000001, -0.0045693], [0.1258568, 0.1498421, -0.0018646000000000001], [0.2788216, 0.16420980000000002, -0.0093886], [-0.0447032, 0.202126, 0.0012114], [-0.0903998, 0.1539985, 0.0716909], [-0.09110810000000001, 0.1700472, -0.0771056], [-0.1776681, -0.0211411, -0.1351176], [-0.2560698, -0.05161790000000001, -0.18140610000000001], [-0.1949801, -0.0387242, -0.040953], [0.09244000000000001, -0.1339405, -0.14323950000000002], [0.0044626, -0.0992399, -0.16352670000000002], [0.14665809999999999, -0.0545087, -0.13404100000000002]]
qall = [-0.8043375515414669, 0.40216877577073346, 0.40216877577073346, -0.8043375515414669, 0.40216877577073346, 0.40216877577073346, -0.8043375515414669, 0.40216877577073346, 0.40216877577073346, -0.8043375515414669, 0.40216877577073346, 0.40216877577073346, -0.8043375515414669, 0.40216877577073346, 0.40216877577073346, -0.8043375515414669, 0.40216877577073346, 0.40216877577073346]
rall = np.array(rall)*1E-9
print('total E:', potl_energy_sum(rall,qall))
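# Caution (added): re-running this cell rescales rall by 1e-9 again. The
# recorded output below (~ -5.28e-09 J) has the same mantissa as the
# -5.28e-18 J figure earlier, consistent with the conversion having been
# applied twice; a single nm-to-m conversion gives ~ -5.28e-18 J.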
###Output
total E: -5.2819869395320895e-09
###Markdown
Crown Isomer. End of notebook.
###Code
#Debugging Part 2/3
xa = [H,H+lV,H,H+lV,H,H+lV]
ya = [c-ra,c-ra/2,c+ra/2,c+ra,c+ra/2,c-ra/2]
za = [c,c+rb,c+rb,c,c-rb,c-rb]
thetar = [0,0,0,0,0,0]
phir = [np.pi/2,np.pi/2,np.pi/2,np.pi/2,np.pi/2,np.pi/2]
moltypes =[1,1,1,1,1,1]
@interact
def make_plot_2(elevpara=(-27,90,1),azimpara=(-117,0,1)):
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=elevpara, azim=azimpara) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show();
#define molecule anchor positions and orientations here
#to add more molecules to the list, add new elements to each array
#this example defines six water molecules at random positions with random rotations
xa=np.random.rand(6) #x coordinates of molecule anchors
ya=np.random.rand(6) #y coordinates of molecule anchors
za=np.random.rand(6) #z coordinates of molecule anchors
thetar=np.random.rand(6)*np.pi*2 #random theta rotation for each molecule
phir=np.random.rand(6)*np.pi*2 #random phi rotation for each molecule
moltypes=[1,1,1,1,1,1] #code to indicate what type of molecule each one is. type=0 for CO2, 1 for water
#%matplotlib notebook
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
#add labels
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=90., azim=90) #adjust 'camera angle' with this command if desired - angles are in degrees
f1.show()
#Example of calling function allatomposns() to get the coordinates and charges of all atoms in a list
rall,qall=allatomposns(moltypes,xa,ya,za,phir,thetar)
# Use the function provided above to get the position coordinates of all 18 atoms and the corresponding charge
r_randMolecules = rall
q_randMolecules = qall
print(potl_energy_sum(rall,qall))
xa=np.array([0.3]) #x coordinates of molecule anchors
ya=np.array([0.3]) #y coordinates of molecule anchors
za=np.array([0.3]) #z coordinates of molecule anchors
thetar=[0] #no theta rotation for any of the molecules
phir=[0] #no phi rotation for any of the molecules
moltypes=[1] #code to indicate what type of molecule each one is. type=0 for CO2, 1 for water
f1,ax1=mol_plot(moltypes,xa,ya,za,phir,thetar)
#add labels
ax1.set_xlabel('x (nm)')
ax1.set_ylabel('y (nm)')
ax1.set_zlabel('z (nm)')
ax1.set_title('molecule positions')
ax1.set_aspect('auto')
ax1.view_init(elev=0, azim=75) #adjust 'camera angle' with this command if desired - angles are in degrees
rall,qall=allatomposns(moltypes,xa,ya,za,phir,thetar)
rall = np.array(rall)*1e-9
potl_E_sum = potl_energy_sum(rall,qall)
print('potl_E_sum =',potl_E_sum, 'J')
###Output
potl_E_sum = -1.3123192169132227e-18 J
|
installments_payments.ipynb | ###Markdown
Installments payments. About data: payment history for previous loans at Home Credit. There is one row for every made payment and one row for every missed payment.
Feature explanations:
- SK_ID_PREV: ID of previous credit in Home Credit related to loan in our sample. (One loan in our sample can have 0, 1, 2 or more previous loans in Home Credit.)
- SK_ID_CURR: ID of loan in our sample.
- NUM_INSTALMENT_VERSION: version of installment calendar (0 is for credit card) of previous credit. A change of installment version from month to month signifies that some parameter of the payment calendar has changed.
- NUM_INSTALMENT_NUMBER: on which installment we observe payment.
- DAYS_INSTALMENT: when the installment of previous credit was supposed to be paid (relative to application date of current loan).
- DAYS_ENTRY_PAYMENT: when the installment of previous credit was actually paid (relative to application date of current loan).
- AMT_INSTALMENT: the prescribed installment amount of previous credit on this installment.
- AMT_PAYMENT: what the client actually paid on previous credit on this installment.
###Code
# Last amended: 24th October, 2020
# Myfolder: C:\Users\Administrator\OneDrive\Documents\home_credit_default_risk
# Objective:
# Solving Kaggle problem: Home Credit Default Risk
# Processing installment_payments dataset
#
# Data Source: https://www.kaggle.com/c/home-credit-default-risk/data
# Ref: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features
# 1.0 Libraries
# (Some of these may not be needed here.)
%reset -f
import numpy as np
import pandas as pd
import gc
# 1.1 Reduce read data size
# There is a file reducing.py
# in this folder. A class
# in it is used to reduce
# dataframe size
# (Code modified by me to
# exclude 'category' dtype)
import reducing
# 1.2 Misc
import warnings
import os
warnings.simplefilter(action='ignore', category=FutureWarning)
# 1.3
pd.set_option('display.max_colwidth', None)  # None (not the deprecated -1) disables truncation
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
# 1.4 Display multiple commands outputs from a cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# 2.0 Onehot encoding (OHE) function. Uses pd.get_dummies()
# i) To transform 'object' columns to dummies.
# ii) Treat NaN as one of the categories
# iii) Returns transformed-data and new-columns created
def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df,
columns= categorical_columns,
dummy_na= nan_as_category # Treat NaNs as category
)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
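# Quick illustration (added; toy frame, not part of the pipeline): with
# dummy_na=True, NaN becomes its own dummy column.
_toy = pd.DataFrame({"color": ["red", None, "blue"]})
_toy_ohe, _toy_new_cols = one_hot_encoder(_toy)
print(_toy_new_cols)   # ['color_blue', 'color_red', 'color_nan']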
# 3.0 Prepare to read data
pathToData = "C:\\Users\\Administrator\\OneDrive\\Documents\\home_credit_default_risk"
os.chdir(pathToData)
# 2.2 Some constants
num_rows=None # Implies read all rows
nan_as_category = True # While transforming
# 'object' columns to dummies
# 3.0 Read previous application data first
ins = pd.read_csv(
'installments_payments.csv.zip',
nrows = num_rows
)
# 3.0.1 Reduce memory usage by appropriately
# changing data-types per feature:
ins = reducing.Reducer().reduce(ins)
# 3.1
ins.shape # (13605401, 8)
ins.head()
# 3.2 No object type column
ins.dtypes.value_counts()
# 3.3 OHE any object column
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
# 3.3.1 This dataset does not have any object feature
cat_cols
# 3.4
ins.shape # 13605401, 8)
ins.head()
# 4.0 Percentage and difference paid in each installment (amount paid and installment value)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
# 4.1 Days past due and days before due (no negative values)
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
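# DPD (days past due) keeps only late payments and DBD (days before due) only
# early ones; negatives are clipped to zero, so each installment row
# contributes to at most one of the two measures.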
# 4.2 How to perform aggregations?
# For numeric columns
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
# 4.2.1 For categorical columns
for cat in cat_cols:
aggregations[cat] = ['mean']
# 4.2.2
aggregations
# 4.3 Perform aggregation now
grouped = ins.groupby('SK_ID_CURR')
ins_agg= grouped.agg(aggregations)
# 4.4
ins_agg.shape
ins_agg.head()
# 4.5 Rename columns
ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
# 4.6
ins_agg.shape
ins_agg.head()
# 4.7 Create one more column. Per client how many installments accounts
ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
# 4.8
ins_agg.shape
ins_agg.head()
# 5.0 Save the results for subsequent use:
ins_agg.to_csv("processed_ins_agg.csv.zip", compression = "zip")
##############
###Output
_____no_output_____ |
Assignments/Assignment_2/Q1/q1_Arch3_MNIST.ipynb | ###Markdown
Decreasing filter size to 5x5
###Code
import numpy as np
import keras
from keras.models import Sequential
from keras.datasets import mnist
from matplotlib import pyplot as plt
from keras.layers import Dense,Flatten
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization
from keras.utils import np_utils
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score, classification_report
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
self.loss = []
self.val_f1s = []
self.val_recalls = []
self.val_precisions = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
self.loss.append(logs.get('loss'))
X_val, y_val = self.validation_data[0], self.validation_data[1]
y_predict = np.asarray(model.predict(X_val))
y_val = np.argmax(y_val, axis=1)
y_predict = np.argmax(y_predict, axis=1)
self.val_recalls.append(recall_score(y_val, y_predict, average=None))
self.val_precisions.append(precision_score(y_val, y_predict, average=None))
self.val_f1s.append(f1_score(y_val,y_predict, average=None))
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# print(X_train.shape)
# reshape to be [samples][pixels][width][height]
X_train = X_train.reshape(X_train.shape[0],28, 28,1).astype('float32')
X_test = X_test.reshape(X_test.shape[0],28, 28,1).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# # one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape)
num_classes = y_test.shape[1]
print(num_classes)
input_shape=(28,28,1)
epochs=10
batch_size = 512
history = AccuracyHistory()
def create_model(filters,filt1_size,conv_stride,pool_size,pool_stride,opt,loss):
model=Sequential()
model.add(Conv2D(filters, kernel_size=(filt1_size, filt1_size), strides=(conv_stride, conv_stride),activation='relu',input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(pool_size, pool_size), strides=(pool_stride,pool_stride), padding='valid'))
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer=opt,loss=loss,metrics=['accuracy'])
return model
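# Shape walk-through for this architecture (Conv2D's default padding is 'valid'):
# 28x28x1 -> conv 5x5, stride 1 -> 24x24x32 -> maxpool 2x2, stride 2 -> 12x12x32
# -> flatten -> 4608 -> Dense(1024): 4608*1024 + 1024 ~= 4.72M weights, which
# dominates the model's parameter count (see model.summary() below).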
model = create_model(32,5,1,2,2,'adam','categorical_crossentropy')
print(model.summary())
def fit_model(epochs,batch_size):
model.fit(X_train, y_train,batch_size=batch_size,epochs=epochs,validation_split=0.05,callbacks=[history])
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
y_pred = model.predict_classes(X_test)
cnf_mat = confusion_matrix(np.argmax(y_test,axis=1), y_pred)
return cnf_mat,score,y_pred
epochs=10
batch_size = 512
cnf_mat,score,y_pred = fit_model(epochs,batch_size)
from keras.models import load_model
model.save('dec_filter_size_MNIST.h5')
fscore=f1_score(np.argmax(y_test,axis=1), y_pred,average=None)
recall=recall_score(np.argmax(y_test,axis=1), y_pred,average=None)
prec=precision_score(np.argmax(y_test,axis=1), y_pred,average=None)
def plot(r1,r2,data,Info):
plt.plot(range(r1,r2),data)
plt.xlabel('Epochs')
plt.ylabel(Info)
plt.show()
plot(1,epochs+1,history.acc,'Accuracy')
plot(1,epochs+1,history.loss,'Loss')
plt.plot(recall,label='Recall')
plt.plot(prec,label='Precision')
plt.xlabel('Class')
plt.ylabel('F-score vs Recall vs Precision')
plt.plot(fscore,label='F-score')
plt.legend()
avg_fscore=np.mean(fscore)
print(avg_fscore)
avg_precision=np.mean(prec)
print(avg_precision)
avg_recall=np.mean(recall)
print(avg_recall)
cnf_mat = confusion_matrix(np.argmax(y_test,axis=1), y_pred)
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
conf = cnf_mat
fig, ax = plt.subplots(figsize=(30,30))
im = ax.imshow(conf,alpha=0.5)
# plt.show()
# We want to show all ticks...
ax.set_xticks(np.arange(cnf_mat.shape[0]))
ax.set_yticks(np.arange(cnf_mat.shape[1]))
# ... and label them with the respective list entries
ax.set_xticklabels(np.arange(0,cnf_mat.shape[0])) # label ticks with the actual class indices (0-9 for MNIST)
ax.set_yticklabels(np.arange(0,cnf_mat.shape[1]))
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(cnf_mat.shape[0]):
for j in range(cnf_mat.shape[1]):
text = ax.text(j, i, conf[i, j],
ha="center", va="center",color="black",fontsize=10)
ax.set_title("Confusion matrix",fontsize=20)
fig.tight_layout()
# fig.savefig('plot1_cnf.png')
plt.show()
del model
###Output
_____no_output_____ |
Mozilla_speech_alignment.ipynb | ###Markdown
Reader for Mozilla Common Voices Dataset: https://commonvoice.mozilla.org/en/datasets
###Code
import os
import IPython.display as ipd
import tensorflow as tf
from matplotlib import pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
from data_readers.mozilla_speech_reader import AudioTarReader # noqa
from models.alignment_model import PraticantoForcedAligner # noqa
import models # noqa
using_colab = False
if using_colab:
%pip install pandas -q
%pip install tqdm -q
%pip install ipywidgets -q
# watch this for the correct version 0.21.0 for tf 2.6, 0.24.0 for tf 2.8
%pip install tensorflow-io==0.24.0 -q
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
file = 'data/cv-corpus-8.0-2022-01-19-pt.tar.gz'
atr = AudioTarReader(file)
os.makedirs('data', exist_ok=True)
data_file = 'data/validated_not_traintest.tfrecords'
if not os.path.isfile(data_file):
atr.write_tfrecords_file(data_file)
data_file = 'data/train.tfrecords'
if not os.path.isfile(data_file):
atr.write_tfrecords_file(data_file, split='train')
dataset = tf.data.TFRecordDataset(
'data/validated_not_traintest.tfrecords'
).map(AudioTarReader.deserialize)
sample = [x for x in dataset.skip(3).take(1)][0]
print(sample[1].numpy().decode('UTF-8'), sample[2:])
ipd.Audio(sample[0][:, 0].numpy(), rate=48000)
###Output
_____no_output_____
###Markdown
Check marked strings. Like á, ó, etc.
###Code
v = tf.concat([['[BOS]'], tf.strings.unicode_split(sample[1], 'UTF-8'), ['[EOS]']], axis=0)
print(atr.lookup(v)), print(atr.lookup_inv(atr.lookup(v)))
x = tf.strings.unicode_split(sample[1], 'UTF-8')
atr.lookup(tf.strings.unicode_split(sample[1], 'UTF-8'))
atr.lookup('á'), atr.lookup('í')
###Output
_____no_output_____
###Markdown
Prep for training
###Code
pfa = PraticantoForcedAligner(vocab=atr.tokens, sampling_rate=48000, use_cnn=False)
alignment_model = pfa.build_models()
alignment_model.summary()
def prep_batch_inputs(cur_txt, cur_audio, seq_lengths):
return {
"char_seq": cur_txt,
"waveform": cur_audio,
}, seq_lengths
def prep_inputs(cur_audio, sentence, age, gender):
cur_txt = tf.ensure_shape(sentence, ())
cur_txt = tf.strings.unicode_split(cur_txt, 'UTF-8')
cur_txt = tf.concat([["[BOS]"], cur_txt, ["[EOS]"]], axis=0)
shapes = tf.concat(
[
tf.shape(cur_txt),
1 + (tf.shape(cur_audio[:, 0]) - pfa.frame_length) // pfa.frame_step,
],
axis=0,
)
return cur_txt, cur_audio[:, 0], shapes
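# The last element of `shapes` mirrors frame counting without padding:
# n_frames = 1 + (n_samples - frame_length) // frame_step
# e.g. with hypothetical values frame_length=1024 and frame_step=256, one
# second of 48 kHz audio gives 1 + (48000 - 1024) // 256 = 184 frames.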
batch_size = 8
dataset = tf.data.TFRecordDataset(
["data/validated_not_traintest.tfrecords", "data/train.tfrecords"],
num_parallel_reads=2,
)
dataset = (
dataset
.map(AudioTarReader.deserialize, num_parallel_calls=tf.data.AUTOTUNE)
.map(prep_inputs, num_parallel_calls=tf.data.AUTOTUNE)
.padded_batch(batch_size, padding_values=("[PAD]", 0.0, 0), drop_remainder=True)
.map(prep_batch_inputs, num_parallel_calls=tf.data.AUTOTUNE)
.prefetch(tf.data.AUTOTUNE)
)
###Output
_____no_output_____
###Markdown
Check results
###Code
alignment_model.load_weights('checkpoints/m_54_0.181.chkpt')
m_spec = models.alignment_model.get_spectrogram_model()
m_logmel = models.alignment_model.get_melspec_model()
samples = [x for x in dataset.take(1)]
preds = alignment_model(samples[0][0])
padded_char_len = preds.shape[1]
preds.shape, samples[0][1].shape
idx = 0
unpadded_lens = samples[0][1][idx]
char_len = unpadded_lens[0].numpy()
spec_len = unpadded_lens[1].numpy()
print('Unpadded:', unpadded_lens)
xmax = spec_len
plt.figure(figsize=(15, 6))
# for k in range(0, padded_char_len):
# for k in range(0, 15):
# for k in [0, 1, 2, 3, 4, 5, 6, 7, 8, -2, -1]:
# for k in [0, 1, 2, -2, -1]:
for k in range(0, char_len, 1):
plt.plot(preds[idx, k, 0:spec_len].numpy(), label=str(k))
# plt.plot(preds[idx, k, :].numpy())
plt.ylim(0, 1)
# plt.show()
# plt.legend()
plt.xlim(0, xmax)
audio_data = samples[0][0]['waveform'][idx]
txt_data = tf.strings.join(samples[0][0]['char_seq'][idx]).numpy().decode('UTF-8').replace('[PAD]', '')
logmel = m_logmel(tf.expand_dims(
audio_data, axis=0)
)
print(logmel.shape, txt_data)
# t = tf.cast(tf.range(0, logmel.shape[1]), tf.float32) * 256.0 / tf.cast(sr, tf.float32)
# mels = tf.range(0, logmel.shape[2], delta=1)
plt.figure(figsize=(15, 6))
plt.pcolormesh(
# t.numpy(),
# mels.numpy(),
tf.transpose(logmel[0]).numpy()
)
plt.xlim(0, xmax)
plt.show()
###Output
_____no_output_____ |
docs/guides/repl/Matter - Multi Fabric Commissioning.ipynb | ###Markdown
Multi Fabric - Commissioning and Interactions. This walks through creating multiple controllers on multiple fabrics, using those controllers to commission a target onto those fabrics and finally, interacting with them using the interaction model. FabricAdmins and Controllers: The `FabricAdmin` class (present in the `chip.FabricAdmin` package) is responsible for administering a fabric. It houses the Fabric ID and Index, as well as an RCAC and ICAC that provide the certificate material grounding that fabric. The `FabricAdmin` can be used to vend `ChipDeviceController` objects that represent a controller instance with a specific identity grounded in the admin's fabric. This controller can then be used to commission and interact with devices. Clear Persisted Storage: Let's clear out our persisted storage (if one exists) to start from a clean slate.
###Code
import os, subprocess
if os.path.isfile('/tmp/repl-storage.json'):
os.remove('/tmp/repl-storage.json')
# So that the all-clusters-app won't boot with stale prior state.
os.system('rm -rf /tmp/chip_*')
###Output
_____no_output_____
###Markdown
Initialization: Let's begin by importing some key modules that make it easier for us to interact with the Matter stack. `ChipReplStartup.py` is run within the global namespace, so all of its imports are made available here. > **NOTE**: _This is not needed if you launch the REPL from the command-line._
###Code
import chip.native
import pkgutil
module = pkgutil.get_loader('chip.ChipReplStartup')
%run {module.path}
###Output
_____no_output_____
###Markdown
At startup, the REPL will attempt to find any previously configured fabrics stored in persisted storage. If it can't find any (as is the case here), it will construct a default `FabricAdmin` object on Fabric 1 (Index 1) as well as construct a device controller (`devCtrl`) on that fabric.
###Code
fabricAdmins
devCtrl
###Output
_____no_output_____
###Markdown
Commission onto Fabric 1. Launch Server: Let's launch an instance of the `chip-all-clusters-app`.
###Code
import time, os
import subprocess
os.system('pkill -f chip-all-clusters-app')
time.sleep(1)
# The location of the all-clusters-app in the cloud playground is one level higher - adjust for this by testing for file presence.
if (os.path.isfile('../../../out/debug/chip-all-clusters-app')):
appPath = '../../../out/debug/chip-all-clusters-app'
else:
appPath = '../../../../out/debug/chip-all-clusters-app'
process = subprocess.Popen(appPath, stdout=subprocess.DEVNULL)
time.sleep(1)
###Output
_____no_output_____
###Markdown
Commission TargetCommission the target onto Fabric 1 using the default device controller instance with a NodeId of 1.
###Code
devCtrl.CommissionIP(b'127.0.0.1', 20202021, 2)
###Output
2022-01-25 16:59:00 johnsj-macbookpro1.roam.corp.google.com chip.CTL[27921] ERROR Unable to find country code, defaulting to WW
2022-01-25 16:59:00 johnsj-macbookpro1.roam.corp.google.com chip.SC[27921] ERROR The device does not support GetClock_RealTimeMS() API. This will eventually result in CASE session setup failures.
###Markdown
Read OpCreds Cluster: Read out the OpCreds cluster to confirm membership into Fabric 1.
###Code
await devCtrl.ReadAttribute(2, [(Clusters.OperationalCredentials.Attributes.FabricsList)], fabricFiltered=False)
###Output
_____no_output_____
###Markdown
Commission onto Fabric 2. Create new FabricAdmin.
###Code
import chip.FabricAdmin as FabricAdmin
fabric2 = FabricAdmin.FabricAdmin(fabricId = 2, fabricIndex = 2)
###Output
New FabricAdmin: FabricId: 2(2)
###Markdown
Here's a brief peek at the JSON data that is in the persisted storage file.
###Code
builtins.chipStack.GetStorageManager().jsonData
devCtrl2 = fabric2.NewController()
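# Pattern recap (added; names as used in this guide): a FabricAdmin owns a
# fabric's credentials, and NewController() vends a controller whose identity
# is grounded in that fabric. A hypothetical third fabric would be:
#   fabric3 = FabricAdmin.FabricAdmin(fabricId = 3, fabricIndex = 3)
#   devCtrl3 = fabric3.NewController()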
###Output
Allocating new controller with FabricId: 2(2), NodeId: 1
###Markdown
Open Commissioning Window
###Code
await devCtrl.SendCommand(2, 0, Clusters.AdministratorCommissioning.Commands.OpenBasicCommissioningWindow(180))
devCtrl2.CommissionIP(b'127.0.0.1', 20202021, 2)
###Output
2022-01-25 16:59:00 johnsj-macbookpro1.roam.corp.google.com chip.CTL[27921] ERROR Unable to find country code, defaulting to WW
2022-01-25 16:59:00 johnsj-macbookpro1.roam.corp.google.com chip.SC[27921] ERROR The device does not support GetClock_RealTimeMS() API. This will eventually result in CASE session setup failures.
###Markdown
Read OpCreds Cluster: Read out the OpCreds cluster to confirm membership into Fabric 2.
###Code
await devCtrl2.ReadAttribute(2, [(Clusters.OperationalCredentials.Attributes.FabricsList)], fabricFiltered=False)
###Output
_____no_output_____
###Markdown
Relaunch REPL: Let's simulate re-launching the REPL to showcase the capabilities of the persistent storage and its mechanics.
###Code
import chip.native
import pkgutil
module = pkgutil.get_loader('chip.ChipReplStartup')
%run {module.path}
###Output
_____no_output_____
###Markdown
The REPL has now loaded the two fabrics that were created in the previous session into the `fabricAdmins` variable. It has also created a default controller on the first fabric in that list (Fabric 1) as `devCtrl`. Establish CASE and Read OpCreds: To prove that we do indeed have two distinct fabrics and controllers on each fabric, let's go ahead and update the label of each fabric. To do so, you'd need to successfully establish a CASE session through a controller on the respective fabric, and call the `UpdateFabricLabel` command. Underneath the covers, each device controller will do operational discovery of the NodeId being read and establish a CASE session before issuing the IM interaction.
###Code
await devCtrl.SendCommand(2, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel("Fabric1Label"))
await devCtrl.ReadAttribute(2, [(Clusters.OperationalCredentials.Attributes.FabricsList)], fabricFiltered=False)
###Output
2022-01-25 16:59:00 johnsj-macbookpro1.roam.corp.google.com chip.SC[27921] ERROR The device does not support GetClock_RealTimeMS() API. This will eventually result in CASE session setup failures.
###Markdown
Instantiate a controller on fabric 2 and use it to read out the op creds from that fabric.
###Code
devCtrl2 = fabricAdmins[1].NewController()
await devCtrl2.SendCommand(2, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel("Fabric2Label"))
await devCtrl2.ReadAttribute(2, [(Clusters.OperationalCredentials.Attributes.FabricsList)], fabricFiltered=False)
devCtrl2.Shutdown()
###Output
_____no_output_____
Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2020-09-19.ipynb | ###Markdown
RadarCOVID-Report Data Extraction
###Code
import datetime
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import dataframe_image as dfi
import matplotlib.ticker
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams['figure.figsize'] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = extraction_datetime.strftime("%Y-%m-%d@%H")  # reuse the timestamp captured above for consistency
###Output
_____no_output_____
###Markdown
COVID-19 Cases
###Code
spain_region_country_name = "Spain"
spain_region_country_code = "ES"
confirmed_df = pd.read_csv("https://covid19tracking.narrativa.com/csv/confirmed.csv")
radar_covid_countries = {spain_region_country_name}
# radar_covid_regions = { ... }
confirmed_df = confirmed_df[confirmed_df["Country_EN"].isin(radar_covid_countries)]
# confirmed_df = confirmed_df[confirmed_df["Region"].isin(radar_covid_regions)]
# set(confirmed_df.Region.tolist()) == radar_covid_regions
confirmed_country_columns = list(filter(lambda x: x.startswith("Country_"), confirmed_df.columns))
confirmed_regional_columns = confirmed_country_columns + ["Region"]
confirmed_df.drop(columns=confirmed_regional_columns, inplace=True)
confirmed_df = confirmed_df.sum().to_frame()
confirmed_df.tail()
confirmed_df.reset_index(inplace=True)
confirmed_df.columns = ["sample_date_string", "cumulative_cases"]
confirmed_df.sort_values("sample_date_string", inplace=True)
confirmed_df["new_cases"] = confirmed_df.cumulative_cases.diff()
confirmed_df["rolling_mean_new_cases"] = confirmed_df.new_cases.rolling(7).mean().round()
confirmed_df.tail()
extraction_date_confirmed_df = \
confirmed_df[confirmed_df.sample_date_string == extraction_date]
extraction_previous_date_confirmed_df = \
confirmed_df[confirmed_df.sample_date_string == extraction_previous_date].copy()
if extraction_date_confirmed_df.empty and \
not extraction_previous_date_confirmed_df.empty:
extraction_previous_date_confirmed_df["sample_date_string"] = extraction_date
extraction_previous_date_confirmed_df["new_cases"] = \
extraction_previous_date_confirmed_df.rolling_mean_new_cases
extraction_previous_date_confirmed_df["cumulative_cases"] = \
extraction_previous_date_confirmed_df.new_cases + \
extraction_previous_date_confirmed_df.cumulative_cases
confirmed_df = confirmed_df.append(extraction_previous_date_confirmed_df)
confirmed_df.tail()
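# The block above backfills a synthetic row for today when the upstream CSV
# lags by a day: today's new_cases is approximated by yesterday's 7-day
# rolling mean and added onto yesterday's cumulative total, so later joins on
# sample_date_string == extraction_date still find a row.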
confirmed_df[["new_cases", "rolling_mean_new_cases"]].plot()
###Output
_____no_output_____
###Markdown
Extract API TEKs
###Code
from Modules.RadarCOVID import radar_covid
exposure_keys_multi_region_df = \
radar_covid.download_last_radar_covid_exposure_keys(days=14)
exposure_keys_multi_region_df = exposure_keys_multi_region_df[[
"sample_date_string", "source_url", "region", "key_data"]]
active_regions = \
exposure_keys_multi_region_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
multi_region_summary_df = exposure_keys_multi_region_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_region_summary_df.rename(columns={"key_data": "tek_count"}, inplace=True)
multi_region_summary_df.head()
exposure_keys_multi_region_excluding_spain_df = \
exposure_keys_multi_region_df[exposure_keys_multi_region_df.region != spain_region_country_code]
active_regions_excluding_spain = \
exposure_keys_multi_region_excluding_spain_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions_excluding_spain
exposure_keys_df = exposure_keys_multi_region_df[
exposure_keys_multi_region_df.region == spain_region_country_code]
exposure_keys_df.head()
exposure_keys_summary_df = \
exposure_keys_df.groupby(["sample_date_string", "region"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "tek_count"}, inplace=True)
exposure_keys_summary_df.head()
###Output
_____no_output_____
###Markdown
Dump API TEKs
###Code
tek_list_df = exposure_keys_df[["sample_date_string", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
"sample_date").tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
"Data/TEKs/Current/RadarCOVID-TEKs.json",
lines=True, orient="records")
tek_list_df.drop(columns=["extraction_date_with_hour"]).to_json(
"Data/TEKs/Daily/RadarCOVID-TEKs-" + extraction_date + ".json",
lines=True, orient="records")
tek_list_df.to_json(
"Data/TEKs/Hourly/RadarCOVID-TEKs-" + extraction_date_with_hour + ".json",
lines=True, orient="records")
tek_list_df.head()
###Output
_____no_output_____
###Markdown
Load TEK Dumps
###Code
import glob
def load_extracted_teks(mode, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame()
paths = list(reversed(sorted(glob.glob(f"Data/TEKs/{mode}/RadarCOVID-TEKs-*.json"))))
if limit:
paths = paths[:limit]
for path in paths:
logging.info(f"Loading TEKs from '{path}'...")
iteration_extracted_teks_df = pd.read_json(path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
return extracted_teks_df
###Output
_____no_output_____
###Markdown
Daily New TEKs
###Code
daily_extracted_teks_df = load_extracted_teks(mode="Daily", limit=14)
daily_extracted_teks_df.head()
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "new_tek_count",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.head()
new_tek_devices_df = daily_extracted_teks_df.copy()
new_tek_devices_df["new_sample_extraction_date"] = \
pd.to_datetime(new_tek_devices_df.sample_date) + datetime.timedelta(1)
new_tek_devices_df["extraction_date"] = pd.to_datetime(new_tek_devices_df.extraction_date)
new_tek_devices_df = new_tek_devices_df[
new_tek_devices_df.new_sample_extraction_date == new_tek_devices_df.extraction_date]
new_tek_devices_df.head()
new_tek_devices_df.set_index("extraction_date", inplace=True)
new_tek_devices_df = new_tek_devices_df.tek_list.apply(lambda x: len(set(x))).to_frame()
new_tek_devices_df.reset_index(inplace=True)
new_tek_devices_df.rename(columns={
"extraction_date": "sample_date_string",
"tek_list": "new_tek_devices"}, inplace=True)
new_tek_devices_df["sample_date_string"] = new_tek_devices_df.sample_date_string.dt.strftime("%Y-%m-%d")
new_tek_devices_df.head()
###Output
_____no_output_____
###Markdown
Hourly New TEKs
###Code
hourly_extracted_teks_df = load_extracted_teks(mode="Hourly", limit=24)
hourly_extracted_teks_df.head()
hourly_tek_list_df = hourly_extracted_teks_df.groupby("extraction_date_with_hour").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
hourly_tek_list_df = hourly_tek_list_df.set_index("extraction_date_with_hour").sort_index(ascending=True)
hourly_new_tek_df = hourly_tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
hourly_new_tek_df.rename(columns={
"tek_list": "new_tek_count"}, inplace=True)
hourly_new_tek_df.tail()
hourly_new_tek_devices_df = hourly_extracted_teks_df.copy()
hourly_new_tek_devices_df["new_sample_extraction_date"] = \
pd.to_datetime(hourly_new_tek_devices_df.sample_date) + datetime.timedelta(1)
hourly_new_tek_devices_df["extraction_date"] = pd.to_datetime(hourly_new_tek_devices_df.extraction_date)
hourly_new_tek_devices_df = hourly_new_tek_devices_df[
hourly_new_tek_devices_df.new_sample_extraction_date == hourly_new_tek_devices_df.extraction_date]
hourly_new_tek_devices_df.set_index("extraction_date_with_hour", inplace=True)
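# Hourly dumps within one extraction day are cumulative, so the per-hour device
# count is the diff of the running unique-device total (fillna keeps the first hour).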
hourly_new_tek_devices_df_ = pd.DataFrame()
for i, chunk_df in hourly_new_tek_devices_df.groupby("extraction_date"):
chunk_df = chunk_df.copy()
chunk_df.sort_index(inplace=True)
chunk_tek_count_df = chunk_df.tek_list.apply(lambda x: len(set(x)))
chunk_df = chunk_tek_count_df.diff().fillna(chunk_tek_count_df).to_frame()
hourly_new_tek_devices_df_ = hourly_new_tek_devices_df_.append(chunk_df)
hourly_new_tek_devices_df = hourly_new_tek_devices_df_
hourly_new_tek_devices_df.reset_index(inplace=True)
hourly_new_tek_devices_df.rename(columns={
"tek_list": "new_tek_devices"}, inplace=True)
hourly_new_tek_devices_df.tail()
hourly_summary_df = hourly_new_tek_df.merge(
hourly_new_tek_devices_df, on=["extraction_date_with_hour"], how="outer")
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df.tail()
###Output
_____no_output_____
###Markdown
Data Merge
###Code
result_summary_df = exposure_keys_summary_df.merge(new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(new_tek_devices_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(confirmed_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["tek_count_per_new_case"] = \
result_summary_df.tek_count / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_count_per_new_case"] = \
result_summary_df.new_tek_count / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_devices_per_new_case"] = \
result_summary_df.new_tek_devices / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_count_per_new_tek_device"] = \
result_summary_df.new_tek_count / result_summary_df.new_tek_devices
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df.set_index("sample_date", inplace=True)
result_summary_df = result_summary_df.sort_index(ascending=False)
###Output
_____no_output_____
###Markdown
Report Results Summary Table
###Code
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[[
"region",
"rolling_mean_new_cases",
"tek_count",
"new_tek_count",
"new_tek_devices",
"tek_count_per_new_case",
"new_tek_count_per_new_case",
"new_tek_devices_per_new_case",
"new_tek_count_per_new_tek_device"]]
result_summary_df
###Output
_____no_output_____
###Markdown
Summary Plots
###Code
summary_ax_list = result_summary_df[[
"rolling_mean_new_cases",
"tek_count",
"new_tek_count",
"new_tek_devices",
"new_tek_count_per_new_tek_device",
"new_tek_devices_per_new_case"
]].sort_index(ascending=True).plot.bar(
title=f"Summary",
rot=45, subplots=True, figsize=(15, 22), legend=False)
ax_ = summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
ax_.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
_ = ax_.set_xticklabels(result_summary_df.index.strftime("%Y-%m-%d").tolist())
###Output
_____no_output_____
###Markdown
Hourly Summary Plots
###Code
hourly_summary_ax_list = hourly_summary_df.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist())
###Output
_____no_output_____
###Markdown
Multi-Region Summary Table
###Code
multi_region_summary_df
###Output
_____no_output_____
###Markdown
Publish Results
###Code
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
media_path = get_temporary_image_path()
dfi.export(df, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(df=result_summary_df)
hourly_summary_plots_image_path = save_temporary_plot_image(ax=hourly_summary_ax_list)
multi_region_summary_table_image_path = save_temporary_dataframe_image(df=multi_region_summary_df)
###Output
_____no_output_____
###Markdown
Save Results
###Code
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(report_resources_path_prefix + "Summary-Table.html")
multi_region_summary_df.to_csv(report_resources_path_prefix + "Multi-Region-Summary-Table.csv")
_ = shutil.copyfile(summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(summary_table_image_path, report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(multi_region_summary_table_image_path, report_resources_path_prefix + "Multi-Region-Summary-Table.png")
report_daily_url_pattern = \
"https://github.com/pvieito/RadarCOVID-Report/blob/master/Notebooks/" \
"RadarCOVID-Report/{report_type}/RadarCOVID-Report-{report_date}.ipynb"
report_daily_url = report_daily_url_pattern.format(
report_type="Daily", report_date=extraction_date)
report_hourly_url = report_daily_url_pattern.format(
report_type="Hourly", report_date=extraction_date_with_hour)
###Output
_____no_output_____
###Markdown
Publish on README
###Code
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
summary_table_html = result_summary_df.to_html()
multi_region_summary_table_html = multi_region_summary_df.to_html()
readme_contents = readme_contents.format(
summary_table_html=summary_table_html,
multi_region_summary_table_html=multi_region_summary_table_html,
report_url_with_hour=report_hourly_url,
extraction_date_with_hour=extraction_date_with_hour)
with open("README.md", "w") as f:
f.write(readme_contents)
###Output
_____no_output_____
###Markdown
Publish on Twitter
###Code
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule":
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
hourly_summary_plots_media = api.media_upload(hourly_summary_plots_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
hourly_summary_plots_media.media_id,
]
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
new_teks = extraction_date_result_summary_df.new_tek_count.sum().astype(int)
new_teks_last_hour = extraction_date_result_hourly_summary_df.new_tek_count.sum().astype(int)
new_devices = extraction_date_result_summary_df.new_tek_devices.sum().astype(int)
new_devices_last_hour = extraction_date_result_hourly_summary_df.new_tek_devices.sum().astype(int)
new_tek_count_per_new_tek_device = \
extraction_date_result_summary_df.new_tek_count_per_new_tek_device.sum()
new_tek_devices_per_new_case = \
extraction_date_result_summary_df.new_tek_devices_per_new_case.sum()
status = textwrap.dedent(f"""
Report Update – {extraction_date_with_hour}
#ExposureNotification #RadarCOVID
Shared Diagnoses Day Summary:
- New TEKs: {new_teks} ({new_teks_last_hour:+d} last hour)
- New Devices: {new_devices} ({new_devices_last_hour:+d} last hour, {new_tek_count_per_new_tek_device:.2} TEKs/device)
- Usage Ratio: {new_tek_devices_per_new_case:.2%} devices/case
Report Link: {report_hourly_url}
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
###Output
_____no_output_____ |
Note-5 DQN与HS300指数择时/D3QN_Vector/emulator_v0/行情终端.ipynb | ###Markdown
Market Data Terminal
###Code
import os
import h5py
import numpy as np
import pandas as pd
# Dataset for talib factor images
# Time, Universe, Minutes, Factors, Days
dataset = h5py.File("dataset_factors.h5", "r")
dataset = dataset["talib_factors"]
# Trading calendar
tradeDays = pd.read_hdf("tradeDays.h5").iloc[23:]
tradeDays.reset_index(drop=True, inplace=True)
# Stock universe
dataset_universe = pd.read_hdf("universe_SH50.h5")
class Terminal(object):
def __init__(self):
self.factors = dataset
self.tradeDays = tradeDays
self.universe = dataset_universe
def step(self, step):
day = self.tradeDays[step]
return self.factors[step], self.universe.loc[day].tolist()
def reset(self):
return self.factors[0], self.universe.iloc[0].tolist()
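# Illustrative usage (a sketch):
# terminal = Terminal()
# factors, tradable = terminal.reset()
# next_factors, next_tradable = terminal.step(1)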
###Output
_____no_output_____ |
Tensorflow/HelloWorld_IrisClassification.ipynb | ###Markdown
Get Data
###Code
import pandas as pd
import numpy as np
import requests
import re
import seaborn
import matplotlib.pyplot as plt
import tensorflow as tf
#Download the dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
r = requests.get(url, allow_redirects=True)
filename = "raw.csv"
open(filename, 'wb').write(r.content)
#load the dataset into memory
dataset = pd.read_csv('raw.csv', header=None, names=['sepal_length','sepal_width','petal_length','petal_width','species'])
dataset.head()
#Plot the dataset
seaborn.pairplot(dataset, hue="species", size=2, diag_kind="kde")
plt.show()
###Output
_____no_output_____
###Markdown
Convert to Classes
###Code
from sklearn.preprocessing import LabelBinarizer
species_lb = LabelBinarizer()
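# LabelBinarizer one-hot encodes the three species, e.g. 'Iris-setosa' -> [1, 0, 0].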
Y = species_lb.fit_transform(dataset.species.values)
###Output
_____no_output_____
###Markdown
Obtain Features
###Code
from sklearn.preprocessing import normalize
FEATURES = dataset.columns[0:4]
X_data = dataset[FEATURES].as_matrix()
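# sklearn's normalize rescales each sample (row) to unit L2 norm by default.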
X_data = normalize(X_data)
###Output
_____no_output_____
###Markdown
Split Test Train
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data, Y, test_size=0.3, random_state=1)
X_train.shape
###Output
_____no_output_____
###Markdown
Train Model
###Code
import tensorflow as tf
# Parameters
learning_rate = 0.01
training_epochs = 100
# Neural Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 128 # 2nd layer number of neurons
n_input = X_train.shape[1] # input shape (105, 4)
n_classes = y_train.shape[1] # classes to predict
# tf Graph input
X = tf.placeholder("float", shape=[None, n_input])
y = tf.placeholder("float", shape=[None, n_classes])
# Dictionary of Weights and Biases
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Model Forward Propagation step
def forward_propagation(x):
# Hidden layer1
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output fully connected layer
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Model Outputs
yhat = forward_propagation(X)
ypredict = tf.argmax(yhat, axis=1)
# Backward propagation
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
from datetime import datetime
startTime = datetime.now()
with tf.Session() as sess:
sess.run(init)
#writer.add_graph(sess.graph)
#EPOCHS
for epoch in range(training_epochs):
#Stochasting Gradient Descent
for i in range(len(X_train)):
summary = sess.run(train_op, feed_dict={X: X_train[i: i + 1], y: y_train[i: i + 1]})
train_accuracy = np.mean(np.argmax(y_train, axis=1) == sess.run(ypredict, feed_dict={X: X_train, y: y_train}))
test_accuracy = np.mean(np.argmax(y_test, axis=1) == sess.run(ypredict, feed_dict={X: X_test, y: y_test}))
print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%" % (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
#print("Epoch = %d, train accuracy = %.2f%%" % (epoch + 1, 100. * train_accuracy))
sess.close()
print("Time taken:", datetime.now() - startTime)
###Output
Epoch = 1, train accuracy = 69.52%, test accuracy = 60.00%
Epoch = 2, train accuracy = 87.62%, test accuracy = 91.11%
Epoch = 3, train accuracy = 90.48%, test accuracy = 93.33%
Epoch = 4, train accuracy = 96.19%, test accuracy = 95.56%
Epoch = 5, train accuracy = 94.29%, test accuracy = 95.56%
Epoch = 6, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 7, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 8, train accuracy = 92.38%, test accuracy = 95.56%
Epoch = 9, train accuracy = 89.52%, test accuracy = 93.33%
Epoch = 10, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 11, train accuracy = 92.38%, test accuracy = 95.56%
Epoch = 12, train accuracy = 85.71%, test accuracy = 75.56%
Epoch = 13, train accuracy = 86.67%, test accuracy = 93.33%
Epoch = 14, train accuracy = 94.29%, test accuracy = 97.78%
Epoch = 15, train accuracy = 95.24%, test accuracy = 97.78%
Epoch = 16, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 17, train accuracy = 92.38%, test accuracy = 95.56%
Epoch = 18, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 19, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 20, train accuracy = 93.33%, test accuracy = 95.56%
Epoch = 21, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 22, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 23, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 24, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 25, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 26, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 27, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 28, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 29, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 30, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 31, train accuracy = 93.33%, test accuracy = 95.56%
Epoch = 32, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 33, train accuracy = 92.38%, test accuracy = 95.56%
Epoch = 34, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 35, train accuracy = 97.14%, test accuracy = 100.00%
Epoch = 36, train accuracy = 92.38%, test accuracy = 95.56%
Epoch = 37, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 38, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 39, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 40, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 41, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 42, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 43, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 44, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 45, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 46, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 47, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 48, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 49, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 50, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 51, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 52, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 53, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 54, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 55, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 56, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 57, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 58, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 59, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 60, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 61, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 62, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 63, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 64, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 65, train accuracy = 98.10%, test accuracy = 100.00%
Epoch = 66, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 67, train accuracy = 97.14%, test accuracy = 100.00%
Epoch = 68, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 69, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 70, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 71, train accuracy = 97.14%, test accuracy = 100.00%
Epoch = 72, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 73, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 74, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 75, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 76, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 77, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 78, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 79, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 80, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 81, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 82, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 83, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 84, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 85, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 86, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 87, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 88, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 89, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 90, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 91, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 92, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 93, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 94, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 95, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 96, train accuracy = 97.14%, test accuracy = 97.78%
Epoch = 97, train accuracy = 92.38%, test accuracy = 93.33%
Epoch = 98, train accuracy = 96.19%, test accuracy = 97.78%
Epoch = 99, train accuracy = 91.43%, test accuracy = 93.33%
Epoch = 100, train accuracy = 98.10%, test accuracy = 97.78%
Time taken: 0:00:03.693000
|
001-Jupyter/001-Tutorials/003-IPython-in-Depth/examples/IPython Kernel/Terminal Usage.ipynb | ###Markdown
A few things that work best/only at the IPython terminal or Qt console clients. Running code with `%run`
###Code
%%writefile script.py
x = 10
y = 20
z = x+y
print('z is: %s' % z)
%run script
x
###Output
_____no_output_____
###Markdown
Event loop and GUI integration. The `%gui` magic enables the integration of GUI event loops with the interactive execution loop, allowing you to run GUI code without blocking IPython. Consider, for example, the execution of Qt-based code. Once we enable the Qt GUI support:
###Code
%gui qt
###Output
_____no_output_____
###Markdown
We can define a simple Qt application class (simplified version from [this Qt tutorial](http://zetcode.com/tutorials/pyqt4/firstprograms)):
###Code
import sys
from PyQt4 import QtGui, QtCore
class SimpleWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 200, 80)
self.setWindowTitle('Hello World')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(10, 10, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'),
self, QtCore.SLOT('close()'))
###Output
_____no_output_____
###Markdown
And now we can instantiate it:
###Code
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([])
sw = SimpleWindow()
sw.show()
from IPython.lib.guisupport import start_event_loop_qt4
start_event_loop_qt4(app)
###Output
_____no_output_____
###Markdown
But IPython still remains responsive:
###Code
10+2
###Output
_____no_output_____
###Markdown
The `%gui` magic can be similarly used to control Wx, Tk, glut and pyglet applications, [as can be seen in our examples](https://github.com/ipython/ipython/tree/master/examples/lib). Embedding IPython in a terminal application
###Code
%%writefile simple-embed.py
# This shows how to use the new top-level embed function. It is a simpler
# API that manages the creation of the embedded shell.
from IPython import embed
a = 10
b = 20
embed(header='First time', banner1='')
c = 30
d = 40
embed(header='The second time')
###Output
_____no_output_____
###Markdown
The example in kernel-embedding shows how to embed a full kernel into an application and how to connect to this kernel from an external process. Logging terminal sessions and transitioning to a notebook. The `%logstart` magic lets you log a terminal session with various degrees of control, and the `%notebook` magic will convert an interactive console session into a notebook with all input cells already created for you (but no output). A minimal sketch of how these might be invoked (assumed syntax; flags vary across IPython versions):
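###Code
# Hypothetical invocations, not executed here:
# %logstart mysession.py append   # start logging input lines to mysession.py
# %notebook session.ipynb         # export this session's history to a notebook
###Output
_____no_output_____
###Markdown
Cleanup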
###Code
!rm -f script.py
!rm -f simple-embed.py
###Output
_____no_output_____ |
01 - 03 - Convolutional kernels and pooling.ipynb | ###Markdown
UtilityResize and crop image to be square
###Code
def open_and_resize(file, resize=(500,500)):
if file.startswith('http'):
response = requests.get(file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(file).convert('RGB')
w, h = image.size
left = int((w - h) / 2 if w > h else 0)
upper = int((h - w) / 2 if h > w else 0)
right = int(w - ((w - h) / 2) if w > h else w)
lower = int(h - ((h - w) / 2) if h > w else h)
image = image.crop((left, upper, right, lower))
image.thumbnail(resize)
return image
###Output
_____no_output_____
###Markdown
Convolution. Implements filtering on a single image with the selected pad and stride.
###Code
def conv2d(X, W, pad=1, stride=1):
# filter\kernel size
f, f, _ = W.shape
n_C = 1
# new output volume
    n_H = int(np.floor((X.shape[0] - f + 2 * pad) / stride)) + 1
    n_W = int(np.floor((X.shape[1] - f + 2 * pad) / stride)) + 1
Z = np.zeros((n_H, n_W, n_C))
# padding
x = np.pad(X, ((pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=(0, 0))
for h in range(n_H):
for w in range(n_W):
for c in range(n_C):
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
Z[h, w, c] = np.sum(W[:,:,c] * x[vert_start:vert_end,horiz_start:horiz_end,:])
return Z
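# Quick sanity check (illustrative): a 5x5 RGB input with a 3x3 kernel,
# pad=1 and stride=1 keeps the 5x5 spatial size.
# conv2d(np.ones((5, 5, 3)), np.ones((3, 3, 3))).shape  # -> (5, 5, 1)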
###Output
_____no_output_____
###Markdown
Max Pooling. Implements max pooling with the given pool size and stride.
###Code
def max_pooling2d(X, pool_size=2, stride=2):
# new output volume
    n_H = int(np.floor((X.shape[0] - pool_size) / stride)) + 1
    n_W = int(np.floor((X.shape[1] - pool_size) / stride)) + 1
n_C = X.shape[2]
Z = np.zeros((n_H, n_W, n_C))
for h in range(n_H):
for w in range(n_W):
for c in range(n_C):
vert_start = h * stride
vert_end = vert_start + pool_size
horiz_start = w * stride
horiz_end = horiz_start + pool_size
Z[h, w, c] = np.amax(X[vert_start:vert_end,horiz_start:horiz_end,c])
return Z
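# Illustrative check: pooling a 4x4 single-channel map with pool_size=2 and
# stride=2 halves each spatial dimension.
# max_pooling2d(np.ones((4, 4, 1))).shape  # -> (2, 2, 1)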
###Output
_____no_output_____
###Markdown
Filter\Kernel
###Code
w = np.zeros((3, 3, 3))
#t = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) / 3
t = np.array([[0, 0, 0], [0, 3., 0], [0, 0, 0]]) / 3
w[:,:,0] = t
w[:,:,1] = t
w[:,:,2] = t
###Output
_____no_output_____
###Markdown
Example Filter Application. Run the filter on a selected image.
###Code
image = np.array(open_and_resize("model.png", resize=(200,200)))
plt.imshow(image)
n = conv2d(image, w)
n.shape
plt.imshow(Image.fromarray(n[:,:,0]).convert("L"), cmap="gray")
x = max_pooling2d(n, pool_size=5)
plt.imshow(Image.fromarray(x[:,:,0]).convert("L"), cmap='gray')
###Output
_____no_output_____
assignment/Assignment_2.ipynb | ###Markdown
Assignment 2: Numpy and pandas. This assignment contains 3 questions, with details below. The due date is October 5 (Friday), 2018, 23:59. Each late day will result in a 20% loss of total points. Question 1 (30 points): Numpy is fast! Suppose we need to compute the cumulative sums of $\sum_{i=0}^n \alpha^i$ for given $\alpha$ and $n$. For example, when $\alpha=0.5$ and $n=10$, the cumulative sum of $\sum_{i=0}^{10} 0.5^i$ returns `[1.0, 1.5, 1.75, 1.875, 1.9375, 1.96875, 1.984375, 1.9921875, 1.99609375, 1.998046875, 1.9990234375]`. As a courtesy, I implement the following function `cum_sum`, which can be used to generate a list of cumulative sums when iterating through a range generator.
###Code
def cum_sum(alpha, n):
current = 1.0
sum = current
for i in range(n):
current = current * alpha
sum = sum + current
return sum
cumsum = []
for i in range(11):
cumsum.append(cum_sum(0.5, i))
print(cumsum)
###Output
_____no_output_____
###Markdown
We can measure how long this code takes to run using the `time` module, as below:
###Code
import time
begin = time.time()
n_samples = 10000
cumsum = []
for i in range(n_samples):
cumsum.append(cum_sum(0.5, i))
print(cumsum)
end = time.time()
time0 = end-begin
print("Time took to run: {} seconds.".format(time0))
###Output
_____no_output_____
###Markdown
It takes about 3.6 seconds on my machine to run the code with 10,000 samples. Note that this time may vary depending on the memory and CPU of your machine. **Question 1.1** (15 points): Now implement a list comprehension for the same purpose and estimate how much time it takes to generate a list of cumulative sums for 10,000 samples. Hint: you can use the `accumulate` method in the `itertools` module. Check the itertools documentation [here](https://docs.python.org/3/library/itertools.html#itertools.accumulate)
###Code
# Question 1
begin = time.time()
n_samples = 10000
# write your code here
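# One possible approach following the itertools hint (a sketch, not the only
# solution): accumulate powers of 0.5, then accumulate again for running sums.
import itertools
import operator
powers = itertools.accumulate([1.0] + [0.5] * (n_samples - 1), operator.mul)
cumsum = list(itertools.accumulate(powers))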
end = time.time()
time1 = end-begin
print("Time took to run: {} seconds.".format(time1))
time0/time1
###Output
_____no_output_____
###Markdown
**Question 1.2** (15 points) Now implement the same computation using numpy and estimate how much time it takes to generate a list of cumulative sums for 10,000 samples (in order to receive full score, your program must be at least 1500 times faster than the for loop). You may receive 5 bonus points if your program is at least 5000 times faster than the for loop!
###Code
begin = time.time()
n_samples = 10000
alpha = 0.5
# write your code here
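# A possible vectorized sketch (one of several): powers of alpha via arange,
# then a running sum via np.cumsum.
import numpy as np
cumsum = np.cumsum(alpha ** np.arange(n_samples))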
print(cumsum)
end = time.time()
time2 = end-begin
print("Time took to run: {} seconds.".format(time2))
###Output
_____no_output_____
###Markdown
Question 2 (30 points): Monte Carlo. Monte Carlo is a city in Monaco where the famous Monte Carlo casino is located. In light of this, Monte Carlo methods (or Monte Carlo experiments) are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. Their essential idea is using randomness to solve problems that might be deterministic in principle. They are often used in physical and mathematical problems and are most useful when it is difficult or impossible to use other approaches. Monte Carlo methods are mainly used in three problem classes: optimization, numerical integration, and generating draws from a probability distribution. **Estimating Pi.** In order to estimate $\pi$, the idea is to simulate random (x, y) points in a 2-D plane whose domain is a square of side 1 unit. Imagine a circle with the same diameter, inscribed inside the square. We can generate a large number of uniformly distributed random points and plot them on the graph. These points can be in any position within the square, i.e. between (0,0) and (1,1). We keep track of the total number of points and the number of points that are inside the circle. If we divide the number of points within the circle, $N_{inner}$, by the total number of points, $N_{total}$, we should get a value that approximates the ratio of the two areas, which is $\pi/4$ (the inscribed circle has area $\pi (1/2)^2 = \pi/4$, while the unit square has area 1). Write a function `approximate_pi` with argument `number_simulations` to approximate the value of $\pi$ using Monte Carlo simulations. You may consider using `numpy.random` to make random draws. Give a rough estimate of how many random draws you may need to achieve accuracy of 99.999% (by comparing with numpy.pi).
###Code
# Question 2
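# A possible solution sketch (assumed implementation, not the official answer):
import numpy as np

def approximate_pi(number_simulations):
    # Uniform (x, y) draws in the unit square; the fraction with x^2 + y^2 <= 1
    # falls inside the quarter circle of radius 1, whose area is pi/4.
    x = np.random.rand(number_simulations)
    y = np.random.rand(number_simulations)
    inside = np.sum(x ** 2 + y ** 2 <= 1.0)
    return 4.0 * inside / number_simulations

print(approximate_pi(1_000_000), np.pi)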
###Output
_____no_output_____ |
transfer_learning_stomata.ipynb | ###Markdown
Author: Hiranya Jayakody. April 2020.Code developed for Smart Robotics Viticulture Group, UNSW, Sydney.Neural Network based on Matterport implementation of Mask-RCNN at https://github.com/matterport/Mask_RCNN PART 1: Install Mask-RCNN repo from Matterport
###Code
!git clone https://github.com/matterport/Mask_RCNN.git
!pip install -r 'Mask_RCNN/requirements.txt'
!cd Mask_RCNN ; python setup.py install
!pip show mask-rcnn
!pip install tensorflow==1.5.1
!pip install keras==2.1.5
###Output
_____no_output_____
###Markdown
PART 2: Set-up Mask-RCNN for training
###Code
import os
import cv2
import glob
import sys
import json
import datetime
import numpy as np
import skimage.draw
import imutils
import imgaug
import statistics as st
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from mrcnn.config import Config
from mrcnn import visualize
from mrcnn import model as modellib, utils
from matplotlib import pyplot as plt
#mount necessary folders
from google.colab import drive
drive.mount('/content/drive')
#define all directories
CWD = 'drive/My Drive/Colab Notebooks/'
STOMATA_WEIGHTS_PATH = os.path.join(CWD,'2020_mask_rcnn_stomata_51.h5')
WEIGHT_FILE_NAME = 'stomata'
CLASS_NAME = 'stomata'
DEFAULT_LOGS_DIR = os.path.join(CWD,'logs/')
DATASET_DIR = os.path.join(CWD,'images/')
TRAINING_IMG_DIR = os.path.join(DATASET_DIR,'train/')
# Find mean pixel value for training
DATA_PATH = os.path.join(TRAINING_IMG_DIR,'*jpg')
training_files = glob.glob(DATA_PATH)
avg_b = []
avg_g = []
avg_r = []
print(DATA_PATH)
for img in training_files:
image = cv2.imread(img)
b, g, r = np.average(image, axis = (0,1))
avg_b.append(b)
avg_g.append(g)
avg_r.append(r)
mean_b = np.average(avg_b[:])
mean_g = np.average(avg_g[:])
mean_r = np.average(avg_r[:])
print([mean_b,mean_g,mean_r])
#create custom config class for training
class CustomConfig(Config):
NAME = CLASS_NAME #provide a suitable name
IMAGES_PER_GPU = 2 #set to one for smaller GPUs
NUM_CLASSES = 1+1 #background+number of classes
STEPS_PER_EPOCH = 100 #number of training steps per epoch
RPN_ANCHOR_SCALES =(12,24, 48, 96, 192)
DETECTION_MAX_INSTANCES = 300
DETECTION_MIN_CONFIDENCE = 0.6
LOSS_WEIGHTS = {'mrcnn_bbox_loss': 1.0, 'rpn_class_loss': 1.0, 'mrcnn_mask_loss': 1.0, 'mrcnn_class_loss': 1.0, 'rpn_bbox_loss': 1.0}
IMAGE_MAX_DIM = 1024
IMAGE_MIN_DIM = 800
RPN_NMS_THRESHOLD = 0.9
RPN_TRAIN_ANCHORS_PER_IMAGE = 1024
MEAN_PIXEL = np.array([mean_r,mean_g,mean_b]) #matterport takes the input as RGB
#add augmentation for input data
augmentation = imgaug.augmenters.Sometimes(4/6,imgaug.augmenters.OneOf(
[
imgaug.augmenters.Affine(rotate=(-30, 30)),
imgaug.augmenters.Affine(rotate=(-45, 45)),
imgaug.augmenters.Affine(rotate=(-90, 90)),
]))
#create custom dataset class
class CustomDataset(utils.Dataset):
def load_customdata(self, dataset_dir, subset):
#subset can be either training or validation
#We add the classes here.
self.add_class(CLASS_NAME,1,CLASS_NAME) #add_class(self,source,class_id,class_name)
assert subset in ['train','val']
dataset_dir = os.path.join(dataset_dir,subset)
#next step is to load the annotations
annotations = json.load(open(os.path.join(dataset_dir,'via_region_data.json')))
annotations = list(annotations.values())
#skip unannotated images
annotations = [a for a in annotations if a['regions']]
for a in annotations:
#get the x,y coordinates of points of the polygon
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
# load_mask() requires the image size
image_path = os.path.join(dataset_dir,a['filename'])
image = skimage.io.imread(image_path)
height,width = image.shape[:2]
self.add_image(CLASS_NAME, image_id = a['filename'], path=image_path, width=width, height=height, polygons=polygons) #add_image(self, source, image_id, path, **kwargs)
#override function for load_mask
def load_mask(self, image_id):
#this function generates instance masks for an image
#if not a custom dataset image, delegate to parent class
image_info = self.image_info[image_id]
if image_info['source'] != CLASS_NAME:
            return super(self.__class__, self).load_mask(image_id)
#convert polygons to bitmap mask of shape
info = self.image_info[image_id]
mask = np.zeros([info['height'], info['width'],len(info['polygons'])], dtype= np.uint8)
for i,p in enumerate(info['polygons']):
rr,cc = skimage.draw.polygon(p['all_points_y'],p['all_points_x'])
mask[rr,cc,i] = 1
# return mask and class IDs. for 1 class we return 1s (CAN WE MODIFY THIS?)
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
    #override function for image_reference
def image_reference(self, image_id):
#returns the path of the image
info = self.image_info[image_id]
if info['source'] == CLASS_NAME:
return info['path']
else:
            return super(self.__class__, self).image_reference(image_id)
#set-up training function
def train(model):
#training dataset
dataset_train = CustomDataset()
dataset_train.load_customdata(DATASET_DIR, 'train')
dataset_train.prepare()
#validation dataset
dataset_val = CustomDataset()
dataset_val.load_customdata(DATASET_DIR, 'val')
dataset_val.prepare()
print('training network heads')
#for transfer learning often training the heads layers should be enough. You can modify the number of epochs here.
model.train(dataset_train, dataset_val, learning_rate = config.LEARNING_RATE, epochs = 40, layers='heads', augmentation = augmentation)
#otherwise layers = 'all' or 'heads'
###Output
_____no_output_____
###Markdown
PART 3: Execute training
###Code
#initialize config and model for training
config = CustomConfig()
config.display()
#create model and load default weights from stomata model
model = modellib.MaskRCNN(mode='training',config=config, model_dir= DEFAULT_LOGS_DIR)
print('loading weights', STOMATA_WEIGHTS_PATH)
model.load_weights(STOMATA_WEIGHTS_PATH, by_name=True)
train(model)
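# A possible next step (sketch, commented out): reload the latest weights for inference.
# inference_model = modellib.MaskRCNN(mode='inference', config=config,
#                                     model_dir=DEFAULT_LOGS_DIR)
# inference_model.load_weights(inference_model.find_last(), by_name=True)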
###Output
_____no_output_____ |
recommender/agent.ipynb | ###Markdown
Copyright 2021 The Google Research Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
###Code
"""Agent models to generate recommendations."""
import abc
import numpy as np
import scipy
import six
import tensorflow as tf
from recs_ecosystem_creator_rl.recommender import data_utils
@six.add_metaclass(abc.ABCMeta)
class AbstractAgent:
"""Abstract class to generate recommendations."""
def __init__(self, slate_size):
self.slate_size = slate_size
@abc.abstractmethod
def step(self, user_dict, creator_dict, docs):
"""Generates recommendations for each user given observable features of users and candidate documents.
Args:
user_dict: A dictionary of user observed information including user_obs =
A dictionary of key=user_id, value=a list of user observations at all
time steps. user_clicked_docs = A dictionary of key=user_id, value=a
list of user consumed documents (doc, reward, index in the candidate
set). user_terminates = A dictionary of key=user_id, value=boolean
denoting whether this user has terminated or not at the end of
simulation.
creator_dict: A dictionary of creator observed information including
creator_obs = A dict describing all creator observation history, with
key=creator_id, value=a list of creator's all past observations.
creator_recommended_docs = A dict describing all creator recommendation
history, with key=creator_id, value=a list of recommended doc objects.
creator_clicked_docs = A dict describing all creator user-clicked
document history, with key=creator_id, value=a list of user-clicked docs
(document object, user reward). creator_actions = A dictionary of
key=creator_id, value=a list of creator actions(one of
'create'/'stay'/'leave') at current time step. creator_terminates = A
dict to show whether creator terminates or not at current time step,
with key=creator_id, value=True if creator terminates otherwise False.
docs: An ordered dictionary of current document candidate set with
key=doc_id, value=document object.
"""
class RandomAgent(AbstractAgent):
"""Random agent class."""
def __init__(self, slate_size=2):
self.name = 'RandomAgent'
super(RandomAgent, self).__init__(slate_size)
def step(self, user_dict, docs):
return generate_random_slate(self.slate_size, user_dict, docs)
def generate_random_slate(slate_size, user_dict, docs):
"""Generate random slate."""
viable_user_ids = [
u_id for u_id, u_tmnt in user_dict['user_terminates'].items()
if not u_tmnt
]
num_doc = len(docs)
slates = {
u_id: np.random.choice(num_doc, size=slate_size)
for u_id in viable_user_ids
}
probs = {u_id: np.ones(num_doc) / num_doc for u_id in viable_user_ids}
return slates, probs, None
class PolicyGradientAgent(AbstractAgent):
"""PolicyGradient agent."""
def __init__(self,
slate_size=2,
user_embedding_size=10,
document_embedding_size=10,
creator_embedding_size=1,
num_candidates=10,
hidden_sizes=(32, 16),
weight_size=10,
lr=1e-3,
user_model=None,
creator_model=None,
entropy_coeff=0.01,
regularization_coeff=None,
model_path=None,
seed=None,
loss_denom_decay=-1.0,
social_reward_coeff=0.0):
if seed:
tf.random.set_seed(seed)
super(PolicyGradientAgent, self).__init__(slate_size)
self.name = 'EcoAgent'
self.entropy_coeff = entropy_coeff
self.social_reward_coeff = social_reward_coeff
self.user_model = user_model
self.creator_model = creator_model
# Moving average user_utlities and social_rewards denom.
self.sum_label_weights_var = tf.Variable(
0.0, name='sum_label_weights', dtype=tf.float32, trainable=False)
self.loss_denom_decay = loss_denom_decay
self.num_updates = 0
# For environment step preprocessing candidates.
self.creator_hidden_state = None
self.doc_feature = None
# Model.
inputs, outputs = self._construct_graph(user_embedding_size,
document_embedding_size,
creator_embedding_size,
num_candidates, hidden_sizes,
weight_size, regularization_coeff)
self.actor_model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
self.optimizer = tf.keras.optimizers.Adagrad(lr)
# Metrics.
self.train_loss = tf.keras.metrics.Mean('train_loss')
self.train_utility_loss = tf.keras.metrics.Mean('train_utility_loss')
self.train_entropy_loss = tf.keras.metrics.Mean('train_entropy_loss')
self.ckpt = tf.train.Checkpoint(
step=tf.Variable(1),
optimizer=self.optimizer,
value_model=self.actor_model)
self.manager = tf.train.CheckpointManager(
self.ckpt, model_path, max_to_keep=3)
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint:
print('Restored from {}.'.format(self.manager.latest_checkpoint))
else:
print('Initializing from scratch.')
def _construct_graph(self,
user_embedding_size,
document_embedding_size,
creator_embedding_size,
num_candidates,
hidden_sizes,
weight_size,
regularization_coeff=None):
"""Construct network architecture of policy gradient agent."""
if regularization_coeff is not None:
regularizer_obj = tf.keras.regularizers.l2(regularization_coeff)
else:
regularizer_obj = None
user_input_state = tf.keras.layers.Input(
shape=(user_embedding_size), name='user_embedding_state')
document_input_state = tf.keras.layers.Input(
shape=(num_candidates, document_embedding_size),
name='document_feature')
creator_input_state = tf.keras.layers.Input(
shape=(num_candidates, creator_embedding_size),
name='creator_embedding_state')
# User hidden layer is used to embed user to calculate softmax logits.
user_hidden_layer = user_input_state
for i, hidden_size in enumerate(hidden_sizes, 1):
user_hidden_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='relu',
kernel_regularizer=regularizer_obj,
name=f'user_actor_hidden_layer_{i}')(
user_hidden_layer)
user_embedding_weights = tf.keras.layers.Dense(
units=weight_size, activation=None, kernel_regularizer=regularizer_obj)(
user_hidden_layer)
user_embedding_weights = tf.nn.l2_normalize(
user_embedding_weights, axis=-1, name='user_weights')
# User sensitivity to document bias, range [0, 1].
user_sensitivity = tf.keras.layers.Dense(
units=1,
activation='sigmoid',
kernel_regularizer=regularizer_obj,
name='user_sensitivity')(
user_hidden_layer)
# We can also use fixed effects from both users and creators.
# Document hidden layer to embed candidate documents.
candidate_hidden_layer = tf.keras.layers.concatenate(
[document_input_state, creator_input_state], axis=-1)
for i, hidden_size in enumerate(hidden_sizes, 1):
candidate_hidden_layer = tf.keras.layers.Dense(
units=hidden_size,
activation='relu',
kernel_regularizer=regularizer_obj,
name=f'doc-creator_actor_hidden_layer_{i}')(
candidate_hidden_layer)
candidate_embedding_weights = tf.keras.layers.Dense(
units=weight_size, activation=None, kernel_regularizer=regularizer_obj)(
candidate_hidden_layer)
candidate_embedding_weights = tf.nn.l2_normalize(
candidate_embedding_weights, axis=-1, name='document_weights')
# Bias within [-1, 1].
candidate_embedding_bias = tf.squeeze(
tf.keras.layers.Dense(
units=1, activation='tanh',
kernel_regularizer=regularizer_obj)(candidate_hidden_layer),
axis=-1,
name='document_bias')
# Softmax logits = (1 - user_sensitivity) * < user_weights,
# document_weights > + user_sensitivity * document_bias.
# TODO(rhzhan): Experiment with other architecture. For example, add bias
# terms from both users and creators; only bias from creators; etc.
output_log_logits = (1 - user_sensitivity) * tf.linalg.matvec(
candidate_embedding_weights,
user_embedding_weights) + user_sensitivity * candidate_embedding_bias
inputs = [user_input_state, document_input_state, creator_input_state]
return inputs, output_log_logits
def train_step(self, inputs, labels, user_utilities, social_rewards):
"""Training step given mini-batch data."""
self.ckpt.step.assign_add(1)
self.num_updates += 1
user_utilities = tf.cast(user_utilities, dtype=tf.float32)
social_rewards = tf.cast(social_rewards, dtype=tf.float32)
label_weights = (
1 - self.social_reward_coeff
) * user_utilities + self.social_reward_coeff * social_rewards
with tf.GradientTape() as tape:
logits = self.actor_model(inputs, training=True)
p = tf.nn.softmax(logits=logits)
neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
if self.loss_denom_decay >= 0:
# Batch normalization on label weights.
label_weights_denom = tf.reduce_sum(tf.abs(label_weights))
tf.keras.backend.moving_average_update(
self.sum_label_weights_var,
value=label_weights_denom,
momentum=self.loss_denom_decay) # Update moving average.
label_weights_denom = self.sum_label_weights_var / (
1 - self.loss_denom_decay**self.num_updates) # Debias.
label_weights = label_weights / label_weights_denom
utility_loss = tf.reduce_mean(label_weights * neglogp)
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=p, logits=logits)
entropy_loss = -tf.reduce_mean(entropy)
loss = utility_loss + self.entropy_coeff * entropy_loss
grad = tape.gradient(loss, self.actor_model.trainable_variables)
self.optimizer.apply_gradients(
zip(grad, self.actor_model.trainable_variables))
self.train_loss(loss)
self.train_utility_loss(utility_loss)
self.train_entropy_loss(entropy_loss)
def preprocess_candidates(self, creator_dict, docs):
"""Preprocess candidates into creator features and doc features."""
# We are learning creator hidden state using self.creator_model separately.
(creator_hidden_state_dict, creator_rnn_state_dict,
creator_is_saturation_dict) = data_utils.get_creator_hidden_state(
creator_dict, self.creator_model)
# Concatenate document_topic with corresponding creator_hidden_state.
(self.creator_hidden_state, creator_rnn_state, creator_is_saturation,
creator_id, self.doc_feature) = data_utils.align_document_creator(
creator_hidden_state_dict, creator_rnn_state_dict,
creator_is_saturation_dict, docs)
return (self.creator_hidden_state, creator_rnn_state, creator_is_saturation,
creator_id, self.doc_feature)
def step(self, user_dict, docs):
viable_user_ids = [
user_id for user_id, user_tmnt in user_dict['user_terminates'].items()
if not user_tmnt
]
if not user_dict['user_clicked_docs'][viable_user_ids[0]]:
# When no history, generate random slates.
return generate_random_slate(self.slate_size, user_dict, docs)
policy, preprocessed_users = self.get_policy(user_dict)
user_id, p = list(policy.keys()), list(policy.values())
slates = np.argsort(p, axis=-1)[Ellipsis, -self.slate_size:]
return dict(zip(user_id, slates)), policy, preprocessed_users
def get_policy(self, user_dict):
"""Generate policy of given observations."""
# We are learning user hidden state using self.user_model separately.
user_hidden_state_dict = data_utils.get_user_hidden_state(
user_dict, self.user_model)
user_id, user_hidden_state = zip(*user_hidden_state_dict.items())
user_hidden_state = np.array(list(user_hidden_state))
creator_input = np.tile(self.creator_hidden_state,
(len(user_hidden_state), 1, 1))
doc_input = np.tile(self.doc_feature, (len(user_hidden_state), 1, 1))
model_inputs = [user_hidden_state, doc_input, creator_input]
logits = self.actor_model.predict(model_inputs)
p = scipy.special.softmax(logits, axis=-1)
return dict(zip(user_id, p)), dict(zip(user_id, user_hidden_state))
def save(self):
save_path = self.manager.save()
print('Saved checkpoint for step {}: {}'.format(
int(self.ckpt.step), save_path))
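# Illustrative wiring (hypothetical; user_model / creator_model are assumed to be
# pre-trained embedding models compatible with data_utils):
# agent = PolicyGradientAgent(slate_size=2, num_candidates=10,
#                             user_model=user_model, creator_model=creator_model,
#                             model_path='/tmp/agent_ckpt')
# agent.preprocess_candidates(creator_dict, docs)
# slates, policy, user_states = agent.step(user_dict, docs)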
###Output
_____no_output_____ |
src/choropleth_plots.ipynb | ###Markdown
LOAD THE CLEAN CSV
###Code
df = pd.read_csv('../data/cleaned_data.csv',
usecols=['points', 'title', 'description', 'price',
'designation', 'variety', 'winery', 'county',
'county_id', 'state_id', 'value_scaled', 'value', 'state', 'region_1'])
df = df.assign(value_scaled=lambda x: (100 - x.price * 1) * x.points/100)
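# 'value_scaled' rewards highly rated, inexpensive wines: (100 - price) shrinks
# as price grows, and points/100 keeps the score on a points-like scale.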
df.head()
variety_df = df.groupby(['variety']).size().reset_index(name='counts')
variety_df = variety_df.sort_values(by='counts')
print("Greater than 100", len(variety_df.query('counts > 100')))
print("Greater than 500", len(variety_df.query('counts > 500')))
print("Greater than 1000", len(variety_df.query('counts > 1000')))
# for i, group in winery_df.iterrows():
# print(group['counts'])
# if winery_df.query('counts == @i').sum()['counts'] > 0:
# print(i, winery_df.query('counts == @i').sum()['counts'])
winery_df = df.groupby(['winery']).size().reset_index(name='counts')
winery_df = winery_df.sort_values(by='counts')
print("Greater than 10", len(winery_df.query('counts > 10')))
print("Greater than 50",len(winery_df.query('counts > 50')))
print("Greater than 100", len(winery_df.query('counts > 100')))
df.query('county == "Pinellas County"')
state_counts = df.groupby(['state', 'state_id']).size().reset_index(name='counts')
county_counts = df.groupby(['county', 'county_id']).size().reset_index(name='counts')
county_counts
###Output
_____no_output_____
###Markdown
GET THE COUNTS BY STATE AND COUNTY
###Code
# Group and aggregate the data by States
states_grouped = df.groupby(['state', 'state_id'], as_index=False)
wine_states = states_grouped.agg({'points': ['mean'],
'price': ['mean'],
'value': ['mean'],
'description': ['count']})
wine_states.columns = wine_states.columns.droplevel(level=1)
wine_states = wine_states.rename(columns={"state": "State",
"state_id": "State ID",
"description": "Num Reviews",
"points": 'Ave Points',
"price": 'Ave Price',
"value": 'Ave Value'})
# Group and aggregate the data by Counties
counties_grouped = df.groupby(['county', 'county_id'], as_index=False)
wine_counties = counties_grouped.agg({'points': ['mean'],
'price': ['mean'],
'value': ['mean'],
'description': ['count']})
wine_counties.columns = wine_counties.columns.droplevel(level=1)
wine_counties = wine_counties.rename(columns={"county": 'County',
"county_id": 'County ID',
"description": "Num Reviews",
"points": 'Ave Points',
"price": 'Ave Price',
"value": 'Ave Value'})
wine_states
# variety_df = df[['state', 'variety']]
# variety_df
# pd.merge()
###Output
_____no_output_____
###Markdown
DRAW STATE CHOROPLETH
###Code
# ['State','State_ID','Ave Points','Ave Price','Ave Value','Num Reviews'])
counties = alt.topo_feature(data.us_10m.url, 'counties')
states = alt.topo_feature(data.us_10m.url, "states")
colormap = alt.Scale(domain=[0, 100, 1000, 2000, 4000, 8000, 16000, 32000],
                     range=['#C7DBEA', '#CCCCFF', '#B8AED2', '#3A41C6',
'#9980D4', '#722CB7', '#663399', '#512888'])
foreground = alt.Chart(states).mark_geoshape().encode(
color=alt.Color('Num Reviews:Q',
scale=colormap),
tooltip=['State:O', 'State ID:O',
'Ave Points:Q','Ave Price:Q',
'Ave Value:Q','Num Reviews:Q']
).mark_geoshape(
stroke='black',
strokeWidth=0.5
).transform_lookup(
lookup='id',
from_=alt.LookupData(wine_states,
'State ID',
['State', 'State ID', 'Ave Points', 'Ave Price', 'Ave Value', 'Num Reviews'])
).project(
type='albersUsa'
)
background = alt.Chart(states).mark_geoshape(
fill='gray',
stroke='dimgray'
).project(
'albersUsa'
).properties(
title='Wine Reviews by State'
)
(background + foreground).configure_view(
height=400,
width=700,
strokeWidth=4,
fill=None,
stroke=None,
)
###Output
_____no_output_____
###Markdown
DRAW STATE/COUNTY CHOROPLETH
###Code
counties = alt.topo_feature(data.us_10m.url, 'counties')
state_id = 6  # FIPS code: California = 6
state = 'California'
colormap = alt.Scale(domain=[0, 100, 500, 1000, 2000, 4000, 8000],
                     range=['#C7DBEA', '#CCCCFF', '#B8AED2', '#3A41C6',
'#9980D4', '#4634A7', '#4C2C96'])
c_foreground =(
alt.Chart(counties)
.mark_geoshape(
stroke='black',
strokeWidth=1
).encode(
color=alt.Color('Num Reviews:Q',
scale=colormap),
tooltip=['County:O', 'County ID:O',
'Ave Points:Q','Ave Price:Q',
'Ave Value:Q','Num Reviews:Q']
)
.transform_calculate(state_id = "(datum.id / 1000)|0")
.transform_filter((alt.datum.state_id)==state_id)
.transform_lookup(
lookup='id',
from_=alt.LookupData(wine_counties,
'County ID',
['County', 'County ID', 'Ave Points', 'Ave Price', 'Ave Value', 'Num Reviews']))
)
c_background = alt.Chart(counties).mark_geoshape(
fill='dimgray',
stroke='gray'
).transform_calculate(state_id = "(datum.id / 1000)|0",
).transform_filter((alt.datum.state_id)==state_id,
).properties(
title=f'Wine Reviews by County for {state}'
).project('albersUsa')
(c_background + c_foreground).configure_view(
height=400,
width=200,
strokeWidth=4,
fill=None,
stroke=None,
)
###Output
_____no_output_____
###Markdown
DRAW THE COUNTIES ON THE COUNTRY
###Code
counties = alt.topo_feature(data.us_10m.url, 'counties')
foreground = alt.Chart(counties).mark_geoshape().encode(
color=alt.Color('counts:Q',
scale=alt.Scale(scheme='bluepurple'))
).mark_geoshape(
stroke='black',
strokeWidth=1
).transform_lookup(
lookup='id',
from_=alt.LookupData(county_counts, 'county_id', ['counts'])
).project(
type='albersUsa'
).properties(
width=500,
height=300
)
background = alt.Chart(counties).mark_geoshape(
fill='gray',
stroke='dimgray'
).properties(
title='Number of Observations by County',
width=700,
height=400
).project('albersUsa')
background + foreground
###Output
_____no_output_____
###Markdown
ATTEMPTING TO MAKE THE MAP INTERACTIVE WRANGLE THE DF TO GET PROPER COLUMN NAMES
###Code
wine_df = df.rename(columns={"state": 'State',
"state_id": 'State ID',
"points": 'Points',
"price": 'Price',
"value_scaled": 'Value'})
#######
# NOTES:
# - Add dynamic title and chart elements with Dash
# - Add sorting with Dash (i.e. send in a new dataframe based on Dash etc.)
# - Add in slider bar for filtering with Dash (number of reviews, etc.)
counties = alt.topo_feature(data.us_10m.url, 'counties')
states = alt.topo_feature(data.us_10m.url, "states")
colormap = alt.Scale(domain=[0, 100, 1000, 2000, 4000, 8000, 16000, 32000],
                     range=['#C7DBEA', '#CCCCFF', '#B8AED2', '#3A41C6',
'#9980D4', '#722CB7', '#663399', '#512888'])
click = alt.selection_single(fields=['State'], empty='all')
# MAP OF ALL STATES
foreground = alt.Chart(states).mark_geoshape().encode(
color=alt.Color('Num Reviews:Q',
scale=colormap,
legend=alt.Legend(orient='left')),
tooltip=[alt.Tooltip('State:O'),
alt.Tooltip('Ave Points:Q', format='.2f'),
alt.Tooltip('Ave Price:Q', format='$.2f'),
alt.Tooltip('Ave Value:Q', format='.2f'),
alt.Tooltip('Num Reviews:Q')]
).mark_geoshape(
stroke='black',
strokeWidth=0.5
).transform_lookup(
lookup='id',
from_=alt.LookupData(wine_states,
'State ID',
['State', 'State ID', 'Ave Points', 'Ave Price', 'Ave Value', 'Num Reviews'])
).project(
type='albersUsa'
).add_selection(
click
)
# BACKGROUND MAP OF ALL STATES
background = alt.Chart(states).mark_geoshape(
fill='gray',
stroke='dimgray'
).project(
'albersUsa'
).properties(
title='Wine Reviews by State'
)
# Dynamic Bar Plot
bar_basic = alt.Chart(wine_df).mark_bar().transform_filter(
click,
).transform_window(
mean_price='mean(Price)',
mean_points='mean(Points)',
mean_value='mean(Value)',
num_reviews='count(Price)',
groupby=["State", "variety"],
sort=[{'field': 'Value'}],
frame=[None, None],
).encode(
x=alt.X('variety:O',
title="Grape Variety",
sort=alt.EncodingSortField(
field='mean_value',
order='descending')),
y=alt.Y('mean_value:Q',
title="Average Value"),
tooltip=[alt.Tooltip('State:O'),
alt.Tooltip('mean_price:Q', format='$.2f'),
alt.Tooltip('mean_points:Q', format='.2f'),
alt.Tooltip('mean_value:Q', format='.2f'),
alt.Tooltip('num_reviews:N')]
).transform_filter(
alt.FieldGTPredicate(field='num_reviews', gt=100)
).properties(
    title=f'Ave Value of Grape Variety for State (Min. 100 Reviews)'
)
maps = (background + foreground)
final_plot = (maps | bar_basic).configure_view(
height=400,
width=700,
strokeWidth=4,
fill=None,
stroke=None,
)
final_plot
###Output
_____no_output_____
###Markdown
Basic Bar Plot with Transformations
###Code
# BAR PLOT
bar_basic = alt.Chart(wine_df).transform_filter(
alt.FieldEqualPredicate(field='State', equal='California')
).transform_window(
mean_price='mean(Price)',
mean_points='mean(Points)',
mean_value='mean(Value)',
num_reviews='count(Price)',
groupby=["State", "variety"],
sort=[{'field': 'Price'}],
frame=[None, None],
).transform_filter(
alt.FieldGTPredicate(field='num_reviews', gt=100)
).mark_bar().encode(
x=alt.X('variety:O',
title="Grape Variety",
sort=alt.EncodingSortField(
field='mean_price',
order='descending')),
y=alt.Y('mean_price:Q',
title="Average Price"),
color=alt.condition(
alt.datum['variety'] == wine_df.sort_values(by=['Price'], ascending=False).variety.iloc[0],
alt.value('#512888'),
alt.value('lightgrey')),
tooltip=[alt.Tooltip('State:O'),
alt.Tooltip('mean_price:Q', format='$.2f'),
alt.Tooltip('mean_points:Q', format='.2f'),
alt.Tooltip('mean_value:Q', format='.2f'),
alt.Tooltip('num_reviews:N')]
)
bar_basic
# wine_df.sort_values(by=['Price'], ascending=False)
wine_df.sort_values(['Price','variety'], ascending=False).groupby('variety').head(1).head()
wine_df.sort_values(by=['Price'], ascending=False).variety
# pd.merge(wine_states, wine_df, how='left', on=['State', 'State ID']).query("State == 'California'")
pd.merge(wine_states, wine_df, how='right', on=['State'])
###Output
_____no_output_____ |
examples/Linking and Layout.ipynb | ###Markdown
Linking and Layout> [ Simple example](./Simple%20example.ipynb)>> [ Advanced examples](./More%20examples.ipynb)>> Linking and Layout>> [ Exporting Images](./Exporting%20Images.ipynb)
###Code
from ipysankeywidget import SankeyWidget
###Output
_____no_output_____
###Markdown
> This uses the base [ipywidgets](https://github.com/ipython/ipywidgets) for layout and data, but you can use any widgets!
###Code
from ipywidgets import (
VBox,
HBox,
IntSlider,
)
links = [
{'source': 'start', 'target': 'A', 'value': 10},
{'source': 'A', 'target': 'B', 'value': 10},
{'source': 'C', 'target': 'A', 'value': 10},
{'source': 'A', 'target': 'C', 'value': 10},
]
sankey = SankeyWidget(links=links)
sankey
###Output
_____no_output_____
###Markdown
> A convenience factory function
###Code
def slider(link, i, sankey):
value = IntSlider(description="{source} → {target}".format(**link), min=0, max=10, step=1, value=10)
def _change(change):
sankey.links[i]["value"] = value.value
sankey.send_state()
value.observe(_change)
return value
###Output
_____no_output_____
###Markdown
Build up a slider per link to control the value:
###Code
sliders = [slider(link, i, sankey) for i, link in enumerate(links)]
box = HBox(children=[sankey, VBox(children=sliders)])
box
###Output
_____no_output_____
b&w/color_to_gray.ipynb | ###Markdown
Imports
###Code
from numba import cuda
import numpy as np
from PIL import Image
from matplotlib import pyplot
import math
image = Image.open('../images/Tree.jpg')
pyplot.imshow(image)
###Output
_____no_output_____
###Markdown
CPU Implementation
###Code
def conv_to_gray(data):
gray = np.dot(data[...,:3],[0.2989, 0.5870, 0.1140])
return gray
rgb_image = np.array(image)
gray_image = conv_to_gray(rgb_image)
pyplot.imshow(Image.fromarray(gray_image))
###Output
_____no_output_____
###Markdown
GPU Implementation
###Code
@cuda.jit
def grayscale_kernel(d_rgb_image, d_gray_image):
i,j = cuda.grid(2)
if i < d_rgb_image.shape[0] and j < d_rgb_image.shape[1]:
d_gray_image[i,j] = 0.2989*d_rgb_image[i,j,0] + 0.5870*d_rgb_image[i,j,1] + 0.1140*d_rgb_image[i,j,2]
return
# transferring the 2 arrays to the device memory explicitly
d_rgb_image = cuda.to_device(rgb_image)
d_gray_image = cuda.device_array(rgb_image.shape[0:2])
# Setting dimensions with which to invoke the cuda kernel
threadsperblock = (16, 16)
blockspergrid_x = math.ceil(rgb_image.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(rgb_image.shape[1] / threadsperblock[1])
blockspergrid = (blockspergrid_x, blockspergrid_y)
# Running the kernel
grayscale_kernel[blockspergrid, threadsperblock](d_rgb_image, d_gray_image)
# Copying to device array to host array
gray_image = Image.fromarray(d_gray_image.copy_to_host())
pyplot.imshow(gray_image)
###Output
_____no_output_____ |
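###Markdown
A quick added sanity check (not part of the original notebook): assuming the cells above have run, the CPU reference and the CUDA kernel output should agree to within floating-point rounding.
###Code
# Added check: compare conv_to_gray's result with the kernel output.
cpu_gray = conv_to_gray(rgb_image)
gpu_gray = d_gray_image.copy_to_host()
print(np.allclose(cpu_gray, gpu_gray))
###Output
_____no_output_____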
study/M_L/210925_ML_Airbnb.ipynb | ###Markdown
Topic: You want to list a two-bedroom home in New York on Airbnb. Determine an appropriate nightly rate. (5 points)---------- Lab Guide 1. Download the data and load it into Colab. 2. All required libraries are already provided in the code. 3. Run the code in order from top to bottom. Dataset Introduction - This exercise uses the New York City Airbnb Open Data. - The following single csv file is used: AB_NYC_2019.csv - The columns of the file are as follows: id: listing ID name: listing name (title) host_id: host ID host_name: host name neighbourhood_group: group of boroughs the listing is in neighbourhood: neighbourhood the listing is in latitude: latitude of the listing longitude: longitude of the listing room_type: type of room price: price (US dollars) minimum_nights: minimum number of nights number_of_reviews: number of reviews last_review: date of the last review reviews_per_month: number of reviews per month calculated_host_listings_count: number of listings posted by the host availability_365: number of available days out of 365 - Data source: https://www.kaggle.com/dgomonov/new-york-city-airbnb-open-data Final Goals - Understand how to clean dirty scraped data - Learn various data normalization methods - Understand how to gain insights through data visualization - Learn how to train models with Scikit-learn - Learn how to extract insights from trained models - Author: Instructor 신제용--- Step 0. About Regression About linear regression Other regression methods Step 1. Preparing the dataset
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Problem 1. Set up the Kaggle API in the Colab notebook
###Code
import os
# Set the Kaggle API username and key via os.environ
os.environ['KAGGLE_USERNAME'] = "hwankihan"
os.environ['KAGGLE_KEY'] = "bf2898a338529cdfeb184d116e2cd40f"
###Output
_____no_output_____
###Markdown
Problem 2. Download and unzip the data
###Code
# Download the dataset with the Kaggle API via shell commands (!kaggle ~)
# Unzip with a shell command
!rm *.*
!kaggle datasets download -d dgomonov/new-york-city-airbnb-open-data
!unzip '*.zip'
###Output
rm: cannot remove '*.*': No such file or directory
Downloading new-york-city-airbnb-open-data.zip to /content
0% 0.00/2.44M [00:00<?, ?B/s]
100% 2.44M/2.44M [00:00<00:00, 80.7MB/s]
Archive: new-york-city-airbnb-open-data.zip
inflating: AB_NYC_2019.csv
inflating: New_York_City_.png
###Markdown
Problem 3. Read the csv file with the Pandas library
###Code
df = pd.read_csv('AB_NYC_2019.csv')
###Output
_____no_output_____
###Markdown
Step 2. EDA and Basic Statistical Analysis Problem 4. Remove unnecessary data from the DataFrame
###Code
# Analyze the structure of the DataFrame using its built-in methods (head(), info(), describe())
# Drop unnecessary columns from the DataFrame
df.head()
df.info()
df.isna().sum()
df['room_type'].value_counts()
(df['reviews_per_month'].isna() & df['last_review'].isna()).sum()
df['reviews_per_month'].isna().sum()
(df['number_of_reviews'] == 0).sum()
df['availability_365'].hist()
(df['availability_365'] == 0).sum()
df.drop(['id', 'name', 'host_name', 'latitude', 'longitude'], axis=1, inplace=True)
df.head(3)
###Output
_____no_output_____
###Markdown
Problem 5. Analyze jointplots of the numeric features vs. Price
###Code
sns.jointplot(x='host_id', y='price', data=df, kind='hex') # no visible correlation; values are heavily clustered (revisit after cleaning the data)
sns.jointplot(x='reviews_per_month', y='price', data=df, kind='hex') # same story here
###Output
_____no_output_____
###Markdown
Problem 6. Analyze the correlation between the numeric features and Price
###Code
sns.heatmap(df.corr(), annot=True, cmap='YlOrRd')
# correlations between price and the other features are very low
# probably because outliers have not been removed yet
# host_id is unexpectedly related to reviews; hosts active for a short time seem to have many reviews
###Output
_____no_output_____
###Markdown
Problem 7. Analyze boxplots and histograms of the categorical features vs. Price
###Code
sns.boxplot(x='neighbourhood_group', y='price', data=df) # outliers should be removed here too
sns.boxplot(x='room_type', y='price', data=df)
###Output
_____no_output_____
###Markdown
Step 3. Data Cleaning Problem 8. Check for missing and mis-entered data
###Code
# 각 컬럼을 분석하여 미기입/오기입된 데이터 확인하기
# Hint) 수치형 데이터는 통계를 이용해서, 범주형 데이터는 unique(), value_counts()등으로 확인
df.columns
df.isna().sum() # 아래 결측치가 완전히 겹치는것을 미리 확인했음
df['neighbourhood_group'].value_counts()
neigh = df['neighbourhood'].value_counts()
plt.plot(range(len(neigh)), neigh) # let's keep only the top 50 or so
df['neighbourhood'] = df['neighbourhood'].apply(lambda s: s if str(s) not in neigh[50:] else 'others')
df['neighbourhood'].value_counts()
df['room_type'].value_counts() # this one is fine
sns.rugplot(x='price', data=df, height=1) # there are some outliers
print(df['price'].quantile(0.95))
print(df['price'].quantile(0.005))
# ignore values that are 0 or far too high
sns.rugplot(x='minimum_nights', data=df, height=1)
print(df['minimum_nights'].quantile(0.98))
sns.rugplot(x='availability_365', data=df, height=1) # nothing to fix here
print(df['availability_365'].quantile(0.3))
###Output
0.0
###Markdown
Problem 9. Remove outliers and re-analyze the statistics
###Code
# Remove outliers using methods such as quantile() and drop(), then re-analyze the statistics
p1 = df['price'].quantile(0.95)
p2 = df['price'].quantile(0.005)
print(p1, p2)
df = df[(df['price'] < p1) & (df['price'] > p2)]
df['price'].hist()
mn1 = df['minimum_nights'].quantile(0.98)
print(mn1)
df = df[df['minimum_nights'] < mn1]
df['minimum_nights'].hist()
# availability of 0 days becomes its own category, added as a new column
df['is_avail_zero'] = df['availability_365'].apply(lambda x: 'Zero' if x==0 else 'Nonzero')
###Output
_____no_output_____
###Markdown
Problem 10. Handle missing data
###Code
# Handle missing data with fillna(), dropna(), etc.
# put the 10,052 missing entries into a new column
df['review_exists'] = df['reviews_per_month'].isna().apply(lambda x: 'No' if x is True else 'Yes')
df.fillna(0,inplace=True)
df.isna().sum()
###Output
_____no_output_____
###Markdown
Step 4. Preprocessing the Data for Model Training Problem 11. Preprocess the categorical data with get_dummies
###Code
df.columns
X_cat = df[['neighbourhood_group', 'neighbourhood', 'room_type', 'is_avail_zero', 'review_exists']]
X_cat = pd.get_dummies(X_cat)
# not fitting a linear model, so drop_first is unnecessary
###Output
_____no_output_____
###Markdown
Problem 12. Standardize the numeric data with StandardScaler
###Code
from sklearn.preprocessing import StandardScaler
# Standardize the numeric data with StandardScaler
scaler = StandardScaler()
X_num = df.drop(['neighbourhood_group', 'neighbourhood', 'room_type', 'price', 'last_review', 'is_avail_zero', 'review_exists'], axis=1)
scaler.fit(X_num)
X_scaled = scaler.transform(X_num)
X_scaled = pd.DataFrame(X_scaled, index=X_num.index, columns=X_num.columns)
X = pd.concat([X_scaled, X_cat], axis=1)
y = df['price']
###Output
_____no_output_____
###Markdown
Problem 13. Split into training and test data
###Code
from sklearn.model_selection import train_test_split
# Split into training and test data with train_test_split()
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3, random_state=1)
###Output
_____no_output_____
###Markdown
Step 5. Training a Regression Model Problem 14. Train an XGBoost regression model
###Code
from xgboost import XGBRegressor
# Create and train an XGBRegressor model
model_reg = XGBRegressor()
model_reg.fit(X_train,y_train)
###Output
[06:29:58] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
###Markdown
Problem 15. Evaluate the trained model
###Code
from sklearn.metrics import mean_absolute_error, mean_squared_error
from math import sqrt
# Predict and print the mean_absolute_error and RMSE
pred = model_reg.predict(X_test)
print(mean_absolute_error(y_test, pred))
print(sqrt(mean_squared_error(y_test, pred)))
###Output
35.37089085317669
49.393558422539826
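###Markdown
For scale (an added aside): an MAE near $35 is easier to judge against the spread of the cleaned target itself.
###Code
# Added context: describe the test-set prices the errors are measured against.
print(y_test.describe())
###Output
_____no_output_____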
###Markdown
Step 6. Deeper Analysis of the Model Results Problem 16. Visualize a scatter plot of actual vs. predicted values
###Code
# Analyze y_test vs. pred visually with a scatter plot
plt.scatter(x=y_test, y=pred, alpha=0.1)
plt.plot([0,350], [0, 350], 'r-')
###Output
_____no_output_____
###Markdown
Problem 17. Examine the histogram of the errors
###Code
# Check the relative error distribution via a histogram of err
err = (pred - y_test) / y_test
sns.histplot(err)
plt.grid()
# Check the absolute error distribution via a histogram of err
err = pred - y_test
sns.histplot(err)
plt.grid()
###Output
_____no_output_____ |
Homework 5/Pymaceuticals/Homework 5 - Will Doucet.ipynb | ###Markdown
Tumor Response to Treatment
###Code
# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint
grouped_df = combined_df.groupby(['Drug', 'Timepoint'])
grouped_mean = grouped_df.mean()
size_values = []
size_std_errors = []
fig = plt.figure(figsize=(45,45))
fig.suptitle('Average Tumor Size in mm3 Over Time', x=.5, y=1.02, fontsize=20)
#Loop through grouped mean dataframe by drug name and add tumor size values to list
for name in drug_list:
info = grouped_mean['Tumor Volume (mm3)'].xs(name, level='Drug').tolist()
size_values.append(info)
#Loop through combined_df by drug name and time
for name in drug_list:
size_list = [] #reset list for each drug
for time in timepoints:
#Add tumor size values for current drug and timepoint to list and calculate standard error
se_samples = combined_df['Tumor Volume (mm3)'].loc[(combined_df['Drug'] == name) &
(combined_df['Timepoint'] == time)].tolist()
se = sem(se_samples)
#Add standard error to list
size_list.append(se)
#Adds standard error list for all time points for currently selected drug
size_std_errors.append(size_list)
#Plot subplots
for count in range(1, len(size_values) + 1):
fig.add_subplot(5,2,count)
fig.set_figheight(15)
fig.set_figwidth(15)
plt.errorbar(timepoints, size_values[count - 1], yerr=size_std_errors[count-1], label= drug_list[count-1],
color=colors[count - 1], ecolor='black', elinewidth=1.5)
plt.grid()
plt.legend(loc=2)
plt.xlabel(f'Time Passed in Days')
plt.xticks(time_ticks)
plt.yticks(size_ticks) #standardize y axis for comparison
plt.xlim(0,46)
plt.ylabel('Tumor Size (mm3)')
plt.tight_layout()
plt.subplots_adjust(hspace=.5, wspace=.2)
fig.savefig('Graphs/Average Tumor Size Over Time by Drug')
fig_a = plt.figure()
fig_a.set_figheight(10)
fig_a.set_figwidth(15)
for count in range(1, len(size_values) + 1):
plt.errorbar(timepoints, size_values[count - 1], label= drug_list[count-1],
color=colors[count - 1], marker='x')
plt.grid()
plt.legend()
plt.xlabel('Time Passed in Days', fontsize=14)
plt.xticks(time_ticks)
plt.ylabel('Tumor Size (mm3)', fontsize=14)
plt.title('Tumor Size in mm3 Over Time', fontsize=20, y=1.04)
plt.xlim(0,45)
plt.tight_layout()
fig_a.savefig('Graphs/Tumor Size Over Time Grouped')
meta_values = []
meta_std_errors = []
fig2 = plt.figure()
fig2.suptitle('Average # of Metastatic Sites Over Time', x=.5, y=1.04, fontsize=20)
for name in drug_list:
info = grouped_mean['Metastatic Sites'].xs(name, level='Drug').tolist()
meta_values.append(info)
for name in drug_list:
meta_list = []
for time in timepoints:
se_samples = combined_df['Metastatic Sites'].loc[(combined_df['Drug'] == name) &
(combined_df['Timepoint'] == time)].tolist()
se = sem(se_samples)
meta_list.append(se)
meta_std_errors.append(meta_list)
for count in range(1, len(meta_values) + 1):
fig2.add_subplot(5,2,count)
fig2.set_figheight(15)
fig2.set_figwidth(15)
plt.errorbar(timepoints, meta_values[count - 1], yerr=meta_std_errors[count-1], label= drug_list[count-1],
color=colors[count - 1], ecolor='black', elinewidth=1.5)
plt.grid()
plt.legend(loc=2)
plt.xlabel(f'Time Passed in Days')
plt.ylabel('Average # of Metastatic Sites')
plt.xticks(time_ticks)
plt.yticks(site_ticks)
plt.tight_layout()
plt.subplots_adjust(hspace=.5, wspace=.2)
fig2.savefig('Graphs/Average Metastatic Sites by Drug')
fig2_a = plt.figure()
for count in range(1, len(size_values) + 1):
plt.errorbar(timepoints, meta_values[count - 1], label= drug_list[count-1], color=colors[count - 1], marker='x')
plt.grid()
plt.legend()
plt.xlabel('Time Passed in Days', fontsize=14)
plt.ylabel('Average # of Metastatic Sites', fontsize=14)
plt.xticks(time_ticks)
plt.yticks(site_ticks)
plt.xlim(0,45)
plt.ylim(0, 3.5)
plt.title('Average Number of Metastatic Sites Over Time', fontsize=20, y=1.04)
fig2_a.set_figheight(7)
fig2_a.set_figwidth(15)
plt.tight_layout()
fig2_a.savefig('Graphs/Average Metastatic Sites Grouped')
mice_count_all = []
for name in drug_list:
mice_count = []
for time in timepoints:
mice = len(combined_df['Mouse ID'].loc[(combined_df['Drug'] == name) & (combined_df['Timepoint'] == time)].unique())
mice_count.append(mice)
mice_count_all.append(mice_count)
fig_3 = plt.figure()
fig_3.suptitle('Number of Mice Alive Over Time', x=.5, y=1.04, fontsize=20)
for count in range(1, len(drug_list) + 1):
fig_3.add_subplot(5,2,count)
fig_3.set_figheight(15)
fig_3.set_figwidth(15)
plt.errorbar(timepoints, mice_count_all[count-1], marker='x', label= drug_list[count-1], color= colors[count - 1])
plt.xticks(timepoints)
plt.yticks(mice_ticks)
plt.xlabel('Time Passed in Days')
plt.ylabel('Number of Mice Alive')
plt.ylim(5,27.5)
plt.grid()
plt.legend()
plt.tight_layout()
plt.subplots_adjust(hspace=.5, wspace=.2)
fig_3.savefig('Graphs/Number of Mice Alive Over Time by Drug')
fig3_a = plt.figure()
for x in range(0, len(drug_list)):
plt.errorbar(timepoints, mice_count_all[x], marker='x', label= drug_list[x], color= colors[x])
plt.grid()
plt.legend()
plt.xlabel('Time Passed in Days', fontsize=14)
plt.ylabel('Number of Mice Alive', fontsize=14)
plt.title('Number of Mice Alive Over Time', fontsize=20, y=1.05)
plt.xlim(0,45)
plt.xticks(time_ticks)
plt.yticks(mice_ticks)
fig3_a.set_figheight(7)
fig3_a.set_figwidth(15)
plt.tight_layout()
fig3_a.savefig('Graphs/Number of Mice Alive Grouped')
tumor_change = []
for name in drug_list:
size = []
size = grouped_mean['Tumor Volume (mm3)'].xs(name, level='Drug').tolist()
change = round(((size[-1] / size[0]) - 1) * 100, 2)
tumor_change.append(change)
fig4 = plt.figure()
bar_ticks = np.arange(len(drug_list))
for x in range(0, len(drug_list)):
if tumor_change[x] > 0:
plt.bar(x, tumor_change[x], color='red')
plt.annotate('%.2f%%' % tumor_change[x], (x - .2,tumor_change[x] + 1), fontsize=12, fontweight='bold')
else:
plt.bar(x, tumor_change[x], color='green')
plt.annotate('%.2f%%' % tumor_change[x], (x - .22,tumor_change[x] - 2), fontsize=12, fontweight='bold')
plt.xticks(bar_ticks, drug_list)
fig4.set_figheight(10)
fig4.set_figwidth(15)
plt.hlines(0,-1,len(drug_list))
plt.title('Tumor Change Over 45 Day Treatment', fontsize=20, y=1.04)
plt.ylabel('Percentage Change in Size', fontsize=14)
plt.xlim(-.5,9.5)
plt.ylim(-25,60)
plt.grid()
plt.tight_layout()
fig4.savefig('Graphs/Tumor Change Over Treatment')
#Observations:
#Capomulin and Ramicane were the only drugs to reduce tumor size
#They also had the lowest # of metastatic sites and the most amount of mice alive at the end of the trial
#The rest of the drugs are grouped pretty close around the placebo group in each of the graphs
#which might indicate they have no effect on tumors
###Output
_____no_output_____ |
employee-attrition-prediction.ipynb | ###Markdown
INTRODUCTION- The data contain different variables, sub-classified further as:- 1. Demographics of the employee 2. Tenure information 3. Historical data regarding performance- Using these data we will find out whether an employee will stay with the company, and also look for patterns in how long a person stays. Other aspects are explored in the visualization part of the notebook.- Using this data, the HR department can easily work out what type of candidate to hire for a particular job role. LIBRARIES
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes = True)
%matplotlib inline
###Output
_____no_output_____
###Markdown
IMPORT_DATA 2.Import Data
###Code
train = pd.read_csv('/kaggle/input/predicting-employee-attrition/train_data.csv')
train.head()
###Output
_____no_output_____
###Markdown
DATA_PREPROCESSING 3. Data Preprocessing
###Code
train.isnull().sum()
train['Target']=np.where(train['LastWorkingDate'].isnull(),0,1)
train.head()
emp_groupby = train.groupby(['Emp_ID'])['Emp_ID','Age','Gender','City','Education_Level','Salary','Joining Designation','Designation',
'Quarterly Rating','Target'].tail(1)
emp_groupby
emp_groupby.reset_index(inplace = True,drop = True)
emp_groupby
total_busi_groupby = train.groupby('Emp_ID').agg({'Total Business Value':'sum'})
total_busi_groupby
total_busi_groupby.reset_index(drop = True,inplace = True)
final = pd.concat([total_busi_groupby,emp_groupby],axis = 1,join = 'inner')
final
corr = final.corr()
plt.figure(figsize = (20,8))
sns.heatmap(corr,annot = True,cmap = 'rocket')
###Output
_____no_output_____
###Markdown
DATA_VISUALIZATION 4.Data Visualization
###Code
sns.catplot('Gender','Total Business Value',hue = 'Education_Level',data = final,col = 'Education_Level',col_wrap = 2)
sns.catplot('Education_Level','Salary',data = final,hue = 'Gender',col = 'City',col_wrap = 2)
plt.subplots(figsize=(20,10))
plt.subplot(231)
sns.countplot('Gender',data = final)
plt.subplot(232)
sns.countplot('Education_Level',data = final)
plt.subplot(233)
sns.countplot('Joining Designation',data = final)
plt.subplot(234)
sns.countplot('Designation',data = final)
plt.figure(figsize = (20,8))
sns.countplot('City',data = final,palette= 'rocket')
sns.countplot('Target',data = final)
final['Quarterly Rating'].unique()
final ['Gender'] = pd.get_dummies(final['Gender'],drop_first = True)
final
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
final['City'] = label.fit_transform(final['City'])
final['Education_Level'] = label.fit_transform(final['Education_Level'])
final.head()
sns.boxplot('Total Business Value',data = final)
Q1 = final['Total Business Value'].quantile(0.25)
Q3 = final['Total Business Value'].quantile(0.75)
IQR = Q3 - Q1
filter = (final['Total Business Value'] >= Q1 - 1.5 * IQR) & (final['Total Business Value']<= Q3 + 1.5 *IQR)
train1 = final.loc[filter]
print("data loss percentage {}%".format(((len(final) - len(train1))/len(final))*100))
sns.boxplot('Salary',data = final)
Q1 = final['Salary'].quantile(0.25)
Q3 = final['Salary'].quantile(0.75)
IQR = Q3 - Q1
filter = (final['Salary'] >= Q1 - 1.5 * IQR) & (final['Salary']<= Q3 + 1.5 *IQR)
train2 = final.loc[filter]
print("data loss percentage {}%".format(((len(final) - len(train2))/len(final))*100))
train2.shape
## Get the Fraud and the normal dataset
not_stay= (train2['Target']== 0 )
stay= (train2['Target']== 1 )
std_x = train2.loc[:,['Total Business Value','Age','Gender','City','Education_Level','Salary','Joining Designation','Designation','Quarterly Rating']]
std_x.shape
y = train2.iloc[:,-1]
y.head()
from imblearn.under_sampling import NearMiss
# Implementing Undersampling for Handling Imbalanced
nm = NearMiss()
X_res,y_res=nm.fit_resample(std_x,y)
X_res.shape,y_res.shape
###Output
_____no_output_____
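###Markdown
NearMiss undersamples the majority class down to the size of the minority class. A quick added cell confirms the resampled labels are now balanced.
###Code
# Added check: both classes should now appear in equal numbers.
print(pd.Series(y_res).value_counts())
###Output
_____no_output_____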
###Markdown
DATA_VALIDATION 5. Data Validation
###Code
from sklearn.model_selection import train_test_split
#Split data into Train and test format
x_train,x_test,y_train,y_test = train_test_split(X_res,y_res,test_size = 0.25,random_state =105)
print('Shape of Training Xs:{}'.format(x_train.shape))
print('shape of Test:{}'.format(x_test.shape))
#pip install lazypredict
###Output
_____no_output_____
###Markdown
MODEL_BUILDING 6. Model Building
###Code
from lazypredict.Supervised import LazyClassifier
clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
models,predictions = clf.fit(x_train, x_test, y_train, y_test)
print(models)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(x_train,y_train)
y_predicted = clf.predict(x_test)
score = clf.score(x_test,y_test)
print(score)
###Output
0.8125
###Markdown
CONFUSION_MATRIX 7. Confusion Matirx
###Code
from sklearn.metrics import confusion_matrix
#Confusion Matrix
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_predicted)
np.set_printoptions(precision=2)
cnf_matrix
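# --- Added sketch (not in the original): render the confusion matrix as a
# heatmap, reusing the seaborn/matplotlib imports from the top of the notebook.
sns.heatmap(cnf_matrix, annot=True, fmt='d', cmap='rocket')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()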
from sklearn.metrics import classification_report
print(classification_report(y_test,y_predicted))
test = pd.read_csv('/kaggle/input/predicting-employee-attrition/test_data.csv')
test.shape
test_f = pd.merge(test,final,on = 'Emp_ID')
test_f
###Output
_____no_output_____
###Markdown
TEST_PREDICTION 8. Prediction on Test Data
###Code
test_final = test_f.drop(['Emp_ID','Target'],axis = 1)
test_final.head()
test_predicted = clf.predict(test_final)
test_predicted.shape
submission=pd.read_csv('/kaggle/input/predicting-employee-attrition/sample_submission.csv')
submission
submission['Target']=test_predicted
submission.to_csv('Random_forest.csv',index=False)
###Output
_____no_output_____ |
notebooks/tutorial/Drive_NFA_9_21_17_Class.ipynb | ###Markdown
Chapter 7 fully illustrated using Jove
###Code
# PLAY this Youtube to know how to use the contents of this file + follow some of the Def_DFA.ipynb defns
from IPython.display import YouTubeVideo
YouTubeVideo('xjFtLF95uBc')
import sys
sys.path[0:0] = ['../..','../../3rdparty'] # Put these at the head of the search path
from jove.DotBashers import *
from jove.Def_md2mc import *
from jove.Def_NFA import *
from jove.Def_DFA import *
from jove.Def_RE2NFA import *
from jove.Def_NFA2RE import *
###Output
You may use any of these help commands:
help(ResetStNum)
help(NxtStateStr)
You may use any of these help commands:
help(md2mc)
.. and if you want to dig more, then ..
help(default_line_attr)
help(length_ok_input_items)
help(union_line_attr_list_fld)
help(extend_rsltdict)
help(form_delta)
help(get_machine_components)
You may use any of these help commands:
help(mkp_dfa)
help(mk_dfa)
help(totalize_dfa)
help(addtosigma_delta)
help(step_dfa)
help(run_dfa)
help(accepts_dfa)
help(comp_dfa)
help(union_dfa)
help(intersect_dfa)
help(pruneUnreach)
help(iso_dfa)
help(langeq_dfa)
help(same_status)
help(h_langeq_dfa)
help(fixptDist)
help(min_dfa)
help(pairFR)
help(state_combos)
help(sepFinNonFin)
help(bash_eql_classes)
help(listminus)
help(bash_1)
help(mk_rep_eqc)
help(F_of)
help(rep_of_s)
help(q0_of)
help(Delta_of)
help(mk_state_eqc_name)
You may use any of these help commands:
help(mk_nfa)
help(totalize_nfa)
help(step_nfa)
help(run_nfa)
help(ec_step_nfa)
help(Eclosure)
help(Echelp)
help(accepts_nfa)
help(nfa2dfa)
help(n2d)
help(inSets)
help(rev_dfa)
help(min_dfa_brz)
You may use any of these help commands:
help(re2nfa)
You may use any of these help commands:
help(RE2Str)
help(mk_gnfa)
help(mk_gnfa_from_D)
help(dfa2nfa)
help(del_gnfa_states)
help(gnfa_w_REStr)
help(del_one_gnfa_state)
help(Edges_Exist_Via)
help(choose_state_to_del)
help(form_alt_RE)
help(form_concat_RE)
help(form_kleene_RE)
###Markdown
An NFA is a machine $(Q, \Sigma, \delta, Q_0, F)$ which is somewhat like a DFA except that 1. It can start from a __set__ of starting states $Q_0$ > i.e., the NFA can start from more than one starting state 2. Its transition function $\delta$ maps $Q\times (\Sigma\cup \{\varepsilon\})$ to $2^{Q}$ > i.e., the NFA takes a state $Q$ and a symbol and returns a set of states You can see these aspects being illustrated in the NFA to follow Limitations of DFA
###Code
secondlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> F
''')
dotObj_nfa(secondlast, FuseEdges=True)
thirdlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> F
''')
dotObj_nfa(thirdlast, FuseEdges=True)
fourthlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> F
''')
dotObj_nfa(fourthlast, FuseEdges=True)
fifthlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> S3
S3 : 0 | 1 -> F
''')
dotObj_nfa(fifthlast, FuseEdges=True)
dotObj_dfa(min_dfa(nfa2dfa(secondlast)))
dotObj_dfa(min_dfa(nfa2dfa(thirdlast)))
dotObj_dfa(min_dfa(nfa2dfa(fourthlast)))
dotObj_dfa(min_dfa(nfa2dfa(fifthlast)))
len(min_dfa(nfa2dfa(secondlast))["Q"])
len(min_dfa(nfa2dfa(thirdlast))["Q"])
len(min_dfa(nfa2dfa(fourthlast))["Q"])
len(min_dfa(nfa2dfa(fifthlast))["Q"])
###Output
_____no_output_____
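###Markdown
The four `len` calls above come out to 4, 8, 16, and 32 states -- the $2^k$ growth expected for "the $k$-th symbol from the end is 1". An added sketch (reusing `re2nfa`, which the imports at the top provide) makes the trend explicit.
###Code
# Added sketch: minimal-DFA size for "k-th symbol from the end is 1".
# The RE mirrors the NFAs built above; expect about 2**k states.
for k in range(2, 6):
    re_k = "(0+1)*1" + "(0+1)"*(k-1)
    print(k, len(min_dfa(nfa2dfa(re2nfa(re_k)))["Q"]))
###Output
_____no_output_____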
###Markdown
Clear evidence of exponential blowup! Another problem with DFA: no natural way to specify many languages
###Code
# NFA for the language {'a','ba','cd','eb'}
nfa_abcde = md2mc('''
NFA
I : a -> F
I : b -> Sb
Sb : a -> F
I : c -> Sc
Sc : d -> F
I : e -> Se
Se : b -> F
''')
dotObj_nfa(nfa_abcde)
# With DFA we do something else (build DFA for 'a' with alphabet being {a,b,c,d,e} etc...)
# This results in this DFA
dotObj_dfa_w_bh(min_dfa(nfa2dfa(nfa_abcde)), FuseEdges=True)
###Output
_____no_output_____
###Markdown
Chapter-7: Nondeterministic Finite AutomataIn this chapter, we will cover virtually all aspects of NFA, following the style of presentation used in Chapter-3 on DFA. We will sometimes be (re-)writing print (and dot-object generation) routines that look quite similar to those defined for DFA. This is because the routines are short, and we want a self-contained notebook. Besides, there are subtle differences between an NFA and a DFA, and it's best to make these differences manifest in dedicated routines -- rather than overload the former routines with extra arguments. **We begin defining NFA by providing its structure.** We will first set up a series of definitions ending at the mk_nfa function that helps build NFA. Unlike with a DFA, there is no mkp_nfa function, as all NFAs are partial! (We can define a mktot_nfa function to print all moves, including unspecified moves leading to the set({}) state.)We will define step_nfa, run_nfa and accepts_nfa, which are key functions that clearly spell out how NFA differ from DFA.We will also be defining functions for displaying NFA with the help of the _dot_ tool. The design will be similar to that with DFA. **------** __We will follow Kozen and endow an NFA with multiple start states.__ This will allow the NFA to be more naturally handled. For instance, the reverse of a DFA is an NFA. When we reverse a DFA, all its final states become initial states of the NFA (that models the reversed language). There are 2 ways to handle this:1. Introduce a fake new initial state and jump from it via $\varepsilon$ onto (what were the final states of the DFA).2. Don't introduce the fake new initial state, but rather allow the NFA to start from all of F being really its start state. * Of course, in almost all situations, this is a minor difference * But to enjoy the topic as completely as one can, it is best to be "clean" and follow clean definitions. - I've seen Ed Clarke also use multiple initial states - Hence our code will be for this relaxed setup - Of course if you __REALLY__ wanted to have only one start state, then make it a singleton set of states and work that through your NFA.__So now, following Brzozowski, we have__An NFA is a quintuple $(Q,\Sigma,\delta,Q_0,F)$, where:* $Q$ is a _finite nonempty_ set of states.* $\Sigma$ is a _finite nonempty_ alphabet containing _symbols_.* $\delta$ is a (partial) transition function, containing a set of _transitions_. The transitions take a pair from $Q\times \Sigma$ and return a __subset__ of states in $Q$. All this is succinctly captured by writing $\delta: Q\times \Sigma \rightarrow 2^Q$. Here we use $2^Q$ to denote the powerset of $Q$. * $Q_0\subseteq Q$ is __a set of initial states__. Notice that we change from q0 (or $q_0$), which is what you find in books such as Sipser and Linz.* $F\subseteq Q$ is a _finite_ (and _possibly empty_) set of final (or _accepting_) states. These are shown as double-circled nodes in the graph of an NFA. > There is no other change. I.e.
$\delta$ remains the same as before.> It is that when an NFA starts, it can find itself in a set of start states.> Most NFAs start from a __singleton__ Q0, which is then, effectively, an NFA that matches what most books say.Some terminology:> We call $Q$, $\Sigma$, $\delta$, $Q_0$, and $F$ the **_traits_** of the NFA.> We will call an NFA **_structurally consistent_** or simply **"consistent"** if its traits pass the aforesaid checks.Here is how the checks will be broken down:* The supplied $\delta$ function will be checked to see if it has allowed domain and range points. - The domain points must be a subset of $Q\times \Sigma$ - The range points must be a subset of $2^Q$ We do not insist that the supplied $\delta$ be total. * $Q_0\subseteq Q$ is the set of initial states.* $F\subseteq Q$ is a _finite_ (and _possibly empty_) set of final (or _accepting_) states. We will often use the state set({}) to be the equivalent of a black-hole state for an NFA.
###Code
nfa1 = md2mc('''NFA
I : 0 -> A
I : 0 -> F''')
dotObj_nfa(nfa1)
nfa2 = md2mc('''NFA
I : '' -> F
I : 0 -> A
''')
dotObj_nfa(nfa2)
###Output
Generating LALR tables
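###Markdown
Under the hood, Jove encodes a machine as a plain Python dict keyed by its traits -- the same "Q", "Sigma", "Delta", "Q0", "F" keys that the dfa2nfa and rev_dfa code later in this chapter manipulates. Peeking at nfa2 makes the quintuple concrete (an added illustration).
###Code
# Added peek at the dict-encoded quintuple built by md2mc.
print(nfa2["Q"])     # the set of states
print(nfa2["Q0"])    # a SET of initial states (Kozen-style)
print(nfa2["Delta"]) # maps (state, symbol) -> set of states; '' is epsilon
###Output
_____no_output_____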
###Markdown
The $\delta$ function of the NFA
###Code
help(step_nfa)
step_nfa(nfa1, 'I', '')
step_nfa(nfa1, 'I', '0')
step_nfa(nfa2, 'I','')
fig71a = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> F
''')
dotObj_nfa(fig71a, FuseEdges=True)
fig71b = md2mc('''
NFA
I : 0 | 1 -> I
I : '' -> S0
S0 : 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> F
''')
dotObj_nfa(fig71b, FuseEdges=True)
help(Eclosure)
Eclosure(fig71b, {'F'})
###Output
_____no_output_____
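###Markdown
Eclosure itself comes from jove.Def_NFA, so its body is not shown here. A minimal re-implementation sketch -- assuming the dict encoding shown earlier, with epsilon moves stored under the key (q, '') -- is a standard fixpoint computation:
###Code
def eclosure_sketch(N, S):
    """Sketch of Eclosure: all states reachable from the set S via zero
       or more '' (epsilon) moves. The library's Eclosure is authoritative."""
    closure  = set(S)
    frontier = set(S)
    while frontier:
        step = set()
        for q in frontier:
            step |= N["Delta"].get((q, ''), set())  # epsilon moves out of q
        frontier = step - closure                   # keep only genuinely new states
        closure |= frontier
    return closure

eclosure_sketch(fig71b, {'F'})  # should match Eclosure(fig71b, {'F'}) above
###Output
_____no_output_____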
###Markdown
Stepping and Running NFANow that we've defined NFA and allied actions such as consistency checking and printing, let's write functions to step and run them.* How the state transition function $\delta$ "works" - captured in step_nfa
###Code
help(step_nfa)
step_nfa(fig71b, "I", '')
step_nfa(fig71b, "S0", '')
step_nfa(fig71b, "I", '0')
help(run_nfa)
run_nfa(fig71b, "I", "0")
run_nfa(fig71b, "I", "0", chatty=True)
step_nfa(fig71b, "I", '1')
run_nfa(fig71a, "I", '0100100', chatty = True)
run_nfa(fig71b, "I", '1')
run_nfa(fig71b, {"I"}, "")
Eclosure(fig71b, {"I"})
###Output
_____no_output_____
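###Markdown
run_nfa returns the set of states reached after the whole input is consumed; acceptance then just asks whether that set meets F. A hedged sketch of that condition (the imported accepts_nfa is the real implementation):
###Code
def accepts_nfa_sketch(N, s):
    # Accept iff some state reached on s (from the Eclosed start set) is final.
    return (run_nfa(N, Eclosure(N, N["Q0"]), s) & N["F"]) != set()

accepts_nfa_sketch(fig71a, '0100100')  # third-from-last symbol is 1
###Output
_____no_output_____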
###Markdown
The EClosure Function (defined on a set of states)
###Code
run_nfa(fig71b, {"I"}, "0101")
run_nfa(fig71b, {"I"}, "0101", True)
import ipywidgets as wdg
def run_nfa_slider(firstTime, N, s, n):
"""Run NFA N from N["Q0"] (which is a set..) on substring s[0:n]
"""
if firstTime:
print("Eclosure of N's start state is")
print(Eclosure(N, N["Q0"]))
firstTime = False
S = N["Q0"]
if (n > len(s)):
n = len(s)
print("string = ", s[0:n])
run_nfa(N, S, s[0:n], True)
def run_nfa_int(N1, N2):
"""Run interactively from the given NFA .. from {q0}
on input string's substring
as picked by slider.
"""
inp = input("Please provide string: ")
wdg.interact(run_nfa_slider, firstTime=True, N = {'N1': N1, 'N2': N2},
s = inp, n=(0,32) )
run_nfa_int(fig71a, fig71b)
fig74a = md2mc('''
NFA
I : '' -> A, G
A : '' -> B, C
B : 1 -> D
C : 0 -> E
D : '' -> A, G
E : '' -> A, G
G : 1 -> F
''')
dotObj_nfa(fig74a)
run_nfa_int(fig74a, fig71b)
###Output
Eclosure of N's start state is
{'G', 'I', 'A', 'C', 'B'}
string = 0101010
States reached = {'G', 'E', 'A', 'C', 'B'}
States reached = {'G', 'A', 'F', 'C', 'D', 'B'}
States reached = {'G', 'E', 'A', 'C', 'B'}
States reached = {'G', 'A', 'F', 'C', 'D', 'B'}
States reached = {'G', 'E', 'A', 'C', 'B'}
States reached = {'G', 'A', 'F', 'C', 'D', 'B'}
States reached = {'G', 'E', 'A', 'C', 'B'}
###Markdown
DFA to NFA conversion (!)This is a useful helper and helps understand the theory, but not widely used.Its main use is within "NFA2RE". Suppose you want to convert a DFA to an RE? Then use dfa2nfa and then NFA2RE :-)
###Code
def dfa2nfa(D):
"""Given a DFA D, make a language-equivalent NFA.
"""
assert(
is_partially_consistent_dfa(D)
), "DFA given to dfa2nfa is not part. consist."
return { "Q" : D["Q"],
"Sigma" : D["Sigma"],
"Delta" : dict((a,{b}) for (a,b) in D["Delta"].items()),
"Q0" : { D["q0"] },
"F" : D["F"] }
###Output
_____no_output_____
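###Markdown
A quick added round trip: determinize fig74a with nfa2dfa (imported at the top and re-derived just below) and lift the result straight back into an NFA with dfa2nfa.
###Code
# Added round-trip illustration: NFA -> DFA -> (trivially nondeterministic) NFA.
D74 = nfa2dfa(fig74a)
dotObj_nfa(dfa2nfa(D74))
###Output
_____no_output_____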
###Markdown
NFA to DFA conversion* Input: An NFA, N* Output: A language-equivalent DFA, D* Method: Subset Construction - Data structure to maintain : * A set called "Unexpanded" (UNEXP for short), which holds SETS of states of the given NFA, N * These serve as the states of the DFA (D) being built - Note that UNEXP is a set of state-sets (set of 'set-of-states' if you will)> * Let INIT (DFA's initial state) = Eclosure of N["Q0"] > * Add INIT to UNEXP> WHILE (UNEXP $\neq \emptyset$) DO> > Choose a state S from UNEXP> > Delete S from UNEXP> > Expand(S) -- Expand(S) will add all the 'c' moves out of S where c $\in \Sigma$> END WHILE ** Expand(S): **> For each symbol $c$ in $\Sigma$> > For each state s ∈ S do -- Recall that S is a set of states> > > Let $NS_c$ = $\delta(s,c)$ -- Find the next __set of states__ the NFA can be, starting from s, moving on c > > > Let $NSE_c$ = Eclose($NS_c$) -- Eclose $NS_c$ which means Eclose every state in $NS_c$ and union them> > > Introduce a transition in D from S to $NSE_c$> > If $NSE_c$ does not exist in the DFA D, add it to UNEXP** Eclose(S): **> For each state $x \in S$:> > Move $x$ through $\varepsilon$, obtaining a set of next state $S_x$> Union these $S_x$ and return that union The Actual NFA 2 DFA Conversion Code
###Code
def nfa2dfa(N):
"""In : N (consistent NFA)
Out: A consistent DFA that is language-equivalent to N.
"""
assert(
is_consistent_nfa(N)
), "nfa2dfa was given an inconsistent NFA."
# EClose the starting state of the NFA
EC = Eclosure(N, N["Q0"])
return n2d(Frontier=[EC], Visited=[EC], Delta=dict({}), Nfa=N)
def n2d(Frontier, Visited, Delta, Nfa):
"""Helper for nfa2dfa.
---
In : Frontier (list of state sets; initially Eclosed Q0)
Visited (list of visited state sets; initially Eclosed Q0)
Delta (the DFA transition function being formed)
Nfa (the NFA being converted)
Helper to nfa2dfa. Given a (BFS) frontier, a Visited
set of states, the Delta being formed, and NFA Nfa, see
if all new moves are in Visited:
do last gasp of Delta update; make and return a DFA;
else: extend Frontier, Visited, Delta; recurse.
"""
All_c_Moves = [ ((Q,c),ec_step_nfa(Nfa,Q,c))
for Q in Frontier
for c in Nfa["Sigma"] ]
New_c_Moves = list(filter(lambda QcQ: trTrg(QcQ) not in Visited,
All_c_Moves))
if New_c_Moves == []:
# Add last-gasp c-moves that curl back!
last_gasp_c_moves = dict([ ((mkSSnam(Qfrom),c),mkSSnam(Qto))
for ((Qfrom, c), Qto) in All_c_Moves ])
Delta.update(last_gasp_c_moves)
# DFA states are visited states
DFA_Q = { mkSSnam(Q) for Q in Visited }
# Retain alphabet
DFA_Sigma = Nfa["Sigma"]
# Delta is ready to go
DFA_Delta = Delta
# DFA starts at Eclosure of Nfa's Q0 set of states
DFA_q0 = mkSSnam(Eclosure(Nfa, Nfa["Q0"]))
# DFA's final states are those in visited that contain an NFA
# F-state but don't retain any empty sets, in case the NFA given
# has no F-states!
# This is another corner-case (i.e. don't shove-in black hole
# states!)
DFA_F = set(map(lambda Q: mkSSnam(Q),
filter(lambda Q: (Nfa["F"]&Q) != set({}),
Visited)))
        # Make the DFA; send it to the DFA-shrink to bash ugly long
# state names...
return shrink_dfastates(mk_dfa(DFA_Q,
DFA_Sigma,
DFA_Delta,
DFA_q0,
DFA_F))
else:
newFrontier = list(map(lambda QcQ: trTrg(QcQ), New_c_Moves))
newVisited = Visited + newFrontier
# Even though the NFA has not closed back on itself, we MUST
# accommodate for the "curl-backs" along the way !! Thus, run it
# over All_c_Moves which may include "partial revisits along the
# way". We MUST pick up those curl-backs!
NewMovesDelta = dict([ ((mkSSnam(Qfrom),c),mkSSnam(Qto))
for ((Qfrom, c), Qto) in All_c_Moves ])
Delta.update(NewMovesDelta)
return n2d(newFrontier, newVisited, Delta, Nfa)
#---NFA to DFA
fig74a = md2mc('''
NFA
I : '' -> A, G
A : '' -> B, C
B : 1 -> D
C : 0 -> E
D : '' -> A, G
E : '' -> A, G
G : 1 -> F
''')
dotObj_nfa(fig74a)
dotObj_dfa(nfa2dfa(fig74a))
###Output
_____no_output_____
###Markdown
Brzozowski's DFA MinimizationPicking up from our earlier discussions, to minimize a DFA using Brzozowski's algorithm, here are the steps:* Make sure that the given DFA has no unreachable states* Reverse the DFA* Determinize it* Reverse that DFA* Determinize itThus we need to write a routine to reverse a DFA. We already have a way to ensure that a DFA does not have unreachable states (in another Jupyter notebook; we won't bother to include it here, and trust the user to always provide such DFA only).We can observe that if a DFA has black-hole states, then those states won't matter in the reversed machine (reversed NFA). Thus, we can work with __partial__ dfa (i.e., DFA that are partially consistent). DFA reversal
###Code
def inSets(D,trg,ch):
"""In : D = partially consistent dfa,
trg = a target state in D["q"]
ch = a member of D["Sigma"]
Out: a set of states. { q s.t. Delta[q,ch] == trg }
"""
return { q for q in D["Q"] if D["Delta"][(q,ch)] == trg }
def rev_dfa(D):
"""In : D = a partially consistent DFA without any unreachable states.
Out: A consistent NFA whose language is D's language reversed.
"""
# 1. Given that NFAs start from a SET of states, we already have that
# info. No need to add any transitions from "a new initial state"
# etc
# 2. Now add the inSets of each state as the NFA next set of states
NDict = { (q,ch) : inSets(D,q,ch)
for q in D["Q"]
for ch in D["Sigma"] }
# Notice that we retain D["Q"] and start from Q0 = D["F"]
# going backwards along NDict toward F_dfa = { D["q0"] }
return mk_nfa(D["Q"], D["Sigma"], NDict, D["F"], {D["q0"]})
nfaMultiQ0 = md2mc('''
NFA
I0 : a | b | c -> A, B
I0 : c -> F
I1 : a | b -> A, B
A : c -> F
B : d -> F
''')
dotObj_nfa(nfaMultiQ0)
dotObj_nfa(nfaMultiQ0, FuseEdges=True)
dfaMQ0 = nfa2dfa(nfaMultiQ0)
dotObj_dfa(dfaMQ0)
dotObj_dfa(dfaMQ0, FuseEdges=True)
dotObj_nfa(rev_dfa(dfaMQ0))
dotObj_nfa(rev_dfa(dfaMQ0), FuseEdges=True)
help(min_dfa_brz)
dotObj_dfa(dfaMQ0)
dotObj_dfa(min_dfa_brz(dfaMQ0))
###Output
_____no_output_____
###Markdown
Brzozowski Minimization : All Steps
###Code
blimp = md2mc('''
DFA
I1 : a -> F2
I1 : b -> F3
F2 : a -> S8
F2 : b -> S5
F3 : a -> S7
F3 : b -> S4
S4 : a | b -> F6
S5 : a | b -> F6
F6 : a | b -> F6
S7 : a | b -> F6
S8 : a -> F6
S8 : b -> F9
F9 : a -> F9
F9 : b -> F6
''')
DOblimp = dotObj_dfa(blimp)
DOblimp
dotObj_dfa(blimp, FuseEdges=True)
###Output
_____no_output_____
###Markdown
Classical minimization results in the following
###Code
classic_min = min_dfa(blimp)
dotObj_dfa(classic_min)
###Output
_____no_output_____
###Markdown
Now let's study Brzozowski minimization; its code is a one-liner!
###Code
# Brzozowski Minimizer code
def min_dfa_brz(D):
"""Minimize a DFA as per Brzozowski's algorithm.
"""
return nfa2dfa(rev_dfa(nfa2dfa(rev_dfa(D))))
###Output
_____no_output_____
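###Markdown
A quick added check before walking through the four steps: the one-liner should produce a machine isomorphic to classic_min -- the same iso_dfa comparison this section closes with.
###Code
# Added check: Brzozowski minimization agrees with classical minimization.
iso_dfa(min_dfa_brz(blimp), classic_min)
###Output
_____no_output_____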
###Markdown
Step 1: Reverse the given DFA
###Code
rblimp = rev_dfa(blimp)
DOrblimp = dotObj_nfa(rblimp)
DOrblimp
dotObj_nfa(rblimp, FuseEdges=True)
###Output
_____no_output_____
###Markdown
Step 2: Determinize the result of Step 1
###Code
drblimp = nfa2dfa(rblimp)
drblimp
DOdrblimp = dotObj_dfa(drblimp)
DOdrblimp
###Output
_____no_output_____
###Markdown
Step 3: Reverse the result of Step 2
###Code
rdrblimp = rev_dfa(drblimp)
DOrdrblimp = dotObj_nfa(rdrblimp)
DOrdrblimp
###Output
_____no_output_____
###Markdown
Step 4: Determinize the result of Step 3
###Code
drdrblimp = nfa2dfa(rdrblimp)
DOdrdrblimp = dotObj_dfa(drdrblimp)
DOdrdrblimp
###Output
_____no_output_____
###Markdown
End result is isomorphic!
###Code
iso_dfa(drdrblimp, classic_min)
###Output
_____no_output_____
###Markdown
RE to NFA
###Code
dotObj_nfa(re2nfa("''"))
dotObj_nfa(re2nfa("a"))
dotObj_nfa(re2nfa("ab"))
dotObj_nfa(re2nfa("a+b"))
dotObj_nfa(re2nfa("a*"))
dotObj_nfa(re2nfa("''*"))
nfromr = re2nfa("ab(a+b)*")
dotObj_nfa(nfromr)
mk_gnfa
help(mk_gnfa)
gnfromr = mk_gnfa(nfromr)
dotObj_gnfa(gnfromr)
del_gnfa_states
help(del_gnfa_states)
(Gf, DO, RE) = del_gnfa_states(gnfromr)
RE
DO[0]
DO[6]
dotObj_nfa(re2nfa("''"))
dotObj_nfa(re2nfa("a"))
dotObj_nfa(re2nfa('ab'))
dotObj_nfa(re2nfa("a*"))
###Output
_____no_output_____
###Markdown
$\varepsilon$
###Code
dotObj_nfa(re2nfa("a(a+b)*b"))
dotObj_nfa(re2nfa("dotObj_nfa(re2nfa("''"))a+b"))
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)"))))
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)(0+1)(0+1)(0+1)"))))
len(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)(0+1)(0+1)(0+1)(0+1)")))["Q"])
###Output
_____no_output_____
###Markdown
Chapter 7 fully illustrated using Jove
###Code
# PLAY this Youtube to know how to use the contents of this file + follow some of the Def_DFA.ipynb defns
from IPython.display import YouTubeVideo
YouTubeVideo('xjFtLF95uBc')
from jove.DotBashers import *
from jove.Def_md2mc import *
from jove.Def_NFA import *
from jove.Def_DFA import *
from jove.Def_RE2NFA import *
from jove.Def_NFA2RE import *
###Output
_____no_output_____
###Markdown
An NFA is a machine $(Q, \Sigma, \delta, Q_0, F)$ which is somewhat like a DFA except that 1. It can start from a __set__ of starting states $Q_0$ > i.e., the NFA can start from more than one starting state 2. Its transition function $\delta$ maps $Q\times (\Sigma\cup \{\varepsilon\})$ to $2^{Q}$ > i.e., the NFA takes a state $Q$ and a symbol and returns a set of states You can see these aspects being illustrated in the NFA to follow Limitations of DFA
###Code
secondlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> F
''')
dotObj_nfa(secondlast, FuseEdges=True)
thirdlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> F
''')
dotObj_nfa(thirdlast, FuseEdges=True)
fourthlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> F
''')
dotObj_nfa(fourthlast, FuseEdges=True)
fifthlast = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> S3
S3 : 0 | 1 -> F
''')
dotObj_nfa(fifthlast, FuseEdges=True)
dotObj_dfa(min_dfa(nfa2dfa(secondlast)))
dotObj_dfa(min_dfa(nfa2dfa(thirdlast)))
dotObj_dfa(min_dfa(nfa2dfa(fourthlast)))
dotObj_dfa(min_dfa(nfa2dfa(fifthlast)))
len(min_dfa(nfa2dfa(secondlast))["Q"])
len(min_dfa(nfa2dfa(thirdlast))["Q"])
len(min_dfa(nfa2dfa(fourthlast))["Q"])
len(min_dfa(nfa2dfa(fifthlast))["Q"])
###Output
_____no_output_____
###Markdown
Clear evidence of exponential blowup! Another problem with DFA : No natural way to specify many languages
###Code
# NFA for the language {'a','ba','cd','eb'}
nfa_abcde = md2mc('''
NFA
I : a -> F
I : b -> Sb
Sb : a -> F
I : c -> Sc
Sc : d -> F
I : e -> Se
Se : b -> F
''')
dotObj_nfa(nfa_abcde)
# With DFA we do something else (build DFA for 'a' with alphabet being {a,b,c,d,e} etc...)
# This results in this DFA
dotObj_dfa_w_bh(min_dfa(nfa2dfa(nfa_abcde)), FuseEdges=True)
###Output
_____no_output_____
###Markdown
Chapter-7: Nondeterministic Finite AutomataIn this chapter, we will cover virtually all aspects of NFA, following the style of presentation used in Chapter-3 on DFA. We will sometimes be (re-) writing print (and dot-object generation) routines that look quite similar to those defined for DFA. This is because the routines are short, and we want a self-contained notebook. Besides there are subtle differences between an NFA and a DFA and it's best to make these differences manifest in dedicated routines -- than overload the former routines with extra arguments. **We begin defining NFA by providing its structure. ** We will first set up a series of definitions ending at the mk_nfa function that helps build NFA. Unlike with a DFA, there is no mkp_nfa function, as all NFAs are partial! (We can define a mktot_nfa function to print all moves, including unspecified moves leading to the se({}) state.)We will define step_nfa, run_nfa and accepts_nfa which are key functions that clearly spell out how NFA differ from DFA.We will also be defining functions for displaying NFA with the help of the _dot_ tool. The design will be similar to that with DFA. **------** __We will follow Kozen and endow an NFA with multiple start states __ This will allow the NFA to be more naturally handled. For instance, the reverse of a DFA is an NFA. When we reverse a DFA, all its final states become initial states of the NFA (that models the reversed language). There are 2 ways to handle this:1. Introduce a fake new initial state and jump from it via $\varepsilon$ onto (what were the final state of the DFA).2. Don't introduce the fake new initial state, but rather allow the NFA to start from all of F being really its start state. * Of course, in almost all situations, this is a minor difference * But to enjoy the topic as completely as one can, it is best to be "clean" and follow clean definitions. - I've seen Ed Clarke also use multiple initial states - Hence our code will be for this relaxed setup - Of course if you __REALLY__ wanted to have only one start state, then make it a singleton set of states and work that through your NFA.__So now, following Brzozowski, we have__An NFA is a quintuple $(Q,\Sigma,\delta,Q_0,F)$, where:* $Q$ is a _finite nonempty_ set of states.* $\Sigma$ is a _finite nonempty_ alphabet containing _symbols_.* $\delta$ is a (partial) transition function, containing a set of _transitions_. The transitions take a pair from $Q\times \Sigma$ and return a __subset__ of states in $Q$. All this is succinctly captured by writing $\delta: Q\times \Sigma \rightarrow 2^Q$. Here we use $2^Q$ to denote the powerset of $Q$. * $Q_0\subseteq Q$, is __a set of initial states__. Notice that we change from q0 (or $q_0$) which is what you find books such as Sipser and Linz using.* $F\subseteq Q$ is a _finite_ (and _possibly empty_) set of final (or _accepting_) states. These are shown as double-circled nodes in the graph of a DFA. > There is no other change. I.e. 
$\delta$ remains the same as before.> It is that when an NFA starts, it can find itself in a set of start states.> Most NFAs start from a __singleton__ Q0, which is then, effectively, an NFA that matches most books say.Some terminology:> We call $Q$,$\Sigma$, $\delta$, $Q_0$, and $F$ the **_traits_** of the NFA.> We will call an NFA **_structurally consistent_** or simply **"consistent"** if its traits pass the aforesaid checks.Here is how the checks will be broken down:* The supplied $\delta$ function will be checked to see if it has allowed domain and range points. - The domain points must be a subset of $Q\times \Sigma$ - The range points must be a subset of $2^Q$ We do no insist that the supplied $\delta$ be total. * $Q_0\subseteq Q$, is _the_ initial state.* $F\subseteq Q$ is a _finite_ (and _possibly empty_) set of final (or _accepting_) states. We will often use the state set({}) to be the equivalent of a black-hole state for an NFA.
###Code
nfa1 = md2mc('''NFA
I : 0 -> A
I : 0 -> F''')
dotObj_nfa(nfa1)
nfa2 = md2mc('''NFA
I : '' -> F
I : 0 -> A
''')
dotObj_nfa(nfa2)
###Output
_____no_output_____
###Markdown
The $\delta$ function of the NFA
###Code
help(step_nfa)
step_nfa(nfa1, 'I', '')
step_nfa(nfa1, 'I', '0')
step_nfa(nfa2, 'I','')
fig71a = md2mc('''
NFA
I : 0 -> I
I : 1 -> I, S0
S0 : 0 | 1 -> S1
S1 : 0 | 1 -> F
''')
dotObj_nfa(fig71a, FuseEdges=True)
fig71b = md2mc('''
NFA
I : 0 | 1 -> I
I : '' -> S0
S0 : 1 -> S1
S1 : 0 | 1 -> S2
S2 : 0 | 1 -> F
''')
dotObj_nfa(fig71b, FuseEdges=True)
help(Eclosure)
Eclosure(fig71b, {'F'})
###Output
_____no_output_____
###Markdown
Stepping and Running NFANow that we've defined NFA and allied actions such as consistency checking and printing, let's write functions to step and run them.* How the state transition function $\delta$ "works" - captured in step_nfa
###Code
help(step_nfa)
step_nfa(fig71b, "I", '')
step_nfa(fig71b, "S0", '')
step_nfa(fig71b, "I", '0')
help(run_nfa)
run_nfa(fig71b, "I", "0")
run_nfa(fig71b, "I", "0", chatty=True)
step_nfa(fig71b, "I", '1')
run_nfa(fig71a, "I", '0100100', chatty = True)
run_nfa(fig71b, "I", '1')
run_nfa(fig71b, {"I"}, "")
Eclosure(fig71b, {"I"})
###Output
_____no_output_____
###Markdown
The EClosure Function (defined on a set of states)
###Code
run_nfa(fig71b, {"I"}, "0101")
run_nfa(fig71b, {"I"}, "0101", True)
import ipywidgets as wdg
def run_nfa_slider(firstTime, N, s, n):
"""Run NFA N from N["Q0"] (which is a set..) on substring s[0:n]
"""
if firstTime:
print("Eclosure of N's start state is")
print(Eclosure(N, N["Q0"]))
firstTime = False
S = N["Q0"]
if (n > len(s)):
n = len(s)
print("string = ", s[0:n])
run_nfa(N, S, s[0:n], True)
def run_nfa_int(N1, N2):
"""Run interactively from the given NFA .. from {q0}
on input string's substring
as picked by slider.
"""
inp = input("Please provide string: ")
wdg.interact(run_nfa_slider, firstTime=True, N = {'N1': N1, 'N2': N2},
s = inp, n=(0,32) )
run_nfa_int(fig71a, fig71b)
fig74a = md2mc('''
NFA
I : '' -> A, G
A : '' -> B, C
B : 1 -> D
C : 0 -> E
D : '' -> A, G
E : '' -> A, G
G : 1 -> F
''')
dotObj_nfa(fig74a)
run_nfa_int(fig74a, fig71b)
###Output
_____no_output_____
###Markdown
DFA to NFA conversion (!)This is a useful helper and helps understand the theory, but not widely used.Its main use is within "NFA2RE". Suppose you want to convert a DFA to an RE? Then use dfa2nfa and then NFA2RE :-)
###Code
def dfa2nfa(D):
"""Given a DFA D, make a language-equivalent NFA.
"""
assert(
is_partially_consistent_dfa(D)
), "DFA given to dfa2nfa is not part. consist."
return { "Q" : D["Q"],
"Sigma" : D["Sigma"],
"Delta" : dict((a,{b}) for (a,b) in D["Delta"].items()),
"Q0" : { D["q0"] },
"F" : D["F"] }
###Output
_____no_output_____
###Markdown
NFA to DFA conversion* Input: An NFA, N* Output: A language-equivalent DFA, D* Method: Subset Construction - Data structure to maintain : * A set called "Unexpanded" (UNEXP for short), which holds SETS of states of the given NFA, N * These serve as the states of the DFA (D) being built - Note that UNEXP is a set of state-sets (set of 'set-of-states' if you will)> * Let INIT (DFA's initial state) = Eclosure of N["Q0"] > * Add INIT to UNEXP> WHILE (UNEXP $\neq \emptyset$) DO> > Choose a state S from UNEXP> > Delete S from UNEXP> > Expand(S) -- Expand(S) will add all the 'c' moves out of S where c $\in \Sigma$> END WHILE ** Expand(S): **> For each symbol $c$ in $\Sigma$> > For each state s ∈ S do -- Recall that S is a set of states> > > Let $NS_c$ = $\delta(s,c)$ -- Find the next __set of states__ the NFA can be, starting from s, moving on c > > > Let $NSE_c$ = Eclose($NS_c$) -- Eclose $NS_c$ which means Eclose every state in $NS_c$ and union them> > > Introduce a transition in D from S to $NSE_c$> > If $NSE_c$ does not exist in the DFA D, add it to UNEXP** Eclose(S): **> For each state $x \in S$:> > Move $x$ through $\varepsilon$, obtaining a set of next state $S_x$> Union these $S_x$ and return that union The Actual NFA 2 DFA Conversion Code
###Code
def nfa2dfa(N):
"""In : N (consistent NFA)
Out: A consistent DFA that is language-equivalent to N.
"""
assert(
is_consistent_nfa(N)
), "nfa2dfa was given an inconsistent NFA."
# EClose the starting state of the NFA
EC = Eclosure(N, N["Q0"])
return n2d(Frontier=[EC], Visited=[EC], Delta=dict({}), Nfa=N)
def n2d(Frontier, Visited, Delta, Nfa):
"""Helper for nfa2dfa.
---
In : Frontier (list of state sets; initially Eclosed Q0)
Visited (list of visited state sets; initially Eclosed Q0)
Delta (the DFA transition function being formed)
Nfa (the NFA being converted)
Helper to nfa2dfa. Given a (BFS) frontier, a Visited
set of states, the Delta being formed, and NFA Nfa, see
if all new moves are in Visited:
do last gasp of Delta update; make and return a DFA;
else: extend Frontier, Visited, Delta; recurse.
"""
All_c_Moves = [ ((Q,c),ec_step_nfa(Nfa,Q,c))
for Q in Frontier
for c in Nfa["Sigma"] ]
New_c_Moves = list(filter(lambda QcQ: trTrg(QcQ) not in Visited,
All_c_Moves))
if New_c_Moves == []:
# Add last-gasp c-moves that curl back!
last_gasp_c_moves = dict([ ((mkSSnam(Qfrom),c),mkSSnam(Qto))
for ((Qfrom, c), Qto) in All_c_Moves ])
Delta.update(last_gasp_c_moves)
# DFA states are visited states
DFA_Q = { mkSSnam(Q) for Q in Visited }
# Retain alphabet
DFA_Sigma = Nfa["Sigma"]
# Delta is ready to go
DFA_Delta = Delta
# DFA starts at Eclosure of Nfa's Q0 set of states
DFA_q0 = mkSSnam(Eclosure(Nfa, Nfa["Q0"]))
# DFA's final states are those in visited that contain an NFA
# F-state but don't retain any empty sets, in case the NFA given
# has no F-states!
# This is another corner-case (i.e. don't shove-in black hole
# states!)
DFA_F = set(map(lambda Q: mkSSnam(Q),
filter(lambda Q: (Nfa["F"]&Q) != set({}),
Visited)))
        # Make the DFA; send it to the DFA-shrink to mask ugly long
# state names...
return shrink_dfastates(mk_dfa(DFA_Q,
DFA_Sigma,
DFA_Delta,
DFA_q0,
DFA_F))
else:
newFrontier = list(map(lambda QcQ: trTrg(QcQ), New_c_Moves))
newVisited = Visited + newFrontier
# Even though the NFA has not closed back on itself, we MUST
# accommodate for the "curl-backs" along the way !! Thus, run it
# over All_c_Moves which may include "partial revisits along the
# way". We MUST pick up those curl-backs!
NewMovesDelta = dict([ ((mkSSnam(Qfrom),c),mkSSnam(Qto))
for ((Qfrom, c), Qto) in All_c_Moves ])
Delta.update(NewMovesDelta)
return n2d(newFrontier, newVisited, Delta, Nfa)
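# For comparison, an iterative worklist rendering of the UNEXP pseudocode above
# (an illustrative sketch: it keeps raw frozenset state names rather than
# Jove's shrunken DFA, and assumes Eclosure and the dict Delta encoding):
def nfa2dfa_worklist(N):
    start = frozenset(Eclosure(N, N["Q0"]))
    Delta, Visited, Unexp = dict(), {start}, [start]
    while Unexp:
        S = Unexp.pop()
        for c in N["Sigma"]:
            moves = set()
            for q in S:
                moves |= N["Delta"].get((q, c), set())
            T = frozenset(Eclosure(N, moves))
            Delta[(S, c)] = T
            if T not in Visited:
                Visited.add(T)
                Unexp.append(T)
    return {"Q": Visited, "Sigma": N["Sigma"], "Delta": Delta,
            "q0": start, "F": {S for S in Visited if S & N["F"]}}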
#---NFA to DFA
fig74a = md2mc('''
NFA
I : '' -> A, G
A : '' -> B, C
B : 1 -> D
C : 0 -> E
D : '' -> A, G
E : '' -> A, G
G : 1 -> F
''')
dotObj_nfa(fig74a)
dotObj_dfa(nfa2dfa(fig74a))
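# Round-trip sanity sketch (illustrative; assumes min_dfa and iso_dfa are
# imported from Jove, as they are used later in this notebook):
_D = nfa2dfa(fig74a)
iso_dfa(min_dfa(nfa2dfa(dfa2nfa(_D))), min_dfa(_D))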
###Output
_____no_output_____
###Markdown
Brzozowski's DFA Minimization

Picking up from our earlier discussions, to minimize a DFA using Brzozowski's algorithm, here are the steps:
* Make sure that the given DFA has no unreachable states
* Reverse the DFA
* Determinize it
* Reverse that DFA
* Determinize it

Thus we need to write a routine to reverse a DFA. We already have a way to ensure that a DFA does not have unreachable states (in another Jupyter notebook; we won't bother to include it here, and trust the user to always provide such DFAs only).

We can observe that if a DFA has black-hole states, then those states won't matter in the reversed machine (reversed NFA). Thus, we can work with __partial__ DFAs (i.e., DFAs that are partially consistent).

DFA reversal
###Code
def inSets(D,trg,ch):
"""In : D = partially consistent dfa,
trg = a target state in D["q"]
ch = a member of D["Sigma"]
Out: a set of states. { q s.t. Delta[q,ch] == trg }
"""
return { q for q in D["Q"] if D["Delta"][(q,ch)] == trg }
def rev_dfa(D):
"""In : D = a partially consistent DFA without any unreachable states.
Out: A consistent NFA whose language is D's language reversed.
"""
# 1. Given that NFAs start from a SET of states, we already have that
# info. No need to add any transitions from "a new initial state"
# etc
# 2. Now add the inSets of each state as the NFA next set of states
NDict = { (q,ch) : inSets(D,q,ch)
for q in D["Q"]
for ch in D["Sigma"] }
# Notice that we retain D["Q"] and start from Q0 = D["F"]
# going backwards along NDict toward F_dfa = { D["q0"] }
return mk_nfa(D["Q"], D["Sigma"], NDict, D["F"], {D["q0"]})
nfaMultiQ0 = md2mc('''
NFA
I0 : a | b | c -> A, B
I0 : c -> F
I1 : a | b -> A, B
A : c -> F
B : d -> F
''')
dotObj_nfa(nfaMultiQ0)
dotObj_nfa(nfaMultiQ0, FuseEdges=True)
dfaMQ0 = nfa2dfa(nfaMultiQ0)
dotObj_dfa(dfaMQ0)
dotObj_dfa(dfaMQ0, FuseEdges=True)
dotObj_nfa(rev_dfa(dfaMQ0))
dotObj_nfa(rev_dfa(dfaMQ0), FuseEdges=True)
help(min_dfa_brz)
dotObj_dfa(dfaMQ0)
dotObj_dfa(min_dfa_brz(dfaMQ0))
###Output
_____no_output_____
###Markdown
Brzozowski Minimization : All Steps
###Code
blimp = md2mc('''
DFA
I1 : a -> F2
I1 : b -> F3
F2 : a -> S8
F2 : b -> S5
F3 : a -> S7
F3 : b -> S4
S4 : a | b -> F6
S5 : a | b -> F6
F6 : a | b -> F6
S7 : a | b -> F6
S8 : a -> F6
S8 : b -> F9
F9 : a -> F9
F9 : b -> F6
''')
DOblimp = dotObj_dfa(blimp)
DOblimp
dotObj_dfa(blimp, FuseEdges=True)
###Output
_____no_output_____
###Markdown
Classical minimization results in the following
###Code
classic_min = min_dfa(blimp)
dotObj_dfa(classic_min)
###Output
_____no_output_____
###Markdown
Now let's study Brzozowski minimization; its code is a one-liner!
###Code
# Brzozowski Minimizer code
def min_dfa_brz(D):
"""Minimize a DFA as per Brzozowski's algorithm.
"""
return nfa2dfa(rev_dfa(nfa2dfa(rev_dfa(D))))
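# One-call check (illustrative): the result should match classical minimization
# up to isomorphism, as verified step by step below.
iso_dfa(min_dfa_brz(blimp), classic_min)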
###Output
_____no_output_____
###Markdown
Step 1: Reverse the given DFA
###Code
rblimp = rev_dfa(blimp)
DOrblimp = dotObj_nfa(rblimp)
DOrblimp
dotObj_nfa(rblimp, FuseEdges=True)
###Output
_____no_output_____
###Markdown
Step 2: Determinize the result of Step 1
###Code
drblimp = nfa2dfa(rblimp)
drblimp
DOdrblimp = dotObj_dfa(drblimp)
DOdrblimp
###Output
_____no_output_____
###Markdown
Step 3: Reverse the result of Step 2
###Code
rdrblimp = rev_dfa(drblimp)
DOrdrblimp = dotObj_nfa(rdrblimp)
DOrdrblimp
###Output
_____no_output_____
###Markdown
Step 4: Determinize the result of Step 3
###Code
drdrblimp = nfa2dfa(rdrblimp)
DOdrdrblimp = dotObj_dfa(drdrblimp)
DOdrdrblimp
###Output
_____no_output_____
###Markdown
End result is isomorphic!
###Code
iso_dfa(drdrblimp, classic_min)
###Output
_____no_output_____
###Markdown
RE to NFA
###Code
dotObj_nfa(re2nfa("''"))
dotObj_nfa(re2nfa("a"))
dotObj_nfa(re2nfa("ab"))
dotObj_nfa(re2nfa("a+b"))
dotObj_nfa(re2nfa("a*"))
dotObj_nfa(re2nfa("''*"))
nfromr = re2nfa("ab(a+b)*")
dotObj_nfa(nfromr)
mk_gnfa
help(mk_gnfa)
gnfromr = mk_gnfa(nfromr)
dotObj_gnfa(gnfromr)
del_gnfa_states
help(del_gnfa_states)
(Gf, DO, RE) = del_gnfa_states(gnfromr)
RE
DO[0]
DO[6]
dotObj_nfa(re2nfa("''"))
dotObj_nfa(re2nfa("a"))
dotObj_nfa(re2nfa('ab'))
dotObj_nfa(re2nfa("a*"))
###Output
_____no_output_____
###Markdown
$\varepsilon$
###Code
dotObj_nfa(re2nfa("a(a+b)*b"))
dotObj_nfa(re2nfa("dotObj_nfa(re2nfa("''"))a+b"))
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)"))))
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)(0+1)(0+1)(0+1)"))))
len(min_dfa(nfa2dfa(re2nfa("(0+1)*1(0+1)(0+1)(0+1)(0+1)(0+1)(0+1)")))["Q"])
###Output
_____no_output_____ |
nbs/08_vision.data.ipynb | ###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#|export
@delegates(subplots)
def get_grid(
n:int, # Number of axes in the returned grid
nrows:int=None, # Number of rows in the returned grid, defaulting to `int(math.sqrt(n))`
ncols:int=None, # Number of columns in the returned grid, defaulting to `ceil(n/rows)`
figsize:tuple=None, # Width, height in inches of the returned figure
double:bool=False, # Whether to double the number of columns and `n`
title:str=None, # If passed, title set to the figure
return_fig:bool=False, # Whether to return the figure created by `subplots`
flatten:bool=True, # Whether to flatten the matplot axes such that they can be iterated over with a single loop
**kwargs,
) -> (plt.Figure, plt.Axes): # Returns just `axs` by default, and (`fig`, `axs`) if `return_fig` is set to True
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
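# Illustrative usage of the helper just defined: a 2x3 grid with a bold title
_axs = get_grid(6, nrows=2, title="demo grid")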
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. The default `figsize` is `(cols*imsize, rows*imsize+0.6)`. `imsize` is passed down to `subplots`. `suptitle`, `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to [plt.subplots](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html#matplotlib-pyplot-subplots). If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
#|export
def clip_remove_empty(
bbox:TensorBBox, # Coordinates of bounding boxes
label:TensorMultiCategory # Labels of the bounding boxes
):
"Clip bounding boxes with image border and remove empty boxes along with corresponding labels"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[TensorBase(~empty)])
###Output
_____no_output_____
###Markdown
This is used in `bb_pad`
###Code
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#|export
def bb_pad(
samples:list, # List of 3-tuples like (image, bounding_boxes, labels)
pad_idx:int=0 # Label that will be used to pad each list of labels
):
"Function that collects `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
###Output
_____no_output_____
###Markdown
This is used in `BBoxBlock`
###Code
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#|export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#|export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision

These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#|export
def ImageBlock(cls:PILBase=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#|export
def MaskBlock(
codes:list=None # Vocab labels for segmentation masks
):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#|export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#|export
def BBoxLblBlock(
vocab:list=None, # Vocab labels for bounding boxes
add_na:bool=True # Add NaN as a background class
):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
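# Illustrative wiring of the detection blocks into a DataBlock (the getters for
# boxes and labels are dataset-specific and omitted, so this only builds the
# block pipeline):
_det_dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock(add_na=True)),
                        n_inp=1)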
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class).

ImageDataLoaders -
###Code
#|export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls,
path:(str, Path), # Set the default path to a directory that a `Learner` can use to save files like models
fnames:list, # A list of `os.Pathlike`'s to individual image files
label_func:callable, # A function that receives a string (the file name) and outputs a label
**kwargs
) -> DataLoaders:
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:

- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: if we shuffle the training `DataLoader` or not
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
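# Illustrative: the same random split with the common kwargs from the list
# above (a fixed seed for reproducibility and a smaller batch size)
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2, seed=42, bs=16)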
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
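# Illustrative label function for names like class_name_123.jpg, as described
# above: drop everything after the last underscore
def _pet_label(fname): return '_'.join(fname.name.split('_')[:-1])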
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, you will need to change the initial two `/` to `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filenames, and not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filenames, and not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong, you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
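# The labels column here is space-delimited; passing label_delim makes the
# multi-label intent explicit (illustrative variant of the call above):
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid',
                               label_delim=' ')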
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#|export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
/home/hamel/anaconda3/lib/python3.9/site-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
ret = func(*args, **kwargs)
###Markdown
Export -
###Code
#|hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.image_sequence.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted app_examples.ipynb.
Converted camvid.ipynb.
Converted migrating_catalyst.ipynb.
Converted migrating_ignite.ipynb.
Converted migrating_lightning.ipynb.
Converted migrating_pytorch.ipynb.
Converted migrating_pytorch_verbose.ipynb.
Converted ulmfit.ipynb.
Converted index.ipynb.
Converted index_original.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataBunch` in the vision application and higher class `ImageDataBunch`

ImageDataBunch -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataBunch(DataBunch):
@classmethod
@delegates(DataBunch.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, valid_pct=valid_pct, seed=seed, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, **kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
TransformBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed))
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataBunch.from_csv = delegates(to=ImageDataBunch.from_df)(ImageDataBunch.from_csv)
ImageDataBunch.from_path_re = delegates(to=ImageDataBunch.from_path_func)(ImageDataBunch.from_path_re)
show_doc(ImageDataBunch.from_folder)
show_doc(ImageDataBunch.from_path_func)
show_doc(ImageDataBunch.from_path_re)
show_doc(ImageDataBunch.from_name_func)
show_doc(ImageDataBunch.from_name_re)
show_doc(ImageDataBunch.from_df)
show_doc(ImageDataBunch.from_csv)
show_doc(ImageDataBunch.from_lists)
#export
class SegmentationDataBunch(DataBunch):
@classmethod
@delegates(DataBunch.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, ImageBlock(cls=PILMask)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
if codes is not None: res.vocab = codes
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
MaskBlock = TransformBlock(type_tfms=PILMask.create, batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit-Copy1.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
Converted migrating.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataBunch` in the vision application and higher class `ImageDataBunch`

ImageDataBunch -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataBunch(DataBunch):
@classmethod
@delegates(DataBunch.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, valid_pct=valid_pct, seed=seed, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, **kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
TransformBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed))
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataBunch.from_csv = delegates(to=ImageDataBunch.from_df)(ImageDataBunch.from_csv)
ImageDataBunch.from_path_re = delegates(to=ImageDataBunch.from_path_func)(ImageDataBunch.from_path_re)
show_doc(ImageDataBunch.from_folder)
show_doc(ImageDataBunch.from_path_func)
show_doc(ImageDataBunch.from_path_re)
show_doc(ImageDataBunch.from_name_func)
show_doc(ImageDataBunch.from_name_re)
show_doc(ImageDataBunch.from_df)
show_doc(ImageDataBunch.from_csv)
show_doc(ImageDataBunch.from_lists)
#export
class SegmentationDataBunch(DataBunch):
@classmethod
@delegates(DataBunch.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(
n:int, # Number of axes in the returned grid
nrows:int=None, # Number of rows in the returned grid, defaulting to `int(math.sqrt(n))`
ncols:int=None, # Number of columns in the returned grid, defaulting to `ceil(n/rows)`
figsize:tuple=None, # Width, height in inches of the returned figure
double:bool=False, # Whether to double the number of columns and `n`
title:str=None, # If passed, title set to the figure
return_fig:bool=False, # Whether to return the figure created by `subplots`
flatten:bool=True, # Whether to flatten the matplot axes such that they can be iterated over with a single loop
**kwargs,
) -> (plt.Figure, plt.Axes): # Returns just `axs` by default, and (`fig`, `axs`) if `return_fig` is set to True
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. The default `figsize` is `(cols*imsize, rows*imsize+0.6)`. `imsize` is passed down to `subplots`. `suptitle`, `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to [plt.subplots](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html#matplotlib-pyplot-subplots). If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(
bbox:TensorBBox, # Coordinates of bounding boxes
label:TensorMultiCategory # Labels of the bounding boxes
):
"Clip bounding boxes with image border and remove empty boxes along with corresponding labels"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[TensorBase(~empty)])
###Output
_____no_output_____
###Markdown
This is used in `bb_pad`
###Code
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#export
def bb_pad(
samples:list, # List of 3-tuples like (image, bounding_boxes, labels)
pad_idx:int=0 # Label that will be used to pad each list of labels
):
"Function that collects `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
###Output
_____no_output_____
###Markdown
This is used in `BBoxBlock`
###Code
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision

These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls:PILBase=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(
codes:list=None # Vocab labels for segmentation masks
):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(
vocab:list=None, # Vocab labels for bounding boxes
add_na:bool=True # Add NaN as a background class
):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class).

ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls,
path:(str, Path), # Set the default path to a directory that a `Learner` can use to save files like models
fnames:list, # A list of `os.Pathlike`'s to individual image files
label_func:callable, # A function that receives a string (the file name) and outputs a label
**kwargs
) -> DataLoaders:
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
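# All the factory methods share the common keyword arguments listed above;
# a minimal sketch with illustrative values (not in the original notebook):
dls = ImageDataLoaders.from_folder(path, item_tfms=Resize(28), bs=16)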
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
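# The worked example this paragraph leads into is missing from this revision
# of the notebook; a sketch reconstructed from the older copy further down
# (separate names so the MNIST `path`/`fnames` above stay intact):
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pets_label(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(pets_path, pets_fnames, pets_label, item_tfms=Resize(224))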
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to change the initial two `/` to `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
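# A sketch mirroring the older pets example later in this document; the
# label function now receives just the file *name*, not the full path:
def pets_name_label(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(pets_path, pets_fnames, pets_name_label, item_tfms=Resize(224))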
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
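# Likewise for the regex variant (pattern taken from the older pets example
# further down); the pattern is matched against the file name, not the path:
pets_pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(pets_path, pets_fnames, pets_pat, item_tfms=Resize(224))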
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
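# A sketch (not in the original notebook) passing `y_block` explicitly:
# made-up float targets force regression instead of the automatically
# picked classification.
float_labels = [float(i % 2) for i in range(len(fnames))]
dls = ImageDataLoaders.from_lists(path, fnames, float_labels, y_block=RegressionBlock,
                                  item_tfms=Resize(224))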
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
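# A quick visual sanity check (a minimal sketch, not in the original
# notebook); show_batch overlays each mask on its image:
dls.show_batch(max_n=4)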
###Output
/home/hamel/anaconda3/lib/python3.9/site-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
ret = func(*args, **kwargs)
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.image_sequence.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted app_examples.ipynb.
Converted camvid.ipynb.
Converted migrating_catalyst.ipynb.
Converted migrating_ignite.ipynb.
Converted migrating_lightning.ipynb.
Converted migrating_pytorch.ipynb.
Converted migrating_pytorch_verbose.ipynb.
Converted ulmfit.ipynb.
Converted index.ipynb.
Converted index_original.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data into a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplot axes such that they can be iterated over with a single loop.
###Code
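# A minimal sketch of calling get_grid directly (not in the original
# notebook): asking for 9 axes returns a flattened 3x3 grid.
axs = get_grid(9, figsize=(6,6))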
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[TensorBase(~empty)])
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to change the initial two `/` to `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data into a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows you will need to change the initial two `/` to `\`):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data into a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
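# Hedged sketch of composing these blocks in the data block API for detection.
# `img2bbox` is a hypothetical lookup from file name to (bboxes, labels);
# building the DataBlock itself does not read any data yet.
img2bbox = {'img_0001.jpg': ([[10., 20., 100., 120.]], ['person'])}
dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                   get_items=get_image_files,
                   splitter=RandomSplitter(),
                   get_y=[lambda o: img2bbox[o.name][0], lambda o: img2bbox[o.name][1]],
                   n_inp=1)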
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether or not to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
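For instance, a minimal hedged sketch using the MNIST sample that is also loaded just below (the batch sizes are hypothetical):
###Code
# Sketch: batch size 64 for training, 128 for validation.
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY), bs=64, val_bs=128)
###Output
_____no_output_____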
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows, you will need to replace the two initial / in the pattern with \):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except that `label_func` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except that `pat` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong, you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting from 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data > Helper functions to get data in a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders`. The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
    empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
                           get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether or not to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
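For instance, a minimal hedged sketch (the transform is illustrative; `Resize` is used the same way in the examples further below):
###Code
# Sketch: `item_tfms` run on each image before batching; here every image is resized.
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY), item_tfms=Resize(28))
###Output
_____no_output_____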
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows, you will need to replace the two initial / in the pattern with \):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except that `label_func` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except that `pat` is applied to the name of each filename, not the full path.Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong, you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting from 0).
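A quick sketch of that, reusing the PASCAL dataframe loaded above (column index 2 is `is_valid`):
###Code
# Same split as before, selecting the validation column by index rather than by name.
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col=2, item_tfms=Resize(224))
###Output
_____no_output_____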
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data > Helper functions to get data in a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders`. The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether or not to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
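For instance, a minimal hedged sketch (pinning to the CPU is an arbitrary choice for illustration):
###Code
# Sketch: set the training batch size and the device the batches are put on.
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY), bs=32, device=torch.device('cpu'))
###Output
_____no_output_____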
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility.Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, you will need to replace the two initial / in the pattern with \):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except that `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except that `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong, you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny mnist example from before also contains a version in a dataframe:
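As a hedged illustration of `label_delim`, here is a made-up two-row dataframe; this sketch only shows the call shape (actually fetching a batch would require the image files to exist):
###Code
# Sketch: space-delimited tags in the label column make the library pick MultiCategoryBlock.
df_multi = pd.DataFrame({'fname': ['a.jpg', 'b.jpg'], 'labels': ['cat outdoor', 'dog']})
dls = ImageDataLoaders.from_df(df_multi, path, label_delim=' ')
###Output
_____no_output_____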
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting from 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`.Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data > Helper functions to get data in a `DataLoaders` in the vision application, and the higher-level class `ImageDataLoaders`. The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed, it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplotlib axes so that they can be iterated over with a single loop.
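A quick hedged sketch contrasting the two modes (the shapes shown are for a request of 4 axes):
###Code
axs_flat = get_grid(4)                 # flat list of 4 axes: iterate with a single loop
axs_2d   = get_grid(4, flatten=False)  # 2x2 array of axes: index as axs_2d[row][col]
###Output
_____no_output_____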
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
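# hypothetical sketch (not part of the library source): wiring the blocks
# above into a detection `DataBlock`; `img2bbox` is a stand-in mapping of
# file names to (bboxes, labels) that a real dataset would provide
img2bbox = {'img_0.jpg': ([[-0.5,-0.5,0.5,0.5]], ['cat'])}
det_block = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                      get_items=get_image_files, n_inp=1,
                      get_y=[lambda o: img2bbox[o.name][0],
                             lambda o: img2bbox[o.name][1]])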
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
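For example, a quick sketch passing a few of the common arguments (the values here are arbitrary, not a recommendation):
###Code
# hypothetical sketch; `mnist_path` is local to this example
mnist_path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(mnist_path, bs=64, val_bs=128, item_tfms=Resize(28))
###Output
_____no_output_____
###Markdown
Each `show_doc` call below renders the full signature of the corresponding factory method.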
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset, where the filenames are all in an "images" folder and have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
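# sketch on the pets dataset (same labelling as in the parallel copy of this
# notebook below); separate variable names keep the MNIST `path`/`fnames`
# above usable further down
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pets_label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(pets_path, pets_fnames, pets_label_func, item_tfms=Resize(224))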
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, replace the initial two / with \):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
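# sketch: same pets labelling as above, but here `label_func` receives only
# the file name (uses `pets_path`/`pets_fnames` defined earlier)
def pets_name_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(pets_path, pets_fnames, pets_name_func, item_tfms=Resize(224))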
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
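# sketch: the regex is applied to the file name only
# (uses `pets_path`/`pets_fnames` defined earlier)
pets_pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(pets_path, pets_fnames, pets_pat, item_tfms=Resize(224))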
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
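# hypothetical sketch: force regression targets with `y_block` when the
# automatic guess from `labels[0]` would be wrong (targets here are made up)
fake_sizes = [float(i % 10) for i in range(len(fnames))]
dls_reg = ImageDataLoaders.from_lists(path, fnames, fake_sizes, y_block=RegressionBlock)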
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset, where the filenames are all in an "images" folder and have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows, replace the initial two / with \):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, valid_pct=valid_pct, seed=seed, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, **kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
TransformBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed))
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
This cell doesn't have an export destination and was ignored:
e
This cell doesn't have an export destination and was ignored:
e
This cell doesn't have an export destination and was ignored:
e
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept.Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset, where the filenames are all in an "images" folder and have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`, as shown in the pets examples earlier in this document.
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, replace the initial two / with \):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
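# Hypothetical variation (not part of the original docs): pass `y_block`
# explicitly instead of letting from_lists infer CategoryBlock from `labels`.
dls2 = ImageDataLoaders.from_lists(path, fnames, labels, y_block=CategoryBlock)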
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
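A quick illustrative use (a small sketch, not from the original docs; the values are arbitrary):
###Code
# Sketch: request 9 axes; get_grid picks a 3x3 layout and returns the flat list.
axs = get_grid(9, figsize=(6, 6))
for i, ax in enumerate(axs): ax.set_title(f'axes {i}')
###Output
_____no_output_____
###Markdown
Two small helpers prepare bounding-box targets for batching: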
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
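As a sketch of how these plug into the data block API (using `ImageBlock` and `MaskBlock` defined just below, on the same CAMVID data as the `SegmentationDataLoaders` example later; variable names are illustrative):
###Code
# Sketch: hand-wire ImageBlock and MaskBlock into a DataBlock; the
# SegmentationDataLoaders factory further down performs this same wiring.
camvid = untar_data(URLs.CAMVID_TINY)
camvid_codes = np.loadtxt(camvid/'codes.txt', dtype=str)
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=camvid_codes)),
                   get_items=get_image_files,
                   get_y=lambda o: camvid/'labels'/f'{o.stem}_P{o.suffix}',
                   splitter=RandomSplitter())
dls = dblock.dataloaders(camvid/'images', bs=8)
###Output
_____no_output_____
###Markdown
Their definitions: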
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
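###Markdown
Putting the detection blocks together: a sketch that follows the standard `COCO_TINY` pattern (the getter lambdas are illustrative, not part of this module):
###Code
# Sketch: an object-detection DataBlock from ImageBlock, BBoxBlock and BBoxLblBlock.
# `n_inp=1` marks the image as the only input; bboxes and labels are the targets.
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco/'train.json')
img2bbox = dict(zip(images, lbl_bbox))
block = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                  get_items=get_image_files,
                  splitter=RandomSplitter(),
                  get_y=[lambda o: img2bbox[o.name][0], lambda o: img2bbox[o.name][1]],
                  item_tfms=Resize(128),
                  n_inp=1)
dls = block.dataloaders(coco/'train')
###Output
_____no_output_____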
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset, where the filenames are all in an "images" folder and have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
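A minimal sketch of that labeling (hedged: `pet_label` is a hypothetical helper written here for illustration, not the module's documented example):
###Code
# Sketch: label 'great_pyrenees_123.jpg' by dropping everything after the last '_'.
# Separate variable names are used so the MNIST `path`/`fnames` above stay intact.
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pet_label(name): return '_'.join(name.split('_')[:-1])  # hypothetical helper
dls = ImageDataLoaders.from_name_func(pets_path, pets_fnames, pet_label)
###Output
_____no_output_____
###Markdown
`from_path_re` offers a regular-expression variant: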
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, you will need to replace the two initial / characters with \):
###Code
pat = r'/([^/]*)/\d+\.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
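# Hypothetical variation (not part of the original docs): pass `y_block`
# explicitly instead of letting from_lists infer CategoryBlock from `labels`.
dls2 = ImageDataLoaders.from_lists(path, fnames, labels, y_block=CategoryBlock)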
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataLoaders(DataLoaders):
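    "Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"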
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
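    "Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"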
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
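    "Return a grid of `n` axes, `rows` by `cols`"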
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
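A quick illustrative use (a small sketch, not from the original docs; the values are arbitrary):
###Code
# Sketch: request 9 axes; get_grid picks a 3x3 layout and returns the flat list.
axs = get_grid(9, figsize=(6, 6))
for i, ax in enumerate(axs): ax.set_title(f'axes {i}')
###Output
_____no_output_____
###Markdown
Two small helpers prepare bounding-box targets for batching: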
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
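As a sketch of how these plug into the data block API (using `ImageBlock` and `MaskBlock` defined just below, on the same CAMVID data as the `SegmentationDataLoaders` example later; variable names are illustrative):
###Code
# Sketch: hand-wire ImageBlock and MaskBlock into a DataBlock; the
# SegmentationDataLoaders factory further down performs this same wiring.
camvid = untar_data(URLs.CAMVID_TINY)
camvid_codes = np.loadtxt(camvid/'codes.txt', dtype=str)
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=camvid_codes)),
                   get_items=get_image_files,
                   get_y=lambda o: camvid/'labels'/f'{o.stem}_P{o.suffix}',
                   splitter=RandomSplitter())
dls = dblock.dataloaders(camvid/'images', bs=8)
###Output
_____no_output_____
###Markdown
Their definitions: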
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
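###Markdown
Putting the detection blocks together: a sketch that follows the standard `COCO_TINY` pattern (the getter lambdas are illustrative, not part of this module):
###Code
# Sketch: an object-detection DataBlock from ImageBlock, BBoxBlock and BBoxLblBlock.
# `n_inp=1` marks the image as the only input; bboxes and labels are the targets.
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco/'train.json')
img2bbox = dict(zip(images, lbl_bbox))
block = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                  get_items=get_image_files,
                  splitter=RandomSplitter(),
                  get_y=[lambda o: img2bbox[o.name][0], lambda o: img2bbox[o.name][1]],
                  item_tfms=Resize(128),
                  n_inp=1)
dls = block.dataloaders(coco/'train')
###Output
_____no_output_____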
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset, where the filenames are all in an "images" folder and have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
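A minimal sketch of that labeling (hedged: `pet_label` is a hypothetical helper written here for illustration, not the module's documented example):
###Code
# Sketch: label 'great_pyrenees_123.jpg' by dropping everything after the last '_'.
# Separate variable names are used so the MNIST `path`/`fnames` above stay intact.
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pet_label(name): return '_'.join(name.split('_')[:-1])  # hypothetical helper
dls = ImageDataLoaders.from_name_func(pets_path, pets_fnames, pet_label)
###Output
_____no_output_____
###Markdown
`from_path_re` offers a regular-expression variant: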
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, you will need to replace the two initial / characters with \):
###Code
pat = r'/([^/]*)/\d+\.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
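# Hypothetical variation (not part of the original docs): pass `y_block`
# explicitly instead of letting from_lists infer CategoryBlock from `labels`.
dls2 = ImageDataLoaders.from_lists(path, fnames, labels, y_block=CategoryBlock)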
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
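A quick illustrative use (a small sketch, not from the original docs; the values are arbitrary):
###Code
# Sketch: request 9 axes; get_grid picks a 3x3 layout and returns the flat list.
axs = get_grid(9, figsize=(6, 6))
for i, ax in enumerate(axs): ax.set_title(f'axes {i}')
###Output
_____no_output_____
###Markdown
Two small helpers prepare bounding-box targets for batching: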
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
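# expected: sample 1 loses its negative-area box after clipping; sample 2 is padded with a zero box and a pad_idx label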
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
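###Markdown
As a hypothetical sketch (not from the original notebook), these blocks compose directly into an object-detection `DataBlock`; `n_inp=1` marks the image as the sole input, leaving the boxes and labels as targets:
###Code
# sketch only: `get_items`, getters and a splitter would still be needed before building `DataLoaders`
detection_block = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock), n_inp=1)
###Output
_____no_output_____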
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
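For instance, a hypothetical sketch showing the shared arguments in use (values chosen arbitrarily):
###Code
# the same keyword arguments work with any of the factory methods shown below
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path, item_tfms=Resize(28), bs=16)
###Output
_____no_output_____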
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
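# a hypothetical extra step (not in the original notebook): preview a few samples via the
# type-dispatched `show_batch` defined earlier
dls.show_batch(max_n=4)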
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not to the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not to the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API.The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplot axes such that they can be iterated over with a single loop.
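A minimal sketch (hypothetical call) of the new `flatten` argument: with `flatten=False` the axes keep the array shape that `plt.subplots` returns instead of being flattened into a list.
###Code
# flatten=False hands back the raw nrows x ncols axes array
axs = get_grid(4, nrows=2, ncols=2, flatten=False)
###Output
_____no_output_____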
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
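    # casting the mask to TensorBase before indexing avoids tensor-subclass dispatch on TensorMultiCategory
    # (an assumption about fastai's type-retention behavior, not stated in the notebook)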
return (bbox[~empty], label[TensorBase(~empty)])
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
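# a hypothetical sketch of the pets labeling described above; separate variable names keep the
# MNIST `fnames` from the previous example intact for the regex example that follows
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pets_label_func(x): return '_'.join(x.name.split('_')[:-1])
pets_dls = ImageDataLoaders.from_path_func(pets_path, pets_fnames, pets_label_func, item_tfms=Resize(224))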
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training; see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplot axes such that they can be iterated over with a single loop.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
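# a hypothetical sketch of the pets labeling described above; separate variable names keep the
# MNIST `fnames` from the previous example intact for the regex example that follows
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pets_label_func(x): return '_'.join(x.name.split('_')[:-1])
pets_dls = ImageDataLoaders.from_path_func(pets_path, pets_fnames, pets_label_func, item_tfms=Resize(224))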
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`. They provide factory methods that are a great way to quickly get your data ready for training. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly, one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set. If a `vocab` is passed, only the folders with names in `vocab` are kept.
###Code
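# Hedged sketch (the dataset choice is an assumption; later revisions of this
# notebook use the same call): an imagenet-style layout with train/valid
# subfolders, labelled by the parent folder name.
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)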
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x):
    return re.search(r'^(.*)_\d+\.jpg$', x).groups()[0]
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
dls.show_batch()
show_doc(ImageDataLoaders.from_name_re)
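# Hedged sketch reusing the pets `path`/`fnames` defined above (the regex is an
# assumption mirroring `label_func`): the pattern is matched against each
# file's name attribute only.
dls = ImageDataLoaders.from_name_re(path, fnames, r'^(.*)_\d+\.jpg$', item_tfms=Resize(224))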
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
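# Hedged sketch (mirrors the MNIST_TINY labels.csv example shown in a later
# revision of this notebook): the csv holds filenames in column 0 and labels
# in column 1, the defaults for `fn_col` and `label_col`.
mnist_path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_csv(mnist_path, 'labels.csv')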
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
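# Hedged sketch (mirrors the CAMVID_TINY example in a later revision of this
# notebook; the `_P` label-naming scheme is that dataset's convention):
camvid = untar_data(URLs.CAMVID_TINY)
cv_fnames = get_image_files(camvid/'images')
def cv_label_func(x): return camvid/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(camvid/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(camvid, cv_fnames, cv_label_func, codes=codes)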
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(
n:int, # Number of axes in the returned grid
nrows:int=None, # Number of rows in the returned grid, defaulting to `int(math.sqrt(n))`
ncols:int=None, # Number of columns in the returned grid, defaulting to `ceil(n/rows)`
add_vert=0,
figsize:tuple=None, # Width, height in inches of the returned figure
double:bool=False, # Whether to double the number of columns and `n`
title:str=None, # If passed, title set to the figure
return_fig:bool=False, # Whether to return the figure created by `subplots`
flatten:bool=True, # Whether to flatten the matplot axes such that they can be iterated over with a single loop
**kwargs,
) -> (plt.Figure, plt.Axes): # Returns just `axs` by default, and (`fig`, `axs`) if `return_fig` is set to True
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. The default `figsize` is `(cols*imsize, rows*imsize+0.6)`. `imsize` is passed down to `subplots`. `suptitle`, `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to [plt.subplots](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.htmlmatplotlib-pyplot-subplots). If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(
bbox:TensorBBox, # Coordinates of bounding boxes
label:TensorMultiCategory # Labels of the bounding boxes
):
"Clip bounding boxes with image border and remove empty boxes along with corresponding labels"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[TensorBase(~empty)])
###Output
_____no_output_____
###Markdown
This is used in `bb_pad`
###Code
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#export
def bb_pad(
samples:list, # List of 3-tuples like (image, bounding_boxes, labels)
pad_idx:int=0 # Label that will be used to pad each list of labels
):
"Function that collects `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
###Output
_____no_output_____
###Markdown
This is used in `BBoxBlock`
###Code
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
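# Hedged check (a sketch assuming fastai's CategoryMap behavior, not part of
# the original notebook): with add_na=True the '#na#' background category sits
# at index 0, which is why `bb_pad`'s default pad_idx=0 labels padded boxes as
# background.
tfm = MultiCategorize(vocab=['cat', 'dog'], add_na=True)
test_eq(tfm.vocab[0], '#na#')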
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls,
path:(str, Path), # Set the default path to a directory that a `Learner` can use to save files like models
fnames:list, # A list of `os.Pathlike`'s to individual image files
label_func:callable, # A function that receives a string (the file name) and outputs a label
**kwargs
) -> DataLoaders:
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly, one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
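# Hedged sketch (batch sizes and transform are illustrative assumptions): the
# common arguments listed above can be passed to any of the factory methods.
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY), bs=8, val_bs=16, item_tfms=Resize(28))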
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of using the grandparent folders). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
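# Hedged sketch completing the pets example promised above (the variable names
# are local assumptions so the MNIST `path`/`fnames` used below stay intact;
# the label rule mirrors the `'_'.join` trick used later in this notebook).
pets_path = untar_data(URLs.PETS)
pets_fnames = get_image_files(pets_path/"images")
def pets_label(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(pets_path, pets_fnames, pets_label)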
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` in the pattern with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
/home/hamel/anaconda3/lib/python3.9/site-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
ret = func(*args, **kwargs)
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.image_sequence.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted app_examples.ipynb.
Converted camvid.ipynb.
Converted migrating_catalyst.ipynb.
Converted migrating_ignite.ipynb.
Converted migrating_lightning.ipynb.
Converted migrating_pytorch.ipynb.
Converted migrating_pytorch_verbose.ipynb.
Converted ulmfit.ipynb.
Converted index.ipynb.
Converted index_original.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly, one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of using the grandparent folders). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` in the pattern with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
@delegates(subplots)
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://dev.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
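# --- usage sketch (illustrative; not part of the library export above) ---
# Asking for 5 axes picks nrows=int(math.sqrt(5))=2 and ncols=3; the unused
# sixth axis is switched off and sliced away, so exactly 5 axes come back.
_axs = get_grid(5)
test_eq(len(_axs), 5)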
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://dev.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
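# Sketch of the common keyword arguments that every factory method accepts
# (the values below are purely illustrative):
dls = ImageDataLoaders.from_folder(path, bs=16, val_bs=32, item_tfms=Resize(28))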
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
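# Sketch: `vocab` fixes the class list explicitly (MNIST_TINY is assumed to
# contain exactly the folders '3' and '7'):
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2, vocab=['3', '7'])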
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x): return '_'.join(x.name.split('_')[:-1])
dls = ImageDataLoaders.from_path_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the PETS dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)_\d+.jpg$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, and not the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
def label_func(x): return '_'.join(x.split('_')[:-1])
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, and not the full path. Here is how to create the same `DataLoaders` on the PETS dataset:
###Code
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', item_tfms=Resize(224))
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid', item_tfms=Resize(224))
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels, item_tfms=Resize(224))
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
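# Illustrative check of the pattern above (the sample path is made up but
# mirrors the MNIST_TINY layout; `re` comes in with the fastai imports):
test_eq(re.findall(pat, '/mnist_tiny/train/3/9932.png'), ['3'])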
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
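# Sketch: when one row packs several labels into a single string, tell the
# library how to split it (a space delimiter is an assumption about this csv):
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', label_delim=' ')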
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted index.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
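# --- usage sketch for `flatten` (illustrative; not part of the export) ---
# flatten=True (the default) hands back a flat list that one loop can walk;
# flatten=False returns whatever `subplots` produced (a 2-D array of axes).
_axs = get_grid(4, nrows=2)
test_eq(len(_axs), 4)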
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplotlib axes such that they can be iterated over with a single loop.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
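# Hypothetical sketch: a labeled-bbox block for two foreground classes; with
# add_na=True fastai reserves an extra '#na#' category to stand for background.
blk = BBoxLblBlock(vocab=['cat', 'dog'])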
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: if we shuffle the training `DataLoader` or not- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random `subset` of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to replace the two initial `/` with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, and not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
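# The labels column of this csv holds space-separated tags, so the delimiter can
# also be passed explicitly (a sketch of the multi-label arguments):
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid', label_delim=' ')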
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
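# `header` and `delimiter` are forwarded to pd.read_csv (a sketch with the
# defaults written out):
dls = ImageDataLoaders.from_csv(path, 'train.csv', header='infer', delimiter=',',
                                folder='train', valid_col='is_valid')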
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
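# Forcing the target type with `y_block` (a sketch); without it, these string
# labels are inferred as a single-label classification problem anyway:
dls = ImageDataLoaders.from_lists(path, fnames, labels, y_block=CategoryBlock)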
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
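# A quick sanity check of the images and masks (a sketch):
dls.show_batch(max_n=4)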
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
def label_func(x):
return re.search(r'^(.*)_\d+.jpg', x).groups()[0]
dls = ImageDataLoaders.from_name_func(path, fnames, label_func, item_tfms=Resize(224))
dls.show_batch()
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
@delegates(subplots)
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
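# Usage sketch: with the defaults, nine axes come back as int(sqrt(9)) = 3 rows
# by ceil(9/3) = 3 cols:
axs = get_grid(9)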
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed, it is set on the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplotlib axes such that they can be iterated over with a single loop.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2,5]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
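# A sketch of wiring these blocks together for object detection with the data
# block API; the two get_y lambdas are hypothetical placeholders for real
# bbox/label getters, and n_inp=1 marks the image as the only input:
dblock = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                   get_items=get_image_files,
                   get_y=[lambda o: [[0., 0., 10., 10.]], lambda o: ['object']],
                   n_inp=1)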
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, you will need to replace the two initial / in the pattern by \):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny mnist example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contain the mapping index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataBunch` in the vision application and the higher-level class `ImageDataBunch` ImageDataBunch -
###Code
#export
class ImageDataBunch(DataBunch):
@classmethod
@delegates(DataBunch.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None, y_block=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None: y_block = MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=RandomSplitter(valid_pct, seed=seed))
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataBunch.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, **kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (TransformBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed))
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataBunch.from_csv = delegates(to=ImageDataBunch.from_df)(ImageDataBunch.from_csv)
ImageDataBunch.from_name_re = delegates(to=ImageDataBunch.from_name_func)(ImageDataBunch.from_name_re)
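# A minimal sketch of the DataBunch-era API defined above (assumes the
# imagenet-style layout of MNIST_TINY):
dbunch = ImageDataBunch.from_folder(untar_data(URLs.MNIST_TINY))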
show_doc(ImageDataBunch.from_folder)
show_doc(ImageDataBunch.from_name_func)
show_doc(ImageDataBunch.from_name_re)
show_doc(ImageDataBunch.from_df)
show_doc(ImageDataBunch.from_csv)
show_doc(ImageDataBunch.from_lists)
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
MaskBlock = TransformBlock(type_tfms=PILMask.create, batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dbunch_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.model.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and higher class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False,
flatten=True, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
if nrows:
ncols = ncols or int(np.ceil(n/nrows))
elif ncols:
nrows = nrows or int(np.ceil(n/ncols))
else:
nrows = int(math.sqrt(n))
ncols = int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
if flatten: axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed, it is set on the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`. `flatten` will flatten the matplotlib axes such that they can be iterated over with a single loop.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[TensorBase(~empty)])
bb = TensorBBox([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5], [-2, -0.5, -1.5, 0.5]])
bb,lbl = clip_remove_empty(bb, TensorMultiCategory([1,2,3,2,5]))
test_eq(bb, TensorBBox([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, TensorMultiCategory([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls,
path:(str, Path), # Set the default path to a directory that a `Learner` can use to save files like models
fnames:list, # A list of `os.Pathlike`'s to individual image files
label_func:callable, # A function that receives a string (the file name) and outputs a label
**kwargs
) -> DataLoaders:
"Create from the name attrs of `fnames` in `path`s with `label_func`"
if sys.platform == 'win32' and isinstance(label_func, types.LambdaType) and label_func.__name__ == '<lambda>':
# https://medium.com/@jwnx/multiprocessing-serialization-in-python-with-pickle-9844f6fa1812
raise ValueError("label_func couldn't be lambda function on Windows")
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
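A minimal sketch of that labelling (illustrative names; it uses `from_name_func`, documented a bit further down, which applies the function to each file name):
###Code
pets_path = untar_data(URLs.PETS)
pets_files = get_image_files(pets_path/"images")
def pets_label_func(n): return n.rsplit('_', 1)[0]  # drop the trailing _123.jpg part
dls = ImageDataLoaders.from_name_func(pets_path, pets_files, pets_label_func)
###Output
_____no_output_____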
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, replace the two initial `/` in the pattern with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not to the full path.
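A minimal sketch on the same pets files (the regex is an assumption matching names like `great_pyrenees_173.jpg`):
###Code
dls = ImageDataLoaders.from_name_re(pets_path, pets_files, r'(.+)_\d+.jpg$')
###Output
_____no_output_____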
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting from 0).
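A quick sketch of that (in this `df` the `is_valid` column sits at index 2):
###Code
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col=2)
###Output
_____no_output_____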
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
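# A sketch of the `y_block` override (hypothetical float targets; passing
# `RegressionBlock` explicitly turns this into a regression problem):
sizes = [float(len(l)) for l in labels]
dls = ImageDataLoaders.from_lists(path, fnames, sizes, y_block=RegressionBlock)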
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
/home/hamel/anaconda3/lib/python3.9/site-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
ret = func(*args, **kwargs)
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.image_sequence.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted app_examples.ipynb.
Converted camvid.ipynb.
Converted migrating_catalyst.ipynb.
Converted migrating_ignite.ipynb.
Converted migrating_lightning.ipynb.
Converted migrating_pytorch.ipynb.
Converted migrating_pytorch_verbose.ipynb.
Converted ulmfit.ipynb.
Converted index.ipynb.
Converted index_original.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
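For instance, a quick sketch: asking for 7 axes yields a 2x4 grid trimmed to 7, with the unused axis switched off.
###Code
axs = get_grid(7, figsize=(8,4))
len(axs)
###Output
_____no_output_____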
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
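A minimal sketch combining these blocks for object detection (the two getters are hypothetical stand-ins for real annotations; `n_inp=1` marks the image as the only input):
###Code
def toy_bboxes(f): return [[10, 10, 50, 50]]  # hypothetical: boxes for file `f`
def toy_labels(f): return ['object']          # hypothetical: matching labels
detect_block = DataBlock(blocks=(ImageBlock, BBoxBlock, BBoxLblBlock),
                         get_items=get_image_files,
                         get_y=[toy_bboxes, toy_labels],
                         n_inp=1)
###Output
_____no_output_____
###Markdown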
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:
- `item_tfms`: one or several transforms applied to the items before batching them
- `batch_tfms`: one or several transforms applied to the batches once they are formed
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether or not to shuffle the training `DataLoader`
- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows, replace the two initial `/` in the pattern with `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not to the full path.
###Code
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the column index, starting from 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
@delegates(subplots)
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` ImageDataLoaders -
###Code
#export
def _using_attr(f, attr, x):
return f(getattr(x,attr))
#export
def using_attr(f, attr):
"Change function `f` to operate on `attr`"
return partial(_using_attr, f, attr)
t = Path('/a/b.txt')
f = using_attr(str.upper, 'name')
test_eq(f(t), 'B.TXT')
#export
class ImageDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_image_files,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`."
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from name attrs in list of `fnames` in `path`s with re expression `pat`."
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` in `path`."
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
TransformBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_folder)
show_doc(ImageDataLoaders.from_path_func)
show_doc(ImageDataLoaders.from_path_re)
show_doc(ImageDataLoaders.from_name_func)
show_doc(ImageDataLoaders.from_name_re)
show_doc(ImageDataLoaders.from_df)
show_doc(ImageDataLoaders.from_csv)
show_doc(ImageDataLoaders.from_lists)
#export
class SegmentationDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
###Output
_____no_output_____
###Markdown
Show methods
###Code
#export
def get_grid(n, rows=None, cols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False):
rows = rows or int(np.ceil(math.sqrt(n)))
cols = cols or int(np.ceil(n/rows))
if double: cols*=2 ; n*=2
figsize = (cols*3, rows*3+add_vert) if figsize is None else figsize
fig,axs = subplots(rows, cols, figsize=figsize)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, rows=None, cols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), rows=rows, cols=cols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
Helper functions for object detection
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones."
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision
###Code
#export
def ImageBlock(cls=PILImage): return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
#export
def BBoxLblBlock(vocab=None, add_na=True):
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 13a_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.transfer_learning.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.ulmfit.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.learner.ipynb.
Converted 43_tabular.model.ipynb.
Converted 45_collab.ipynb.
Converted 50_datablock_examples.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 97_test_utils.ipynb.
Converted index.ipynb.
###Markdown
Vision data> Helper functions to get data in a `DataLoaders` in the vision application and the higher-level class `ImageDataLoaders` The main classes defined in this module are `ImageDataLoaders` and `SegmentationDataLoaders`, so you probably want to jump to their definitions. They provide factory methods that are a great way to quickly get your data ready for training, see the [vision tutorial](http://docs.fast.ai/tutorial.vision) for examples. Helper functions
###Code
#export
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
###Output
_____no_output_____
###Markdown
This is used by the type-dispatched versions of `show_batch` and `show_results` for the vision application. By default, there will be `int(math.sqrt(n))` rows and `ceil(n/rows)` columns. `double` will double the number of columns and `n`. The default `figsize` is `(cols*imsize, rows*imsize+add_vert)`. If a `title` is passed it is set to the figure. `sharex`, `sharey`, `squeeze`, `subplot_kw` and `gridspec_kw` are all passed down to `plt.subplots`. If `return_fig` is `True`, returns `fig,axs`, otherwise just `axs`.
###Code
# export
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) < 0.)
return (bbox[~empty], label[~empty])
bb = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
bb,lbl = clip_remove_empty(bb, tensor([1,2,3,2]))
test_eq(bb, tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(lbl, tensor([1,2,2]))
#export
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
img1,img2 = TensorImage(torch.randn(16,16,3)),TensorImage(torch.randn(16,16,3))
bb1 = tensor([[-2,-0.5,0.5,1.5], [-0.5,-0.5,0.5,0.5], [1,0.5,0.5,0.75], [-0.5,-0.5,0.5,0.5]])
lbl1 = tensor([1, 2, 3, 2])
bb2 = tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]])
lbl2 = tensor([2, 2])
samples = [(img1, bb1, lbl1), (img2, bb2, lbl2)]
res = bb_pad(samples)
non_empty = tensor([True,True,False,True])
test_eq(res[0][0], img1)
test_eq(res[0][1], tensor([[-1,-0.5,0.5,1.], [-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5]]))
test_eq(res[0][2], tensor([1,2,2]))
test_eq(res[1][0], img2)
test_eq(res[1][1], tensor([[-0.5,-0.5,0.5,0.5], [-0.5,-0.5,0.5,0.5], [0,0,0,0]]))
test_eq(res[1][2], tensor([2,2,0]))
###Output
_____no_output_____
###Markdown
Show methods -
###Code
#export
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
#export
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
###Output
_____no_output_____
###Markdown
`TransformBlock`s for vision These are the blocks the vision application provides for the [data block API](http://docs.fast.ai/data.block).
###Code
#export
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
#export
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
#export
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
show_doc(PointBlock, name='PointBlock')
show_doc(BBoxBlock, name='BBoxBlock')
#export
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
###Output
_____no_output_____
###Markdown
If `add_na` is `True`, a new category is added for NaN (that will represent the background class). ImageDataLoaders -
###Code
#export
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:- `item_tfms`: one or several transforms applied to the items before batching them- `batch_tfms`: one or several transforms applied to the batches once they are formed- `bs`: the batch size- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)- `shuffle_train`: whether to shuffle the training `DataLoader`- `device`: the PyTorch device to use (defaults to `default_device()`)
###Code
show_doc(ImageDataLoaders.from_folder)
###Output
_____no_output_____
###Markdown
If `valid_pct` is provided, a random split is performed (with an optional `seed`) by setting aside that percentage of the data for the validation set (instead of looking at the grandparents folder). If a `vocab` is passed, only the folders with names in `vocab` are kept. Here is an example loading a subsample of MNIST:
###Code
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_folder(path)
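# The common arguments combine freely; a hedged sketch with the standard fastai
# transforms (`Resize`, `aug_transforms`) and explicit batch sizes:
dls = ImageDataLoaders.from_folder(path, item_tfms=Resize(28),
                                   batch_tfms=aug_transforms(), bs=32, val_bs=64)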
###Output
_____no_output_____
###Markdown
Passing `valid_pct` will ignore the valid/train folders and do a new random split:
###Code
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2)
dls.valid_ds.items[:3]
show_doc(ImageDataLoaders.from_path_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example with a `label_func`:
###Code
fnames = get_image_files(path)
def label_func(x): return x.parent.name
dls = ImageDataLoaders.from_path_func(path, fnames, label_func)
###Output
_____no_output_____
###Markdown
Here is another example on the pets dataset. Here filenames are all in an "images" folder and their names have the form `class_name_123.jpg`. One way to properly label them is thus to throw away everything after the last `_`:
###Code
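# A minimal sketch of that labeling with `from_name_func` (variable names are
# illustrative); the label function receives each file's name, e.g. 'Abyssinian_1.jpg':
pets_path = untar_data(URLs.PETS)
pet_files = get_image_files(pets_path/"images")
def pet_label(name): return name[:name.rfind('_')]
pets_dls = ImageDataLoaders.from_name_func(pets_path, pet_files, pet_label,
                                           item_tfms=Resize(224))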
show_doc(ImageDataLoaders.from_path_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Here is how to create the same `DataLoaders` on the MNIST dataset as the previous example (on Windows you will need to change the initial two `/` to `\`):
###Code
pat = r'/([^/]*)/\d+.png$'
dls = ImageDataLoaders.from_path_re(path, fnames, pat)
show_doc(ImageDataLoaders.from_name_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_func` except `label_func` is applied to the name of each filename, not the full path.
###Code
show_doc(ImageDataLoaders.from_name_re)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. This method does the same as `ImageDataLoaders.from_path_re` except `pat` is applied to the name of each filename, not the full path.
###Code
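# A hedged regex equivalent of the pets labeling above (names look like
# 'breed_123.jpg'; reuses the files loaded earlier):
pets_re_dls = ImageDataLoaders.from_name_re(pets_path, pet_files, r'(.+)_\d+.jpg$',
                                            item_tfms=Resize(224))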
show_doc(ImageDataLoaders.from_df)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. Alternatively, if your `df` contains a `valid_col`, give its name or its index to that argument (the column should have `True` for the elements going to the validation set). You can add an additional `folder` to the filenames in `df` if they should not be concatenated directly to `path`. If they do not contain the proper extensions, you can add `suff`. If your label column contains multiple labels on each row, you can use `label_delim` to warn the library you have a multi-label problem. `y_block` should be passed when the task automatically picked by the library is wrong; you should then give `CategoryBlock`, `MultiCategoryBlock` or `RegressionBlock`. For more advanced uses, you should use the data block API. The tiny MNIST example from before also contains a version in a dataframe:
###Code
path = untar_data(URLs.MNIST_TINY)
df = pd.read_csv(path/'labels.csv')
df.head()
###Output
_____no_output_____
###Markdown
Here is how to load it using `ImageDataLoaders.from_df`:
###Code
dls = ImageDataLoaders.from_df(df, path)
###Output
_____no_output_____
###Markdown
Here is another example with a multi-label problem:
###Code
path = untar_data(URLs.PASCAL_2007)
df = pd.read_csv(path/'train.csv')
df.head()
dls = ImageDataLoaders.from_df(df, path, folder='train', valid_col='is_valid')
###Output
_____no_output_____
###Markdown
Note that you can also pass `2` to `valid_col` (the index, starting with 0).
###Code
show_doc(ImageDataLoaders.from_csv)
###Output
_____no_output_____
###Markdown
Same as `ImageDataLoaders.from_df` after loading the file with `header` and `delimiter`. Here is how to load the same dataset as before with this method:
###Code
dls = ImageDataLoaders.from_csv(path, 'train.csv', folder='train', valid_col='is_valid')
show_doc(ImageDataLoaders.from_lists)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `y_block` can be passed to specify the type of the targets.
###Code
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
labels = ['_'.join(x.name.split('_')[:-1]) for x in fnames]
dls = ImageDataLoaders.from_lists(path, fnames, labels)
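# `y_block` makes the target type explicit; a hedged sketch with dummy float
# regression labels (illustrative, not a meaningful task):
reg_dls = ImageDataLoaders.from_lists(path, fnames, [0.0]*len(fnames),
                                      y_block=RegressionBlock, item_tfms=Resize(224))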
#export
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
show_doc(SegmentationDataLoaders.from_label_func)
###Output
_____no_output_____
###Markdown
The validation set is a random subset of `valid_pct`, optionally created with `seed` for reproducibility. `codes` contains the mapping from index to label.
###Code
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.cutmix.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.
|
doc/source/cookbook/geographic_xforms_and_projections.ipynb | ###Markdown
Geographic Transforms and Projections Loading the GEOS data For this analysis we'll be loading some global climate data into yt. A frontend does not exist for this dataset yet, so we'll load it in as a uniform grid with netcdf4.
###Code
import os
import re
import netCDF4 as nc4
import numpy as np
import yt
def get_data_path(arg):
if os.path.exists(arg):
return arg
else:
return os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"), arg)
n = nc4.Dataset(get_data_path("geos/GEOS.fp.asm.inst3_3d_aer_Nv.20180822_0900.V01.nc4"))
###Output
_____no_output_____
###Markdown
Using the loaded data we'll fill arrays with the data dimensions and limits. We'll also rename `vertical level` to `altitude` to be clearer.
###Code
dims = []
sizes = []
bbox = []
ndims = len(n.dimensions)
for dim in n.dimensions.keys():
size = n.variables[dim].size
if size > 1:
bbox.append([n.variables[dim][:].min(), n.variables[dim][:].max()])
dims.append(n.variables[dim].long_name)
sizes.append(size)
dims.reverse() # Fortran ordering
sizes.reverse()
bbox.reverse()
dims = [f.replace("vertical level", "altitude") for f in dims]
bbox = np.array(bbox)
###Output
_____no_output_____
###Markdown
We'll also load the data into a container dictionary and create a lookup for the short to the long names
###Code
w_regex = re.compile(r"([a-zA-Z]+)(.*)")
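# regex_parser rewrites unit tokens like 'm2' or 'kg-1' into the exponent form
# yt's unit parser expects ('m**2', 'kg**-1'); tokens without a trailing
# exponent pass through unchanged.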
def regex_parser(s):
try:
return "**".join(filter(None, w_regex.search(s).groups()))
except AttributeError:
return s
data = {}
names = {}
for field, d in n.variables.items():
if d.ndim != ndims:
continue
units = n.variables[field].units
units = " * ".join(map(regex_parser, units.split()))
data[field] = (np.squeeze(d), str(units))
names[field] = n.variables[field].long_name.replace("_", " ")
###Output
_____no_output_____
###Markdown
Now the data can be loaded with yt's `load_uniform_grid` function. We also need to say that the geometry is a `geographic` type. This will ensure that the axes created are matplotlib GeoAxes and that the transform functions are available to use for projections.
###Code
ds = yt.load_uniform_grid(data, sizes, 1.0, geometry=("geographic", dims), bbox=bbox)
###Output
_____no_output_____
###Markdown
Default projection with geographic geometry Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will create a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot.
###Code
p = yt.SlicePlot(ds, "altitude", "AIRDENS")
p.show()
###Output
_____no_output_____
###Markdown
Note that this doesn't have a lot of contextual information. We can add annotations for the coastlines just as we would with matplotlib. Before the annotations are set, we need to call `p._setup_plots` to make the axes available for annotation.
###Code
p = yt.SlicePlot(ds, "altitude", "AIRDENS")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Using geographic transforms to project data If a projection other than the default `Mollweide` is desired, we can pass an argument to the `set_mpl_projection()` function. This will set the projection to a Robinson projection.
###Code
p = yt.SlicePlot(ds, "altitude", "AIRDENS")
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`geo_projection` accepts a string or a 2- to 3-element sequence describing the projection; the second item in the sequence is the args and the third item is the kwargs. This can be used for further customization of the projection.
###Code
p = yt.SlicePlot(ds, "altitude", "AIRDENS")
p.set_mpl_projection(("Robinson", (37.5,)))
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
We don't actually need to keep creating a SlicePlot to change the projection type. Once an existing `SlicePlot` instance has been created, we can call `set_mpl_projection()` on it with a string naming the transform we desire. This will set the figure to an `Orthographic` projection.
###Code
p.set_mpl_projection("Orthographic")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`set_mpl_projection()` can be used in a number of ways to customize the projection type. * If a **string** is passed, then the string must correspond to the transform name, which is exclusively cartopy transforms at this time. This looks like: `set_mpl_projection('ProjectionType')`* If a **tuple** is passed, the first item of the tuple is a string of the transform name and the following two items are the args and kwargs. These can be used to further customize the transform (by setting the latitude and longitude, for example). This looks like: * `set_mpl_projection(('ProjectionType', (args)))` * `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* A **transform object** can also be passed. This can be any transform type -- a cartopy transform or a matplotlib transform. This allows users to either pass the same transform object around between plots or define their own transform and use that in yt's plotting functions. With a standard cartopy transform, this would look like: * `set_mpl_projection(cartopy.crs.PlateCarree())` To summarize: the function `set_mpl_projection` can take one of several input types:* `set_mpl_projection('ProjectionType')`* `set_mpl_projection(('ProjectionType', (args)))`* `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* `set_mpl_projection(cartopy.crs.MyTransform())`For example, we can make the same Orthographic projection and pass in the central latitude and longitude for the projection:
###Code
p.set_mpl_projection(("Orthographic", (90, 45)))
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Or we can pass the arguments to this function as kwargs by using a three-element tuple.
###Code
p.set_mpl_projection(
("Orthographic", (), {"central_latitude": -45, "central_longitude": 275})
)
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
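# A transform object can also be passed directly (a hedged sketch; cartopy is
# already required for yt's geographic plotting):
import cartopy.crs as ccrs
p.set_mpl_projection(ccrs.Mollweide())
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()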
###Output
_____no_output_____
###Markdown
A few examples of different projections This next section will show a few of the different projections that one can use. This isn't meant to be complete, but it'll give you a visual idea of how these transforms can be used to illustrate geographic data for different purposes.
###Code
p.set_mpl_projection(("RotatedPole", (177.5, 37.5)))
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection(
("RotatedPole", (), {"pole_latitude": 37.5, "pole_longitude": 177.5})
)
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection("NorthPolarStereo")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection("AlbersEqualArea")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection("InterruptedGoodeHomolosine")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
p.set_mpl_projection("Gnomonic")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Modifying the data transform While the data projection modifies how the data is displayed in our plot, the data transform specifies the coordinate system the data itself is defined in. By default, the data is assumed to have a `PlateCarree` data transform. If you would like to change this, you can access the dictionary in the coordinate handler and set it to something else. The dictionary is structured such that each axis has its own default transform, so be sure to set the axis you intend to change. This next example changes the transform to a Miller type. Because our data is not in Miller coordinates, it will be skewed.
###Code
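# Each axis keeps its own entry in the handler's dictionary; a quick look at
# the defaults before overriding the altitude axis (a hedged inspection step):
print(ds.coordinates.data_transform)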
ds.coordinates.data_transform["altitude"] = "Miller"
p = yt.SlicePlot(ds, "altitude", "AIRDENS")
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Because the transform type shouldn't change as we make subsequent figures, once it is changed it will be the same for all other figures made with the same dataset object. Note that this particular dataset is not actually in a Miller system, which is why the data now doesn't span the entire globe. Setting the new projection to Robinson results in Miller-skewed data in our next figure.
###Code
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots["AIRDENS"].axes.set_global()
p.plots["AIRDENS"].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Geographic Transforms and Projections Loading the GEOS data For this analysis we'll be loading some global climate data into yt. A frontend does not exist for this dataset yet, so we'll load it in as a uniform grid with netcdf4.
###Code
import yt
import numpy as np
import re
import netCDF4 as nc4
import os
def get_data_path(arg):
if os.path.exists(arg):
return arg
else:
return os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"), arg)
n = nc4.Dataset(get_data_path("geos/GEOS.fp.asm.inst3_3d_aer_Nv.20180822_0900.V01.nc4"))
###Output
_____no_output_____
###Markdown
Using the loaded data we'll fill arrays with the data dimensions and limits. We'll also rename `vertical level` to `altitude` to be clearer.
###Code
dims = []
sizes = []
bbox = []
ndims = len(n.dimensions)
for dim in n.dimensions.keys():
size = n.variables[dim].size
if size > 1:
bbox.append([n.variables[dim][:].min(),
n.variables[dim][:].max()])
dims.append(n.variables[dim].long_name)
sizes.append(size)
dims.reverse() # Fortran ordering
sizes.reverse()
bbox.reverse()
dims = [f.replace('vertical level', 'altitude') for f in dims]
bbox = np.array(bbox)
###Output
_____no_output_____
###Markdown
We'll also load the data into a container dictionary and create a lookup for the short to the long names
###Code
w_regex = re.compile(r'([a-zA-Z]+)(.*)')
def regex_parser(s):
try:
return "**".join(filter(None, w_regex.search(s).groups()))
except AttributeError:
return s
data = {}
names = {}
for field, d in n.variables.items():
if d.ndim != ndims:
continue
units = n.variables[field].units
units = " * ".join(map(regex_parser, units.split()))
data[field] = (np.squeeze(d), str(units))
names[field] = n.variables[field].long_name.replace("_", " ")
###Output
_____no_output_____
###Markdown
Now the data can be loaded with yt's `load_uniform_grid` function. We also need to say that the geometry is a `geographic` type. This will ensure that the axes created are matplotlib GeoAxes and that the transform functions are available to use for projections.
###Code
ds = yt.load_uniform_grid(data, sizes, 1.0, geometry=("geographic", dims),
bbox=bbox)
###Output
_____no_output_____
###Markdown
Default projection with geographic geometry Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will create a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.show()
###Output
_____no_output_____
###Markdown
Note that this doesn't have a lot of contextual information. We can add annotations for the coastlines just as we would with matplotlib. Before the annotations are set, we need to call `p._setup_plots` to make the axes available for annotation.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Using geographic transforms to project data If a projection other than the default `Mollweide` is desired, then we can pass an argument to the `set_mpl_projection()` function to set a different projection than the default. This will set the projection to a Robinson projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`geo_projection` accepts a string or a 2- to 3- length sequence describing the projection the second item in the sequence are the args and the third item is the kwargs. This can be used for further customization of the projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection(("Robinson",(37.5,)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
We don't actually need to keep creating a SlicePlot to change the projection type. We can use the function `set_mpl_projection()` and pass in a string of the transform type that we desire after an existing `SlicePlot` instance has been created. This will set the figure to an `Orthographic` projection.
###Code
p.set_mpl_projection('Orthographic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`set_mpl_projection()` can be used in a number of ways to customize the projection type. * If a **string** is passed, then the string must correspond to the transform name, which is exclusively cartopy transforms at this time. This looks like: `set_mpl_projection('ProjectionType')`* If a **tuple** is passed, the first item of the tuple is a string of the transform name and the second two items are args and kwargs. These can be used to further customize the transform (by setting the latitude and longitude, for example. This looks like: * `set_mpl_projection(('ProjectionType', (args)))` * `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* A **transform object** can also be passed. This can be any transform type -- a cartopy transform or a matplotlib transform. This allows users to either pass the same transform object around between plots or define their own transform and use that in yt's plotting functions. With a standard cartopy transform, this would look like: * `set_mpl_projection(cartopy.crs.PlateCarree())` To summarize:The function `set_mpl_projection` can take one of several input types:* `set_mpl_projection('ProjectionType')`* `set_mpl_projection(('ProjectionType', (args)))`* `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* `set_mpl_projection(cartopy.crs.MyTransform())`For example, we can make the same Orthographic projection and pass in the central latitude and longitude for the projection:
###Code
p.set_mpl_projection(('Orthographic', (90, 45)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Or we can pass in the arguments to this function as kwargs by passing a three element tuple.
###Code
p.set_mpl_projection(('Orthographic', (), {'central_latitude':-45, 'central_longitude':275}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
A few examples of different projections This next section will show a few of the different projections that one can use. This isn't meant to be complete, but it'll give you a visual idea of how these transforms can be used to illustrate geographic data for different purposes.
###Code
p.set_mpl_projection(('RotatedPole', (177.5, 37.5)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection(('RotatedPole', (), {'pole_latitude':37.5, 'pole_longitude':177.5}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('NorthPolarStereo')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('AlbersEqualArea')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('InterruptedGoodeHomolosine')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Gnomonic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Modifying the data transform While the data projection modifies how the data is displayed in our plot, the data transform describes the coordinate system that the data is actually described by. By default, the data is assumed to have a `PlateCarree` data transform. If you would like to change this, you can access the dictionary in the coordinate handler and set it to something else. The dictionary is structured such that each axis has its own default transform, so be sure to set the axis you intend to change. This next example changes the transform to a Miller type. Because our data is not in Miller coordinates, it will be skewed.
###Code
ds.coordinates.data_transform["altitude"]="Miller"
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Because the transform type shouldn't change as we make subsequent figures, once it is changed it will be the same for all other figures made with the same dataset object. Note that this particular dataset is not actually in a Miller system, which is why the data now doesn't span the entire globe. Setting the new projection to Robinson results in Miller-skewed data in our next figure.
###Code
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Geographic Transforms and Projections Loading the GEOS data For this analysis we'll be loading some global climate data into yt. A frontend does not exist for this dataset yet, so we'll load it in as a uniform grid with netcdf4.
###Code
import yt
import numpy as np
import re
import netCDF4 as nc4
import os
def get_data_path(arg):
if os.path.exists(arg):
return arg
else:
return os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"), arg)
n = nc4.Dataset(get_data_path("geos/GEOS.fp.asm.inst3_3d_aer_Nv.20180822_0900.V01.nc4"))
###Output
_____no_output_____
###Markdown
Using the loaded data we'll fill arrays with the data dimensions and limits. We'll also rename `vertical level` to `altitude` to be clearer.
###Code
dims = []
sizes = []
bbox = []
ndims = len(n.dimensions)
for dim in n.dimensions.keys():
size = n.variables[dim].size
if size > 1:
bbox.append([n.variables[dim][:].min(),
n.variables[dim][:].max()])
dims.append(n.variables[dim].long_name)
sizes.append(size)
dims.reverse() # Fortran ordering
sizes.reverse()
bbox.reverse()
dims = [f.replace('vertical level', 'altitude') for f in dims]
bbox = np.array(bbox)
###Output
_____no_output_____
###Markdown
We'll also load the data into a container dictionary and create a lookup for the short to the long names
###Code
w_regex = re.compile(r'([a-zA-Z]+)(.*)')
def regex_parser(s):
try:
return "**".join(filter(None, w_regex.search(s).groups()))
except AttributeError:
return s
data = {}
names = {}
for field, d in n.variables.items():
if d.ndim != ndims:
continue
units = n.variables[field].units
units = " * ".join(map(regex_parser, units.split()))
data[field] = (np.squeeze(d), str(units))
names[field] = n.variables[field].long_name.replace("_", " ")
###Output
_____no_output_____
###Markdown
Now the data can be loaded with yt's `load_uniform_grid` function. We also need to say that the geometry is a `geographic` type. This will ensure that the axes created are matplotlib GeoAxes and that the transform functions are available to use for projections.
###Code
ds = yt.load_uniform_grid(data, sizes, 1.0, geometry=("geographic", dims),
bbox=bbox)
###Output
_____no_output_____
###Markdown
Default projection with geographic geometry Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will crate a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.show()
###Output
_____no_output_____
###Markdown
Note that this doesn't have a lot of contextual information. We can add annotations for the coastlines just as we would with matplotlib. Before the annotations are set, we need to call `p._setup_plots` to make the axes available for annotation.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Using geographic transforms to project data If a projection other than the default `Mollweide` is desired, then we can pass an argument to the `set_mpl_projection()` function to set a different projection than the default. This will set the projection to a Robinson projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`geo_projection` accepts a string or a 2- to 3- length sequence describing the projection the second item in the sequence are the args and the third item is the kwargs. This can be used for further customization of the projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection(("Robinson",(37.5,)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
We don't actually need to keep creating a SlicePlot to change the projection type. We can use the function `set_mpl_projection()` and pass in a string of the transform type that we desire after an existing `SlicePlot` instance has been created. This will set the figure to an `Orthographic` projection.
###Code
p.set_mpl_projection('Orthographic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`set_mpl_projection()` can be used in a number of ways to customize the projection type. * If a **string** is passed, then the string must correspond to the transform name, which is exclusively cartopy transforms at this time. This looks like: `set_mpl_projection('ProjectionType')`* If a **tuple** is passed, the first item of the tuple is a string of the transform name and the second two items are args and kwargs. These can be used to further customize the transform (by setting the latitude and longitude, for example. This looks like: * `set_mpl_projection(('ProjectionType', (args)))` * `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* A **transform object** can also be passed. This can be any transform type -- a cartopy transform or a matplotlib transform. This allows users to either pass the same transform object around between plots or define their own transform and use that in yt's plotting functions. With a standard cartopy transform, this would look like: * `set_mpl_projection(cartopy.crs.PlateCarree())` To summarize:The function `set_mpl_projection` can take one of several input types:* `set_mpl_projection('ProjectionType')`* `set_mpl_projection(('ProjectionType', (args)))`* `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* `set_mpl_projection(cartopy.crs.MyTransform())`For example, we can make the same Orthographic projection and pass in the central latitude and longitude for the projection:
###Code
p.set_mpl_projection(('Orthographic', (90, 45)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Or we can pass in the arguments to this function as kwargs by passing a three element tuple.
###Code
p.set_mpl_projection(('Orthographic', (), {'central_latitude':-45, 'central_longitude':275}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
A few examples of different projections This next section will show a few of the different projections that one can use. This isn't meant to be complete, but it'll give you a visual idea of how these transforms can be used to illustrate geographic data for different purposes.
###Code
p.set_mpl_projection(('RotatedPole', (177.5, 37.5)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection(('RotatedPole', (), {'pole_latitude':37.5, 'pole_longitude':177.5}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('NorthPolarStereo')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('AlbersEqualArea')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('InterruptedGoodeHomolosine')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Gnomonic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Modifying the data transform While the data projection modifies how the data is displayed in our plot, the data transform describes the coordinate system that the data is actually described by. By default, the data is assumed to have a `PlateCarree` data transform. If you would like to change this, you can access the dictionary in the coordinate handler and set it to something else. The dictionary is structured such that each axis has its own default transform, so be sure to set the axis you intend to change. This next example changes the transform to a Miller type. Because our data is not in Miller coordinates, it will be skewed.
###Code
ds.coordinates.data_transform["altitude"]="Miller"
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Because the transform type shouldn't change as we make subsequent figures, once it is changed it will be the same for all other figures made with the same dataset object. Note that this particular dataset is not actually in a Miller system, which is why the data now doesn't span the entire globe. Setting the new projection to Robinson results in Miller-skewed data in our next figure.
###Code
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Geographic Transforms and Projections Loading the GEOS data For this analysis we'll be loading some global climate data into yt. A frontend does not exist for this dataset yet, so we'll load it in as a uniform grid with netcdf4.
###Code
import yt
import numpy as np
import re
import netCDF4 as nc4
import os
def get_data_path(arg):
if os.path.exists(arg):
return arg
else:
return os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"), arg)
n = nc4.Dataset(get_data_path("geos/GEOS.fp.asm.inst3_3d_aer_Nv.20180822_0900.V01.nc4"))
###Output
_____no_output_____
###Markdown
Using the loaded data we'll fill arrays with the data dimensions and limits. We'll also rename `vertical level` to `altitude` to be clearer.
###Code
dims = []
sizes = []
bbox = []
ndims = len(n.dimensions)
for dim in n.dimensions.keys():
size = n.variables[dim].size
if size > 1:
bbox.append([n.variables[dim][:].min(),
n.variables[dim][:].max()])
dims.append(n.variables[dim].long_name)
sizes.append(size)
dims.reverse() # Fortran ordering
sizes.reverse()
bbox.reverse()
dims = [f.replace('vertical level', 'altitude') for f in dims]
bbox = np.array(bbox)
###Output
_____no_output_____
###Markdown
We'll also load the data into a container dictionary and create a lookup for the short to the long names
###Code
w_regex = re.compile(r'([a-zA-Z]+)(.*)')
def regex_parser(s):
try:
return "**".join(filter(None, w_regex.search(s).groups()))
except AttributeError:
return s
data = {}
names = {}
for field, d in n.variables.items():
if d.ndim != ndims:
continue
units = n.variables[field].units
units = " * ".join(map(regex_parser, units.split()))
data[field] = (np.squeeze(d), str(units))
names[field] = n.variables[field].long_name.replace("_", " ")
###Output
_____no_output_____
###Markdown
Now the data can be loaded with yt's `load_uniform_grid` function. We also need to say that the geometry is a `geographic` type. This will ensure that the axes created are matplotlib GeoAxes and that the transform functions are available to use for projections.
###Code
ds = yt.load_uniform_grid(data, sizes, 1.0, geometry=("geographic", dims),
bbox=bbox)
###Output
_____no_output_____
###Markdown
Default projection with geographic geometry Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will crate a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.show()
###Output
_____no_output_____
###Markdown
Note that this doesn't have a lot of contextual information. We can add annotations for the coastlines just as we would with matplotlib. Before the annotations are set, we need to call `p._setup_plots` to make the axes available for annotation.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Using geographic transforms to project data If a projection other than the default `Mollweide` is desired, then we can pass an argument to the `set_mpl_projection()` function to set a different projection than the default. This will set the projection to a Robinson projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`geo_projection` accepts a string or a 2- to 3- length sequence describing the projection the second item in the sequence are the args and the third item is the kwargs. This can be used for further customization of the projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection(("Robinson",(37.5,)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
We don't actually need to keep creating a SlicePlot to change the projection type. We can use the function `set_mpl_projection()` and pass in a string of the transform type that we desire after an existing `SlicePlot` instance has been created. This will set the figure to an `Orthographic` projection.
###Code
p.set_mpl_projection('Orthographic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`set_mpl_projection()` can be used in a number of ways to customize the projection type. * If a **string** is passed, then the string must correspond to the transform name, which is exclusively cartopy transforms at this time. This looks like: `set_mpl_projection('ProjectionType')`* If a **tuple** is passed, the first item of the tuple is a string of the transform name and the second two items are args and kwargs. These can be used to further customize the transform (by setting the latitude and longitude, for example. This looks like: * `set_mpl_projection(('ProjectionType', (args)))` * `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* A **transform object** can also be passed. This can be any transform type -- a cartopy transform or a matplotlib transform. This allows users to either pass the same transform object around between plots or define their own transform and use that in yt's plotting functions. With a standard cartopy transform, this would look like: * `set_mpl_projection(cartopy.crs.PlateCarree())` To summarize:The function `set_mpl_projection` can take one of several input types:* `set_mpl_projection('ProjectionType')`* `set_mpl_projection(('ProjectionType', (args)))`* `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* `set_mpl_projection(cartopy.crs.MyTransform())`For example, we can make the same Orthographic projection and pass in the central latitude and longitude for the projection:
###Code
p.set_mpl_projection(('Orthographic', (90, 45)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Or we can pass in the arguments to this function as kwargs by passing a three element tuple.
###Code
p.set_mpl_projection(('Orthographic', (), {'central_latitude':-45, 'central_longitude':275}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
A few examples of different projections This next section will show a few of the different projections that one can use. This isn't meant to be complete, but it'll give you a visual idea of how these transforms can be used to illustrate geographic data for different purposes.
###Code
p.set_mpl_projection(('RotatedPole', (177.5, 37.5)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection(('RotatedPole', (), {'pole_latitude':37.5, 'pole_longitude':177.5}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('NorthPolarStereo')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('AlbersEqualArea')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('InterruptedGoodeHomolosine')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Gnomonic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Modifying the data transform While the data projection modifies how the data is displayed in our plot, the data transform describes the coordinate system that the data is actually described by. By default, the data is assumed to have a `PlateCarree` data transform. If you would like to change this, you can access the dictionary in the coordinate handler and set it to something else. The dictionary is structured such that each axis has its own default transform, so be sure to set the axis you intend to change. This next example changes the transform to a Miller type. Because our data is not in Miller coordinates, it will be skewed.
###Code
ds.coordinates.data_transform["altitude"]="Miller"
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Because the transform type shouldn't change as we make subsequent figures, once it is changed it will be the same for all other figures made with the same dataset object. Note that this particular dataset is not actually in a Miller system, which is why the data now doesn't span the entire globe. Setting the new projection to Robinson results in Miller-skewed data in our next figure.
###Code
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Geographic Transforms and Projections Loading the GEOS data For this analysis we'll be loading some global climate data into yt. A frontend does not exist for this dataset yet, so we'll load it in as a uniform grid with netcdf4.
###Code
import pprint
import yt
import numpy as np
import re
import netCDF4 as nc4
import os
def get_data_path(arg):
if os.path.exists(arg):
return arg
else:
return os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"), arg)
n = nc4.Dataset(get_data_path("geos/GEOS.fp.asm.inst3_3d_aer_Nv.20180822_0900.V01.nc4"))
###Output
_____no_output_____
###Markdown
Using the loaded data we'll fill arrays with the data dimensions and limits. We'll also rename `vertical level` to `altitude` to be clearer.
###Code
dims = []
sizes = []
bbox = []
ndims = len(n.dimensions)
for dim in n.dimensions.keys():
size = n.variables[dim].size
if size > 1:
bbox.append([n.variables[dim][:].min(),
n.variables[dim][:].max()])
dims.append(n.variables[dim].long_name)
sizes.append(size)
dims.reverse() # Fortran ordering
sizes.reverse()
bbox.reverse()
dims = [f.replace('vertical level', 'altitude') for f in dims]
bbox = np.array(bbox)
###Output
_____no_output_____
###Markdown
We'll also load the data into a container dictionary and create a lookup for the short to the long names
###Code
w_regex = re.compile(r'([a-zA-Z]+)(.*)')
def regex_parser(s):
try:
return "**".join(filter(None, w_regex.search(s).groups()))
except AttributeError:
return s
data = {}
names = {}
for field, d in n.variables.items():
if d.ndim != ndims:
continue
units = n.variables[field].units
units = " * ".join(map(regex_parser, units.split()))
data[field] = (np.squeeze(d), str(units))
names[field] = n.variables[field].long_name.replace("_", " ")
###Output
_____no_output_____
###Markdown
Now the data can be loaded with yt's `load_uniform_grid` function. We also need to say that the geometry is a `geographic` type. This will ensure that the axes created are matplotlib GeoAxes and that the transform functions are available to use for projections.
###Code
ds = yt.load_uniform_grid(data, sizes, 1.0, geometry=("geographic", dims),
bbox=bbox)
###Output
_____no_output_____
###Markdown
Default projection with geographic geometry Now that the data is loaded, we can plot it with a yt SlicePlot along the altitude. This will crate a figure with latitude and longitude as the plot axes and the colormap will correspond to the air density. Because no projection type has been set, the geographic geometry type assumes that the data is of the `PlateCarree` form. The resulting figure will be a `Mollweide` plot.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.show()
###Output
_____no_output_____
###Markdown
Note that this doesn't have a lot of contextual information. We can add annotations for the coastlines just as we would with matplotlib. Before the annotations are set, we need to call `p._setup_plots` to make the axes available for annotation.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Using geographic transforms to project data If a projection other than the default `Mollweide` is desired, then we can pass an argument to the `set_mpl_projection()` function to set a different projection than the default. This will set the projection to a Robinson projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection("Robinson")
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`geo_projection` accepts a string or a 2- to 3- length sequenece describing the projection the second item in the sequence are the args and the third item is the kwargs. This can be used for further customization of the projection.
###Code
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.set_mpl_projection(("Robinson",(37.5,)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
We don't actually need to keep creating a SlicePlot to change the projection type. We can use the function `set_mpl_projection()` and pass in a string of the transform type that we desire after an existing `SlicePlot` instance has been created. This will set the figure to an `Orthographic` projection.
###Code
p.set_mpl_projection('Orthographic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
`set_mpl_projection()` can be used in a number of ways to customize the projection type. * If a **string** is passed, then the string must correspond to the transform name, which is exclusively cartopy transforms at this time. This looks like: `set_mpl_projection('ProjectionType')`* If a **tuple** is passed, the first item of the tuple is a string of the transform name and the second two items are args and kwargs. These can be used to further customize the transform (by setting the latitude and longitude, for example. This looks like: * `set_mpl_projection(('ProjectionType', (args)))` * `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* A **transform object** can also be passed. This can be any transform type -- a cartopy transform or a matplotlib transform. This allows users to either pass the same tranform object around between plots or define their own transform and use that in yt's plotting functions. With a standard cartopy transform, this would look like: * `set_mpl_projection(cartopy.crs.PlateCarree())` To summarize:The function `set_mpl_projection` can take one of several input types:* `set_mpl_projection('ProjectionType')`* `set_mpl_projection(('ProjectionType', (args)))`* `set_mpl_projection(('ProjectionType', (args), {kwargs}))`* `set_mpl_projection(cartopy.crs.MyTransform())`For example, we can make the same Orthographic projection and pass in the central latitude and longitude for the projection:
###Code
p.set_mpl_projection(('Orthographic', (90, 45)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Or we can pass in the arguments to this function as kwargs by passing a three element tuple.
###Code
p.set_mpl_projection(('Orthographic', (), {'central_latitude':-45, 'central_longitude':275}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
A few examples of different projections This next section will show a few of the different projections that one can use. This isn't meant to be complete, but it'll give you a visual idea of how these transforms can be used to illustrate geographic data for different purposes.
###Code
p.set_mpl_projection(('RotatedPole', (177.5, 37.5)))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection(('RotatedPole', (), {'pole_latitude':37.5, 'pole_longitude':177.5}))
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('NorthPolarStereo')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('AlbersEqualArea')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('InterruptedGoodeHomolosine')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
p.set_mpl_projection('Gnomonic')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Modifying the data transform While the data projection modifies how the data is displayed in our plot, the data transform describes the coordinate system that the data is actually described by. By default, the data is assumed to have a `PlateCarree` data transform. If you would like to change this, you can access the dictionary in the coordinate handler and set it to something else. The dictionary is structured such that each axis has its own default transform, so be sure to set the axis you intend to change. This next example changes the transform to a Miller type. Because our data is not in Miller coordiantes, it will be skewed.
###Code
ds.coordinates.data_transform["altitude"]="Miller"
p = yt.SlicePlot(ds, "altitude", 'AIRDENS')
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
###Markdown
Because the transform type shouldn't change as we make subsequent figures, once it is changed it will be the same for all other figures made with the same dataset object. Note that this particular dataset is not actually in a Miller system, which is why the data now doesn't span the entire globe. Setting the new projection to Robinson results in Miller-skewed data in our next figure.
###Code
p.set_mpl_projection('Robinson')
p._setup_plots()
p.plots['AIRDENS'].axes.set_global()
p.plots['AIRDENS'].axes.coastlines()
p.show()
###Output
_____no_output_____
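###Markdown
To undo this change, the data transform can be set back to its default. This is a small sketch following the same dictionary pattern used above:
###Code
# Restore the default PlateCarree data transform for the altitude axis
ds.coordinates.data_transform["altitude"] = "PlateCarree"
###Output
_____no_output_____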
sdk/cosmos/azure-cosmos-spark_3-1_2-12/Samples/Python/Predicate-Pushdown/Predicate-PushDown-Sample.ipynb | ###Markdown
**Secrets**The secrets below, like the Cosmos account key, are retrieved from a secret scope. If you haven't defined a secret scope for a Cosmos account you want to use when going through this sample, you can find the instructions on how to create one here:- Here you can [Create a new secret scope](./secrets/createScope) for the current Databricks workspace - See how you can create an [Azure Key Vault backed secret scope](https://docs.microsoft.com/azure/databricks/security/secrets/secret-scopes--create-an-azure-key-vault-backed-secret-scope) - See how you can create a [Databricks backed secret scope](https://docs.microsoft.com/azure/databricks/security/secrets/secret-scopescreate-a-databricks-backed-secret-scope)- And here you can find information on how to [add secrets to your Spark configuration](https://docs.microsoft.com/azure/databricks/security/secrets/secretsread-a-secret)If you don't want to use secrets at all, you can of course also just assign the values in clear text below - but for obvious reasons we recommend the usage of secrets.
###Code
cosmosEndpoint = spark.conf.get("spark.cosmos.accountEndpoint")
cosmosMasterKey = spark.conf.get("spark.cosmos.accountKey")
###Output
_____no_output_____
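###Markdown
On Databricks, the same values can instead be read directly from a secret scope. This is a sketch with hypothetical scope and key names - substitute your own secret scope configuration:
###Code
# Hypothetical scope/key names; replace with your own secret scope values
cosmosEndpoint = dbutils.secrets.get(scope="cosmos-scope", key="cosmosEndpoint")
cosmosMasterKey = dbutils.secrets.get(scope="cosmos-scope", key="cosmosMasterKey")
###Output
_____no_output_____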
###Markdown
**Preparation - creating the Cosmos DB container to ingest the data into**Configure the Catalog API to be used
###Code
import uuid
spark.conf.set("spark.sql.catalog.cosmosCatalog", "com.azure.cosmos.spark.CosmosCatalog")
spark.conf.set("spark.sql.catalog.cosmosCatalog.spark.cosmos.accountEndpoint", cosmosEndpoint)
spark.conf.set("spark.sql.catalog.cosmosCatalog.spark.cosmos.accountKey", cosmosMasterKey)
spark.conf.set("spark.sql.catalog.cosmosCatalog.spark.cosmos.views.repositoryPath", "/viewDefinitions" + str(uuid.uuid4()))
###Output
_____no_output_____
###Markdown
Creating a new container to be used for the push down sample and inserting a couple of test records
###Code
%sql
CREATE DATABASE IF NOT EXISTS cosmosCatalog.PushDownSample;
CREATE TABLE IF NOT EXISTS cosmosCatalog.PushDownSample.PushDownSample
USING cosmos.oltp
TBLPROPERTIES(partitionKeyPath = '/id', manualThroughput = '400', indexingPolicy = 'OnlySystemProperties');
###Output
_____no_output_____
###Markdown
Setting up the write config to ingest data into the new container
###Code
writeCfg = {
"spark.cosmos.accountEndpoint": cosmosEndpoint,
"spark.cosmos.accountKey": cosmosMasterKey,
"spark.cosmos.database": "PushDownSample",
"spark.cosmos.container": "PushDownSample",
"spark.cosmos.write.strategy": "ItemOverwrite",
}
readCfg = {
"spark.cosmos.accountEndpoint": cosmosEndpoint,
"spark.cosmos.accountKey": cosmosMasterKey,
"spark.cosmos.database": "PushDownSample",
"spark.cosmos.container": "PushDownSample",
"spark.cosmos.read.inferSchemaIncludeSystemProperties": "True"
}
###Output
_____no_output_____
###Markdown
Ingesting some sample data with a 5-second delay in between to ensure different _ts values
###Code
from pyspark.sql import Row
import time
initialRows = [('00001','First Record'),('00002','Second Record')]
initialRdd = sc.parallelize(initialRows)
initialDF = sqlContext.createDataFrame(initialRdd.map(lambda x: Row(id=x[0], someValue=x[1])))
initialDF \
.write \
.format("cosmos.oltp") \
.mode("Append") \
.options(**writeCfg) \
.save()
time.sleep(5)
tsThreshold = int(time.time())
nextRows = [('00003','Third Record'),('00004','Fourth Record')]
nextRdd = sc.parallelize(nextRows)
nextDF = sqlContext.createDataFrame(nextRdd.map(lambda x: Row(id=x[0], someValue=x[1])))
nextDF \
.write \
.format("cosmos.oltp") \
.mode("Append") \
.options(**writeCfg) \
.save()
###Output
_____no_output_____
###Markdown
Get all records to be able to see the _ts values
###Code
query_df = spark.read.format("cosmos.oltp").options(**readCfg).load()
query_df.show()
assert query_df.count() == 4
###Output
_____no_output_____
###Markdown
Show the query plan for the unfiltered query
###Code
query_df.explain()
###Output
_____no_output_____
###Markdown
Get all records with a _ts high enough to filter only some of the records
###Code
raw_query_df = spark.read.format("cosmos.oltp").options(**readCfg).load()
filtered_query_df = raw_query_df.where("_ts >= " + str(tsThreshold))
filtered_query_df.show()
assert filtered_query_df.count() == 2
###Output
_____no_output_____
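###Markdown
The same predicate can also be expressed with the DataFrame column API instead of a SQL string; both forms are candidates for pushdown. A quick sketch:
###Code
from pyspark.sql.functions import col

# Column-expression form of the _ts filter
alt_filtered_df = raw_query_df.where(col("_ts") >= tsThreshold)
assert alt_filtered_df.count() == 2
###Output
_____no_output_____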
###Markdown
Show the query plan for the filtered query
###Code
filtered_query_df.explain()
###Output
_____no_output_____
###Markdown
**Cleanup - deleting the Cosmos DB container and database again (to reduce cost) - skip this step if you want to keep them**
###Code
%sql
DROP TABLE IF EXISTS cosmosCatalog.PushDownSample.PushDownSample;
DROP DATABASE IF EXISTS cosmosCatalog.PushDownSample CASCADE;
###Output
_____no_output_____
intro_notebooks/1_Intro_ListToNumpy.ipynb | ###Markdown
Run this cell to import all libraries needed for this notebook outside of Jupyter itself and to make sure you have them available.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Initialize two Python list objects below. Remember that [] creates a list object the same as calling list(). This is also true for other data structures in Python, such as dictionaries (hash maps / hash tables), which can be created with {} or dict().
###Code
a = [1, 2, 3, 4, 5]
b = [6, 7, 8, 9, 10]
print(a)
print(b)
###Output
[1, 2, 3, 4, 5]
[6, 7, 8, 9, 10]
###Markdown
What does a + b create?
###Code
a + b
###Output
_____no_output_____
###Markdown
Notice that the two lists did not sum the internal elements of each list, but rather concatenated the two list objects into a single list, while maintaining the order of the internal elements. Lists are ordered objects; dictionaries historically were not (as of Python 3.7 they preserve insertion order).
###Code
sum_result = []
for first, second in zip(a,b):
sum_temp = first + second
sum_result.append(sum_temp)
print(sum_result)
###Output
[7, 9, 11, 13, 15]
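###Markdown
For a 2-dimensional structure, the same approach needs one loop per dimension. A small sketch to illustrate the extra level of iteration:
###Code
m1 = [[1, 2], [3, 4]]
m2 = [[5, 6], [7, 8]]
matrix_sum = []
for row1, row2 in zip(m1, m2):
    # A second level of iteration is required for the inner dimension
    matrix_sum.append([x + y for x, y in zip(row1, row2)])
print(matrix_sum)
###Output
[[6, 8], [10, 12]]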
###Markdown
While this works for a small number of elements in a one-dimensional list, it will become increasingly slow as each new dimension requires an additional for loop. Each new nested for loop increases the time complexity from n -> n^2 -> n^3. As you will soon see, image data is often represented as 2-, 3- or 4-dimensional arrays. Arrays are not lists, though they often appear similar at first in Python. We can instantiate numpy arrays by providing a list.
###Code
c = np.array(a)
d = np.array(b)
print(type(a), type(b))
print(type(c), type(d))
c + d
###Output
_____no_output_____
###Markdown
Notice how we did not need to create a for loop in order to add the internal elements of the two arrays c and d together. Instead, numpy automatically vectorized the sum operation. It is also possible to do this with a vector (array) and a scalar (a single value or constant).
###Code
c + 2
###Output
_____no_output_____
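###Markdown
Other arithmetic operators are vectorized the same way:
###Code
print(c * 2)   # element-wise scalar multiplication
print(c ** 2)  # element-wise exponentiation
###Output
[ 2  4  6  8 10]
[ 1  4  9 16 25]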
###Markdown
However, both lists and numpy arrays share a common index slicing syntax, which can cause confusion early in your Python career. Notice that objects of class ndarray do not have commas between the elements, while Python list objects do.
###Code
print('Type of a: {}'.format(type(a)))
print('Complete value of a: {}'.format(a))
print('First element of a: {}'.format(a[0]))
print('First two elements of a: {}'.format(a[0:2]))
print('Last element of a: {}'.format(a[-1]))
print('Object a reversed order: {}'.format(a[::-1]))
print('Type of c: {}'.format(type(c)))
print('Complete value of c: {}'.format(c))
print('First element of c: {}'.format(c[0]))
print('First two elements of c: {}'.format(c[0:2]))
print('Last element of c: {}'.format(c[-1]))
print('Object c reversed order: {}'.format(c[::-1]))
###Output
Type of a: <class 'list'>
Complete value of a: [1, 2, 3, 4, 5]
First element of a: 1
First two elements of a: [1, 2]
Last element of a: 5
Object a reversed order: [5, 4, 3, 2, 1]
Type of c: <class 'numpy.ndarray'>
Complete value of c: [1 2 3 4 5]
First element of c: 1
First two elements of c: [1 2]
Last element of c: 5
Object c reversed order: [5 4 3 2 1]
###Markdown
One of the reasons index slicing is so powerful is for value reassignment or selection. Notice that Python is 0-indexed, hence a 5-element list or array has indices 0-4 not 1-5.
###Code
a[0] = 10
c[4] = 10
print(a)
print(c)
print(len(a))
print(c.shape)
print(c.ndim)
###Output
[10, 2, 3, 4, 5]
[ 1  2  3  4 10]
5
(5,)
1
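###Markdown
One caveat with reassignment: a numpy array has a fixed dtype, so assigning a float into this integer array silently truncates it. A quick illustration on a copy (so later cells are unaffected):
###Code
c_copy = c.copy()
c_copy[0] = 2.7  # truncated to 2 because the array holds integers
print(c_copy)
###Output
[ 2  2  3  4 10]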
###Markdown
Notice that the shape of an array is an immutable tuple where each element is the number of entries along that dimension (i.e. len() of c.shape equals c.ndim, the number of dimensions). But how do you create a 2-dimensional array (i.e. a matrix)?
###Code
# e is created from a list of lists
e = np.array([a, c])
print(type(e))
print(e.shape)
print(e.ndim)
print(e)
###Output
<class 'numpy.ndarray'>
(2, 5)
2
[[10 2 3 4 5]
[ 1 2 3 4 10]]
###Markdown
Exercise: Create a 3-dimensional array and print its type, shape, number of dimensions, and size. How are shape and size related?
###Code
# Cell for Exercise
###Output
_____no_output_____
###Markdown
Solution:
###Code
f = np.array([e,e])
print(type(f))
print(f.shape)
print(f.ndim)
print(f)
###Output
<class 'numpy.ndarray'>
(2, 2, 5)
3
[[[10 2 3 4 5]
[ 1 2 3 4 10]]
[[10 2 3 4 5]
[ 1 2 3 4 10]]]
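###Markdown
The size is the total number of elements, which is the product of the entries in shape. A quick check:
###Code
print(f.size)            # total number of elements
print(np.prod(f.shape))  # product of the shape tuple gives the same value
###Output
20
20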
|